python_code | repo_name | file_path
---|---|---|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model layer normalisation and dropout utilities."""
import sonnet as snt
import tensorflow.compat.v2 as tf
class ResidualDropoutWrapper(snt.Module):
"""Wrapper that applies residual connections, dropout and layer norm."""
def __init__(self, layer, dropout_rate, apply_layer_norm=True, name=None):
"""Creates the Wrapper Class.
Args:
layer: module to wrap.
dropout_rate: dropout rate. A rate of 0. will turn off dropout.
apply_layer_norm: (default True) whether to apply layer norm after
residual.
name: name of the module.
"""
super(ResidualDropoutWrapper, self).__init__(name=name)
self._layer = layer
self._dropout_rate = dropout_rate
self._apply_layer_norm = apply_layer_norm
if self._apply_layer_norm:
self._layer_norm = snt.LayerNorm(
axis=-1, create_scale=True, create_offset=True)
def __call__(self, inputs, *args, **kwargs):
"""Returns the result of the residual dropout computation.
Args:
inputs: inputs to the main module.
*args: Additional arguments to inner layer.
**kwargs: Additional named arguments to inner layer.
"""
# Apply main module.
outputs = self._layer(inputs, *args, **kwargs)
# Dropout before residual.
if kwargs.get('is_training', False):
outputs = tf.nn.dropout(outputs, rate=self._dropout_rate)
if 'query_inputs' in kwargs:
outputs += kwargs['query_inputs']
else:
outputs += inputs
if self._apply_layer_norm:
outputs = self._layer_norm(outputs)
return outputs
| neural_lns-main | layer_norm.py |
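A minimal usage sketch of the ResidualDropoutWrapper above (assuming the class is in scope; TinyBlock and the shapes are illustrative, not part of the library). Note that the wrapper forwards extra keyword arguments such as is_training verbatim, so the wrapped layer must accept them.

import sonnet as snt
import tensorflow.compat.v2 as tf


class TinyBlock(snt.Module):
  """A toy inner layer that accepts the forwarded `is_training` kwarg."""

  def __init__(self, width, name=None):
    super().__init__(name=name)
    self._linear = snt.Linear(width)

  def __call__(self, x, is_training=False):
    del is_training  # Unused by this toy layer.
    return self._linear(x)


block = ResidualDropoutWrapper(TinyBlock(16), dropout_rate=0.1)
x = tf.random.normal([4, 16])  # Width must match so the residual add works.
y = block(x, is_training=True)  # Dropout applied, then residual add + layer norm.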
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common utilities for Solver."""
import abc
import enum
from typing import Any, Dict, Optional
import ml_collections
from neural_lns import mip_utils
class SolverState(enum.Enum):
INIT = 0
MODEL_LOADED = 1
FINISHED = 2
class Solver(abc.ABC):
"""Wrapper around a given classical MIP solver.
This class contains the API needed to communicate with a MIP solver, e.g.
SCIP.
"""
def load_model(self, mip: Any) -> SolverState:
"""Loads a MIP model into the solver."""
raise NotImplementedError('load_model method should be implemented')
def solve(
self, solving_params: ml_collections.ConfigDict
) -> mip_utils.MPSolverResponseStatus:
"""Solves the loaded MIP model."""
raise NotImplementedError('solve method should be implemented')
def get_best_solution(self) -> Optional[Any]:
"""Returns the best solution found from the last solve call."""
raise NotImplementedError('get_best_solution method should be implemented')
def add_solution(self, solution: Any) -> bool:
"""Adds a known solution to the solver."""
raise NotImplementedError('add_solution method should be implemented')
def extract_lp_features_at_root(
self, solving_params: ml_collections.ConfigDict) -> Dict[str, Any]:
"""Returns a dictionary of root node features."""
raise NotImplementedError(
'extract_lp_features_at_root method should be implemented')
| neural_lns-main | solving_utils.py |
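A sketch of the intended call sequence against a concrete Solver implementation. The time_limit_seconds config field is a made-up example entry, and no specific members of mip_utils.MPSolverResponseStatus are assumed.

import ml_collections

from neural_lns import solving_utils


def solve_and_extract(solver: solving_utils.Solver, mip):
  """Loads a MIP, extracts root-LP features, then solves it."""
  assert solver.load_model(mip) == solving_utils.SolverState.MODEL_LOADED
  params = ml_collections.ConfigDict(dict(time_limit_seconds=60.0))
  features = solver.extract_lp_features_at_root(params)  # Dict[str, Any].
  status = solver.solve(params)                          # MPSolverResponseStatus.
  best = solver.get_best_solution()  # None if no feasible solution was found.
  return features, status, best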
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
import os
import setuptools
here = os.path.dirname(os.path.abspath(__file__))
def _get_version():
"""Returns the JAXline version."""
with open(os.path.join(here, 'jaxline', '__init__.py')) as f:
try:
version_line = next(
line for line in f if line.startswith('__version__'))
except StopIteration:
raise ValueError('__version__ not defined in jaxline/__init__.py')
else:
ns = {}
exec(version_line, ns) # pylint: disable=exec-used
return ns['__version__']
def _parse_requirements(path):
with open(os.path.join(here, path)) as f:
return [
line.rstrip() for line in f
if not (line.isspace() or line.startswith('#'))
]
EXTRA_PACKAGES = {
'jax': ['jax>=0.1.71'],
'jaxlib': ['jaxlib>=0.1.49'],
'tensorflow': ['tensorflow>=2'],
'tensorflow with gpu': ['tensorflow-gpu>=2'],
}
setuptools.setup(
name='jaxline',
version=_get_version(),
url='https://github.com/deepmind/jaxline',
description='JAXline is a distributed JAX training framework.',
license='Apache 2.0',
author='DeepMind',
author_email='[email protected]',
long_description=open(os.path.join(here, 'README.md')).read(),
long_description_content_type='text/markdown',
# Contained modules and scripts.
packages=setuptools.find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
extras_require=EXTRA_PACKAGES,
python_requires='>=3.6',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
)
| jaxline-master | setup.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pytest configuration."""
from absl import flags
def pytest_configure(config):
del config # Unused.
flags.FLAGS.mark_as_parsed()
| jaxline-master | jaxline/conftest.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base config."""
from ml_collections import config_dict
def validate_keys(base_cfg, config, base_filename="base_config.py"):
"""Validates that the config "inherits" from a base config.
Args:
base_cfg (`ConfigDict`): base config object containing the required fields
for each experiment config.
config (`ConfigDict`): experiment config to be checked against base_cfg.
base_filename (str): file used to generate base_cfg.
Raises:
ValueError: if base_cfg contains keys that are not present in config.
"""
for key in base_cfg.keys():
if key not in config:
raise ValueError("Key {!r} missing from config. This config is required "
"to have keys: {}. See {} for details.".format(
key, list(base_cfg.keys()), base_filename))
if (isinstance(base_cfg[key], config_dict.ConfigDict) and
config[key] is not None):
validate_keys(base_cfg[key], config[key])
def check_constraints(config):
"""Validates that the config parameters comply to specific set of rules.
Args:
config (`ConfigDict`): experiment config to be checked against base_cfg.
Raises:
ValueError: if config has train_checkpoint_all_hosts set to True and the
resulting interval type for checkpointing is time-based (secs).
"""
if config.train_checkpoint_all_hosts:
if config.checkpoint_interval_type not in ["steps", None] or (
config.checkpoint_interval_type is None and
config.interval_type != "steps"):
raise ValueError(
"Invalid interval type selected for the experiment. "
'When "train_checkpoint_all_hosts = True" True, you need to specify '
'"checkpoint_interval_type = steps". This is to avoid saving '
"checkpoints with different global_step on different hosts, which "
"causes hosts to run out of sync upon restore. Got: "
f"train_checkpoint_all_hosts: {config.train_checkpoint_all_hosts}, "
f"interval_type: {config.interval_type}, "
f"checkpoint_interval_type: {config.checkpoint_interval_type}.")
if (config.periodic_action_growth_ratios is not None
and config.interval_type != "steps"):
raise ValueError(
f"Invalid interval type {config.interval_type}."
"When you set periodic_action_growth_ratios you must use "
"config.interval_type='steps'."
)
def validate_config(config):
validate_keys(get_base_config(), config)
check_constraints(config)
def get_base_config():
"""Returns base config object for an experiment."""
config = config_dict.ConfigDict()
config.experiment_kwargs = config_dict.ConfigDict()
config.training_steps = 10000 # Number of training steps.
config.interval_type = "secs"
config.save_checkpoint_interval = 300
config.log_tensors_interval = 60
# This is an optional config to allow for logging at growth ratios.
# This logging is *in addition* to the standard linear intervals.
# For example, setting [1, 2, 5, 10] will log at [1, 2, 5, 10, 20, 50, ...]
# Should only be used with interval_type="steps".
config.periodic_action_growth_ratios = None
config.log_train_data_interval = 120.0 # None to turn off
config.log_async = True
# Log from all training hosts. You can facet on jax_process_index.
config.log_all_hosts = False
# Overrides of `interval_type` for specific periodic operations. If `None`,
# we use the value of `interval_type`.
config.logging_interval_type = None
config.checkpoint_interval_type = None
# If set to True we save the initial checkpoint before executing the first
# train step. This speeds up eval worker start time as they can immediately
# begin compilation, and sets a reference point for init model performance.
config.save_initial_train_checkpoint = False
# If set to True we checkpoint on all hosts, which may be useful
# for model parallelism. Otherwise we checkpoint on host 0.
config.train_checkpoint_all_hosts = False
config.best_checkpoint_all_hosts = False
# If True, asynchronously logs training data from every training step.
config.log_all_train_data = False
# If true, run evaluate() on the experiment once before you load a checkpoint.
# This is useful for getting initial values of metrics at random weights, or
# when debugging locally if you do not have any train job running.
config.eval_initial_weights = False
# When True, the eval job immediately loads a checkpoint, runs evaluate()
# once, then terminates.
config.one_off_evaluate = False
# Number of checkpoints to keep by default
config.max_checkpoints_to_keep = 5
# Settings for the RNGs used during training and evaluation.
config.random_seed = 42
config.random_mode_train = "unique_host_unique_device"
config.random_mode_eval = "same_host_same_device"
# The metric (returned by the step function) used as a fitness score.
# It saves a separate series of checkpoints corresponding to
# those which produce a better fitness score than previously seen.
# By default it is assumed that higher is better, but this behaviour can be
# changed to lower is better, i.e. behaving as a loss score, by setting
# `best_model_eval_metric_higher_is_better = False`.
# If `best_model_eval_metric` is empty (the default), best checkpointing is
# disabled.
config.best_model_eval_metric = ""
config.best_model_eval_metric_higher_is_better = True
return config
| jaxline-master | jaxline/base_config.py |
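A minimal sketch of an experiment config built on top of get_base_config(); the field values and the nested experiment_kwargs entries are illustrative only. get_config() is the entry point loaded by the --config flag defined in platform.py.

from jaxline import base_config
from ml_collections import config_dict


def get_config():
  config = base_config.get_base_config()
  config.interval_type = "steps"
  config.training_steps = 1_000
  config.save_checkpoint_interval = 100
  config.log_tensors_interval = 10
  config.periodic_action_growth_ratios = [1, 2, 5, 10]  # Requires "steps".
  config.experiment_kwargs = config_dict.ConfigDict(
      dict(config=dict(learning_rate=1e-3)))
  base_config.validate_config(config)  # Raises if a required key is missing.
  return config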
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for jaxline's utils."""
import functools
import itertools as it
import time
from unittest import mock
from absl.testing import absltest
from absl.testing import flagsaver
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
def _num_unique_keys(keys):
return len(np.unique(jax.random.key_data(keys), axis=0))
class PyPrefetchTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(list(utils.py_prefetch(lambda: ())), [])
def testBaseCase(self):
self.assertEqual(list(utils.py_prefetch(lambda: range(100))),
list(range(100)))
def testBadFunction(self):
def _bad_function():
raise ValueError
iterable = utils.py_prefetch(_bad_function)
with self.assertRaises(ValueError):
next(iterable)
def testBadFunctionIteration(self):
def _bad_iterable():
yield 1
raise ValueError
iterable = utils.py_prefetch(_bad_iterable)
self.assertEqual(next(iterable), 1)
with self.assertRaises(ValueError):
next(iterable)
class TreePsumTest(absltest.TestCase):
def testBaseCase(self):
# pick leaf objects with leading dimension one as these tests will
# be run on a single device.
data = {"a": jnp.array([1]), "b": jnp.array([2])}
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testEmpty(self):
data = {"a": jnp.array([]), "b": jnp.array([])}
with self.assertRaises(ZeroDivisionError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testSingleLeafTree(self):
data = jnp.array([1])
data_summed = jax.pmap(
lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
self.assertEqual(data_summed, data)
def testNotNumpy(self):
data = [1]
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNumDevicesMismatch(self):
data = jnp.array([1, 2]) # assumes 2 devices but we only have 1
with self.assertRaises(ValueError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="i")(data)
def testNoPmapWrapper(self):
with self.assertRaises(NameError): # axis_name will be undefined
utils.tree_psum(jnp.array([1]), axis_name="i")
def testAxisNameMismatch(self):
data = jnp.array([1])
with self.assertRaises(NameError):
jax.pmap(lambda x: utils.tree_psum(x, axis_name="i"), axis_name="j")(data)
class MakeAsyncTest(absltest.TestCase):
def testBaseCase(self):
"""Tests correct execution for single call."""
r = []
async_fn = utils.make_async()(lambda: r.append("a"))
async_fn()
time.sleep(1)
self.assertListEqual(r, ["a"])
def testNonBlocking(self):
"""Tests async function doesn't block the main thread."""
r = []
async_fn = utils.make_async()(lambda: r.append((time.sleep(5), "a")))
future = async_fn()  # Starts the 5s background call without blocking.
r.append((None, "b"))  # Runs immediately, before the background append.
future.result()  # Wait for the background call to finish.
self.assertListEqual(r, [(None, "b"), (None, "a")])
def testSerialExecution(self):
"""Tests multiple calls to async function execute serially."""
r = []
a = lambda: r.append((time.sleep(5), "a"))
b = lambda: r.append((None, "b"))
async_fn = utils.make_async()(lambda f: f())
async_fn(a)
async_fn(b).result()
self.assertListEqual(r, [(None, "a"), (None, "b")])
def testErrorOnNextCall(self):
"""Tests background thread error raised in main thread on next call."""
@utils.make_async()
def async_fn():
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on next call
async_fn()
def testSubsequentCallsDontRun(self):
"""Tests that subsequent calls don't run after an error has occurred."""
runs = []
@utils.make_async()
def async_fn():
runs.append(None)
raise ValueError()
# First call will trigger an error in the background thread.
async_fn()
for _ in range(2):
with self.assertRaises(ValueError):
# Background thread error will be raised in the main thread on
# subsequent calls and _bad_function will not be run.
async_fn()
self.assertListEqual(runs, [None])
def testErrorInBackgroundThread(self):
"""Tests background thread raises the error."""
@utils.make_async()
def async_fn():
raise ValueError()
future = async_fn() # pylint: disable=assignment-from-no-return
self.assertIsNotNone(future.exception())
class TestBroadcast(absltest.TestCase):
def test_bcast_local_devices(self):
self.assertEqual(utils.bcast_local_devices(jnp.zeros([])),
jnp.zeros([jax.local_device_count()]))
self.assertEqual(utils.bcast_local_devices(jnp.ones([])),
jnp.ones([jax.local_device_count()]))
def test_bcast_local_devices_empty_tree(self):
self.assertIsNone(utils.bcast_local_devices(None))
self.assertEqual(utils.bcast_local_devices({}), {})
def test_bcast_local_devices_tree(self):
num_devices = jax.local_device_count()
tree = utils.bcast_local_devices({"ones": jnp.ones([]),
"zeros": jnp.zeros([])})
self.assertEqual(tree, {"ones": jnp.ones([num_devices]),
"zeros": jnp.zeros([num_devices])})
class TestLogActivity(absltest.TestCase):
@mock.patch("jaxline.utils.logging.info")
def test_log_success(self, mock_info):
"""Tests that logging an activity is successful."""
with utils.log_activity("for test"):
pass
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_info.assert_any_call("[jaxline] %s finished.", "for test")
@mock.patch("absl.logging.exception")
@mock.patch("absl.logging.info")
def test_log_failure(self, mock_info, mock_exc):
"""Tests that an error thrown by an activity is correctly caught."""
with self.assertRaisesRegex(ValueError, "Intentional"):
with utils.log_activity("for test"):
raise ValueError("Intentional")
mock_info.assert_any_call("[jaxline] %s starting...", "for test")
mock_exc.assert_any_call("[jaxline] %s failed with error.", "for test")
class TestSpecializeRngHostDevice(absltest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSpecializeRngHostDevice, cls).setUpClass()
rng = jax.random.PRNGKey(0)
cls.rng = jnp.broadcast_to(
rng, (jax.local_device_count(),) + rng.shape)
def test_unique_device(self):
"""Tests that rngs are unique across devices."""
mode = "unique_host_unique_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(_num_unique_keys(rng), jax.local_device_count())
def test_same_device(self):
"""Tests rngs are same across devices."""
mode = "unique_host_same_device"
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng = specialize_func(self.rng, host_id_devices)
self.assertEqual(_num_unique_keys(rng), 1)
def test_unique_host(self):
"""Tests rngs unique between hosts."""
mode = "unique_host_same_device"
with mock.patch.object(utils.jax, "process_index", return_value=0):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng0 = specialize_func(self.rng, host_id_devices)
with mock.patch.object(utils.jax, "process_index", return_value=1):
host_id_devices = utils.host_id_devices_for_rng(mode)
specialize_func = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=mode), axis_name="i")
rng1 = specialize_func(self.rng, host_id_devices)
keys = jnp.concatenate([rng0, rng1], axis=0)
self.assertEqual(_num_unique_keys(keys), 2)
class TestRendezvous(absltest.TestCase):
def test_rendezvous(self):
"""Test that rendezvous doesn't fail."""
utils.rendezvous()
class TestJaxlineDisablePmapJit(absltest.TestCase):
@mock.patch.object(utils.chex, "fake_pmap_and_jit", autospec=True)
def test_pmap_jit_disabled(self, mock_fake_pmap_and_jit):
"""Tests pmap/jit are disabled if --jaxline_disable_pmap_jit is set."""
with self.subTest("PmapJitNotDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=False):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_not_called()
with self.subTest("PmapJitDisabled"):
with flagsaver.flagsaver(jaxline_disable_pmap_jit=True):
utils.disable_pmap_jit(lambda: None)()
mock_fake_pmap_and_jit.assert_called_once()
class DoubleBufferTest(absltest.TestCase):
def test_double_buffer(self):
if jax.default_backend() != "gpu":
self.skipTest("Only necessary on GPU.")
n = jax.local_device_count()
dataset = it.repeat(np.ones([n]))
iterator = iter(utils.double_buffer(dataset))
batch_ptrs = []
while len(batch_ptrs) < 4:
batch = next(iterator)
ptrs = [b.unsafe_buffer_pointer() for b in batch.device_buffers]
batch_ptrs.append(ptrs)
del batch
self.assertEqual(batch_ptrs[0], batch_ptrs[2])
self.assertEqual(batch_ptrs[1], batch_ptrs[3])
self.assertNotEqual(batch_ptrs[0], batch_ptrs[1])
self.assertNotEqual(batch_ptrs[2], batch_ptrs[3])
class PeriodicActionTest(absltest.TestCase):
"""Tests for PeriodicAction."""
def test_log_growth_ratios(self):
"""Checks a specific value of growth ratios logs as expected."""
data = []
logger = utils.PeriodicAction(
fn=lambda step, _: data.append(step), # fn logs step to data
interval_type="steps",
interval=1_000_000, # Large interval won't get called early on.
logging_growth_ratios=[1, 2, 5, 10], # Example growth ratios
)
for step in range(1010):
logger(time.time(), step+1, {"fake_data": 0}) # pytype: disable=wrong-arg-types # jax-ndarray
logger.wait_to_finish()
# Check that we got the results that we expected
target_data = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
self.assertEqual(data, target_data)
def test_log_last_step(self):
"""Checks that when it's enabled, we also log the last step."""
data = []
logger = utils.PeriodicAction(
fn=lambda step, _: data.append(step), # fn logs step to data
interval_type="steps",
interval=200,
end_step_to_action=1010,
)
for step in range(1010):
logger(time.time(), step+1, {"fake_data": 0}) # pytype: disable=wrong-arg-types # jax-ndarray
logger.wait_to_finish()
# Check that we got the results that we expected
target_data = [200, 400, 600, 800, 1000, 1010]
self.assertEqual(data, target_data)
if __name__ == "__main__":
absltest.main()
| jaxline-master | jaxline/utils_test.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Jaxline's train."""
import copy
from typing import Dict, Optional
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as jnp
from jaxline import base_config
from jaxline import experiment
from jaxline import train
from jaxline import utils
from ml_collections import config_dict
import numpy as np
_IMPROVEMENT_STEPS = [2, 5, 15, 27, 99] # Arbitrary.
_FITNESS_METRIC_KEY = "A_GOOD_METRIC"
class DummyExperiment(experiment.AbstractExperiment):
"""An experiment whose evaluate improves at set intervals."""
def __init__(self, mode):
super().__init__(mode=mode)
self.evaluate_counter = 0
self.fitness_metric = 0
self.init_rng = None
self.step_rngs = []
self.steps = 0
def initialize_train_step_rng(self, rng: jnp.ndarray) -> jnp.ndarray:
"""Remembers the value returned from AbstractExperiment."""
self.init_rng = super().initialize_train_step_rng(rng)
return self.init_rng
def step(
self,
*,
global_step: jnp.ndarray,
rng: jnp.ndarray,
writer: Optional[utils.Writer],
) -> Dict[str, np.ndarray]:
"""Test implementation, counts steps and records the rngs it was given."""
self.steps += 1
self.step_rngs.append(rng)
return {"loss": -1}
def evaluate(self, *args, **kwargs) -> Optional[Dict[str, np.ndarray]]:
"""Test implementation, improves fitness metric at specified steps."""
if self.evaluate_counter in _IMPROVEMENT_STEPS:
self.fitness_metric += 1
self.evaluate_counter += 1
return {_FITNESS_METRIC_KEY: self.fitness_metric}
class DummyCheckpoint:
"""Do nothing but record when save is called."""
def __init__(self, **kwargs):
del kwargs # Unused for this class.
self._state = config_dict.ConfigDict()
self._state_list = []
self._checkpoint_path_int = 0
self._global_step_int = -1
def get_experiment_state(
self,
unused_ckpt_series: str,
) -> config_dict.ConfigDict:
return self._state
def save(self, unused_ckpt_series: str) -> None:
self._state_list.append(copy.copy(self._state))
def can_be_restored(self, ckpt_series: str) -> bool:
return ckpt_series == "latest"
def restore(self, unused_ckpt_series: str) -> None:
self._global_step_int += 1
self._state.global_step = self._global_step_int
def restore_path(self, unused_ckpt_series) -> Optional[str]:
"""Always return something new so there"s no waiting."""
self._checkpoint_path_int += 1
return str(self._checkpoint_path_int)
def wait_for_checkpointing_to_finish(self) -> None:
"""Noop, needed for API compatibility."""
class TrainTest(parameterized.TestCase):
@parameterized.parameters(1, 4)
def test_train_step_rng(self, num_steps: int):
config = base_config.get_base_config()
config.training_steps = num_steps
checkpointer = DummyCheckpoint()
writer = mock.create_autospec(utils.Writer, instance=True)
train.train(DummyExperiment, config, checkpointer, writer)
if jax.config.jax_enable_custom_prng:
expected_rng_shape = (jax.local_device_count(),)
else:
expected_rng_shape = (jax.local_device_count(), 2)
state = checkpointer.get_experiment_state("latest")
self.assertEqual(state.global_step, num_steps)
self.assertEqual(state.train_step_rng.shape, expected_rng_shape)
experiment_module = state.experiment_module
self.assertEqual(experiment_module.init_rng.shape, expected_rng_shape)
self.assertLen(experiment_module.step_rngs, num_steps)
for step_rng in experiment_module.step_rngs:
self.assertEqual(step_rng.shape, expected_rng_shape)
@parameterized.parameters(
dict(process_id=0, checkpoint_all_hosts=False, should_checkpoint=True),
dict(process_id=0, checkpoint_all_hosts=True, should_checkpoint=True),
dict(process_id=0, checkpoint_all_hosts=None, should_checkpoint=True),
dict(process_id=3, checkpoint_all_hosts=True, should_checkpoint=True),
dict(process_id=3, checkpoint_all_hosts=False, should_checkpoint=False),
dict(process_id=3, checkpoint_all_hosts=None, should_checkpoint=False),
)
def test_best_checkpoint_saves_only_at_improved_best_metrics(
self,
process_id: int,
checkpoint_all_hosts: Optional[bool],
should_checkpoint: bool,
):
self.enter_context(
mock.patch.object(jax, "process_index", new=lambda: process_id))
config = base_config.get_base_config()
config.best_model_eval_metric = _FITNESS_METRIC_KEY
if checkpoint_all_hosts is not None:
config.best_checkpoint_all_hosts = checkpoint_all_hosts
config.training_steps = 100
ckpt = DummyCheckpoint()
writer = mock.Mock()
train.evaluate(DummyExperiment, config, ckpt, writer, jaxline_mode="eval")
if not should_checkpoint:
self.assertEmpty(ckpt._state_list)
else:
# The first step will always checkpoint.
self.assertLen(
ckpt._state_list, len(_IMPROVEMENT_STEPS) + 1)
checkpointed_states = [
s.global_step for s in ckpt._state_list]
self.assertEqual(checkpointed_states, [0] + _IMPROVEMENT_STEPS)
if __name__ == "__main__":
absltest.main()
| jaxline-master | jaxline/train_test.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jaxline Distributed Training system."""
__version__ = "0.0.7"
| jaxline-master | jaxline/__init__.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Deepmind-specific platform for running Experiments with Jaxline."""
from concurrent import futures
import os
from typing import Any, Mapping
from absl import flags
from absl import logging
import chex
import jax
from jaxline import base_config
from jaxline import train
from jaxline import utils
from ml_collections import config_dict
from ml_collections import config_flags
import numpy as np
import tensorflow as tf
# TODO(tomhennigan) Add support for ipdb and pudb.
_CONFIG = config_flags.DEFINE_config_file(
name="config",
help_string="Training configuration file.",
)
# This flag is expected to be used only internally by jaxline.
# It is prefixed by "jaxline" to prevent a conflict with a "mode" flag defined
# by Monarch.
_JAXLINE_MODE = flags.DEFINE_string(
name="jaxline_mode",
default="train",
help=("Execution mode. "
" `train` will run training, `eval` will run evaluation."),
)
_JAXLINE_TPU_DRIVER = flags.DEFINE_string(
name="jaxline_tpu_driver",
default="",
help="Whether to use tpu_driver.",
)
_JAXLINE_ENSURE_TPU = flags.DEFINE_bool(
name="jaxline_ensure_tpu",
default=False,
help="Whether to ensure we have a TPU connected.",
)
def create_checkpointer(
config: config_dict.ConfigDict,
mode: str,
) -> utils.Checkpointer:
"""Creates an object to be used as a checkpointer."""
return utils.InMemoryCheckpointer(config, mode)
class TensorBoardLogger:
"""Writer to write experiment data to stdout."""
def __init__(self, config, mode: str):
"""Initializes the writer."""
log_dir = os.path.join(config.checkpoint_dir, mode)
self._writer = tf.summary.create_file_writer(log_dir)
def write_scalars(self, global_step: int, scalars: Mapping[str, Any]):
"""Writes scalars to stdout."""
global_step = int(global_step)
with self._writer.as_default():
for k, v in scalars.items():
tf.summary.scalar(k, v, step=global_step)
self._writer.flush()
def write_images(self, global_step: int, images: Mapping[str, np.ndarray]):
"""Writes images to writers that support it."""
global_step = int(global_step)
with self._writer.as_default():
for k, v in images.items():
# Tensorboard only accepts [B, H, W, C] but we support [H, W] also.
if v.ndim == 2:
v = v[None, ..., None]
tf.summary.image(k, v, step=global_step)
self._writer.flush()
def create_writer(config: config_dict.ConfigDict, mode: str) -> Any:
"""Creates an object to be used as a writer."""
return TensorBoardLogger(config, mode)
@utils.debugger_fallback
def main(experiment_class, argv, checkpointer_factory=create_checkpointer):
"""Main potentially under a debugger."""
del argv # Unused.
# Make sure the required fields are available in the config.
config = _CONFIG.value
base_config.validate_config(config)
if _JAXLINE_TPU_DRIVER.value:
jax.config.update("jax_xla_backend", "tpu_driver")
jax.config.update("jax_backend_target", _JAXLINE_TPU_DRIVER.value)
logging.info("Backend: %s %r", _JAXLINE_TPU_DRIVER.value, jax.devices())
if _JAXLINE_ENSURE_TPU.value:
# JAX currently falls back to CPU if it cannot register the TPU platform.
# In multi-host setups this can happen if we timeout waiting for hosts to
# come back up at startup or after pre-emption. This test will crash the
# task if TPU devices are not available. We have increased the number of
# acceptable failures per-task to allow for this.
# TODO(tomhennigan) This test will eventually be part of JAX itself.
chex.assert_tpu_available()
jaxline_mode = _JAXLINE_MODE.value
if jaxline_mode == "train":
# Run training.
checkpointer = checkpointer_factory(config, jaxline_mode)
writer = create_writer(config, jaxline_mode)
train.train(experiment_class, config, checkpointer, writer)
elif jaxline_mode.startswith("eval"):
# Run evaluation.
checkpointer = checkpointer_factory(config, jaxline_mode)
writer = create_writer(config, jaxline_mode)
train.evaluate(experiment_class, config, checkpointer, writer,
jaxline_mode)
elif jaxline_mode == "train_eval_multithreaded":
pool = futures.ThreadPoolExecutor(1)
# Run training in a background thread!
pool.submit(train.train, experiment_class, config,
checkpointer_factory(config, "train"),
create_writer(config, "train"))
# Run eval!
train.evaluate(experiment_class, config,
checkpointer_factory(config, "eval"),
create_writer(config, "eval"))
# If we're here, eval has finished. Wait for train to finish!
pool.shutdown()
else:
raise ValueError(f"Mode {jaxline_mode} not recognized.")
| jaxline-master | jaxline/platform.py |
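A minimal launcher sketch (the MyExperiment import is hypothetical): a jaxline job is started from an absl entry point that forwards the experiment class to platform.main, with the configuration supplied via the --config flag.

import functools

from absl import app
from absl import flags
from jaxline import platform

from my_project.experiment import MyExperiment  # Hypothetical experiment module.


if __name__ == "__main__":
  flags.mark_flag_as_required("config")
  app.run(functools.partial(platform.main, MyExperiment))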
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for training JAXline experiments.
Any class that implements this interface is compatible with
the JAXline Distributed Training system.
"""
import abc
import functools
import time
from typing import Dict, List, Mapping, Optional
from absl import logging
import jax
import jax.numpy as jnp
from jaxline import utils
from ml_collections import config_dict
import numpy as np
class AbstractExperiment(abc.ABC):
"""The base class for training JAXline experiments."""
# A dict mapping attributes of this class to a name they are stored under.
#
# Pmapped attributes should be included in CHECKPOINT_ATTRS and will be
# assumed to have a leading dimension corresponding to the pmapped axis when
# saving and restoring.
#
# Non-pmapped attributes should be included in NON_BROADCAST_CHECKPOINT_ATTRS
# and will be assumed to have no such leading dimension.
CHECKPOINT_ATTRS = {}
NON_BROADCAST_CHECKPOINT_ATTRS = {}
@abc.abstractmethod
def __init__(self, mode: str, init_rng: Optional[jax.Array] = None):
"""Constructs the experiment.
Args:
mode: A string, equivalent to FLAGS.jaxline_mode when running normally.
init_rng: A `PRNGKey` to use for experiment initialization.
"""
# TODO(b/205109371): Make init_rng non-optional.
def initialize_train_step_rng(self, rng: jnp.ndarray) -> jnp.ndarray:
"""Initializes train_step_rng on devices as a suitable JAX array.
Ensures the rng key is placed on the devices in the desired way, producing
a JAX array of required type (i.e. `ShardedDeviceArray` or
`GlobalDeviceArray`), partitioning and shape.
The default implementation broadcasts the key to all local devices, forming
a `ShardedDeviceArray` with a new leading axis. This behavior is suitable
for the pmap-based data-parallel training.
Args:
rng: a single `PRNGKey`.
Returns:
A JAX array representing a desired configuration of rng keys on devices,
ready to use in the training loop.
"""
return utils.bcast_local_devices(rng)
@abc.abstractmethod
def step(
self,
*,
global_step: jnp.ndarray,
rng: jnp.ndarray,
writer: Optional[utils.Writer],
) -> Dict[str, np.ndarray]:
"""Performs a step of computation e.g. a training step.
This function will be wrapped by `utils.kwargs_only` meaning that when
the user re-defines this function they can take only the arguments
they want e.g. def step(self, global_step, **unused_args).
Args:
global_step: A `ShardedDeviceArray` of the global step, one copy
for each local device. The values are guaranteed to be the same across
all local devices, it is just passed this way for consistency with
`rng`.
rng: A `ShardedDeviceArray` of `PRNGKey`s, one for each local device,
and unique to the global_step. The relationship between the keys is set
by config.random_mode_train.
writer: An optional writer for performing additional logging (note that
logging of the returned scalars is performed automatically by
jaxline/train.py)
Returns:
A dictionary of scalar `np.array`s to be logged.
"""
@abc.abstractmethod
def evaluate(
self,
*,
global_step: jnp.ndarray,
rng: jnp.ndarray,
writer: Optional[utils.Writer],
) -> Optional[Dict[str, np.ndarray]]:
"""Performs the full evaluation of the model.
This function will be wrapped by `utils.kwargs_only` meaning that when
the user re-defines this function they can take only the arguments
they want e.g. def evaluate(self, global_step, **unused_args).
Args:
global_step: A `ShardedDeviceArray` of the global step, one copy
for each local device.
rng: A `ShardedDeviceArray` of random keys, one for each local device,
and, unlike in the step function, *independent* of the global step (i.e.
the same array of keys is passed at every call to the function). The
relationship between the keys is set by config.random_mode_eval.
writer: An optional writer for performing additional logging (note that
logging of the returned scalars is performed automatically by
jaxline/train.py)
Returns:
A dictionary of scalar `np.array`s to be logged.
"""
def should_run_step(
self,
global_step: int,
config: config_dict.ConfigDict,
) -> bool:
"""Returns whether the step function will be run given the global_step."""
return global_step < config.training_steps
def train_loop(
self,
config: config_dict.ConfigDict,
state,
periodic_actions: List[utils.PeriodicAction],
writer: Optional[utils.Writer] = None,
) -> None:
"""Default training loop implementation.
Can be overridden for advanced use cases that need a different training loop
logic, e.g. on device training loop with jax.lax.while_loop or to add custom
periodic actions.
Args:
config: The config of the experiment that is being run.
state: Checkpointed state of the experiment.
periodic_actions: List of actions that should be called after every
training step, for checkpointing and logging.
writer: An optional writer to pass to the experiment step function.
"""
@functools.partial(jax.pmap, axis_name="i")
def next_device_state(
global_step: jnp.ndarray,
rng: jnp.ndarray,
host_id: Optional[jnp.ndarray],
):
"""Updates device global step and rng in one pmap fn to reduce overhead."""
global_step += 1
step_rng, state_rng = tuple(jax.random.split(rng))
step_rng = utils.specialize_rng_host_device(
step_rng, host_id, axis_name="i", mode=config.random_mode_train)
return global_step, (step_rng, state_rng)
global_step_devices = np.broadcast_to(state.global_step,
[jax.local_device_count()])
host_id_devices = utils.host_id_devices_for_rng(config.random_mode_train)
if host_id_devices is not None:
# Transfer to device to avoid host->device transfer on every step.
host_id_devices = jax.pmap(lambda x: x)(host_id_devices)
# Get step key for first step, do not update global_step_devices yet.
_, (step_key, state.train_step_rng) = next_device_state(
global_step_devices, state.train_step_rng, host_id_devices)
with utils.log_activity("training loop"):
while self.should_run_step(state.global_step, config):
with jax.profiler.StepTraceAnnotation(
"train", step_num=state.global_step):
scalar_outputs = self.step(
global_step=global_step_devices, rng=step_key, writer=writer)
t = time.time()
# Update state's (scalar) global step (for checkpointing).
# global_step_devices will be back in sync with this after the call
# to next_device_state below.
state.global_step += 1
global_step_devices, (step_key, state.train_step_rng) = (
next_device_state(global_step_devices,
state.train_step_rng,
host_id_devices))
for action in periodic_actions:
action(t, state.global_step, scalar_outputs) # pytype: disable=wrong-arg-types # jax-ndarray
def snapshot_state(self) -> Mapping[str, jnp.ndarray]:
"""Takes a frozen copy of the current experiment state for checkpointing.
Returns:
A mapping from experiment attributes to names to stored under in the
snapshot.
"""
snapshot_state = {}
if not self.CHECKPOINT_ATTRS and not self.NON_BROADCAST_CHECKPOINT_ATTRS:
logging.warning(
"Your experiment's self.CHECKPOINT_ATTRS and "
"self.NON_BROADCAST_CHECKPOINT_ATTRS are empty. Your job will not "
"checkpoint any state or parameters.")
for attr_name, chk_name in self.CHECKPOINT_ATTRS.items():
snapshot_state[chk_name] = utils.get_first(getattr(self, attr_name))
for attr_name, chk_name in self.NON_BROADCAST_CHECKPOINT_ATTRS.items():
snapshot_state[chk_name] = getattr(self, attr_name)
return snapshot_state
def restore_from_snapshot(
self,
snapshot_state: Mapping[str, jnp.ndarray],
) -> None:
"""Restores experiment state from a snapshot.
Args:
snapshot_state: A mapping from experiment attributes to names they are
stored under in the snapshot.
"""
def clear(attributes):
for attr_name in attributes:
if hasattr(self, attr_name):
delattr(self, attr_name)
def write(attributes, broadcast=False):
for attr_name, chk_name in attributes.items():
value = snapshot_state[chk_name]
if broadcast:
value = utils.bcast_local_devices(value)
setattr(self, attr_name, value)
# Explicitly clear existing attributes first, this (potentially) allows
# broadcast values to reuse previous allocations leading to reduced
# fragmentation of device memory.
clear(self.CHECKPOINT_ATTRS)
clear(self.NON_BROADCAST_CHECKPOINT_ATTRS)
write(self.CHECKPOINT_ATTRS, broadcast=True)
write(self.NON_BROADCAST_CHECKPOINT_ATTRS)
def on_new_best_model(self, best_state):
"""Hook to perform a custom logic when the best model is obtained.
This method will be run before each best model checkpoint save and can
implement any custom logic (checkpointing will still be done by jaxline).
It will only be run if jaxline is configured to track the best model,
i.e. if `config.best_model_eval_metric` is set.
Args:
best_state: Evaluator best state. Holds `best_eval_metric_value`. The state
can also be mutated to dump additional information from the evaluator.
"""
| jaxline-master | jaxline/experiment.py |
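A minimal sketch of a concrete experiment; the parameter shape, update rule, and metrics are purely illustrative. Only __init__, step, and evaluate must be provided, and any pmapped state listed in CHECKPOINT_ATTRS is snapshotted with its leading device axis stripped and restored by broadcasting.

import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import utils


class ToyExperiment(experiment.AbstractExperiment):
  """Trains a single parameter vector with a fixed, fake gradient."""

  CHECKPOINT_ATTRS = {"_params": "params"}

  def __init__(self, mode, init_rng=None, **experiment_kwargs):
    super().__init__(mode=mode, init_rng=init_rng)
    del experiment_kwargs  # Unused by this toy experiment.
    # Replicate the parameters across local devices for pmap-style training.
    self._params = utils.bcast_local_devices(jnp.zeros([8]))
    self._update = jax.pmap(lambda p, g: p - 0.1 * g, axis_name="i")

  def step(self, *, global_step, rng, writer):
    grads = jnp.ones_like(self._params)  # Stand-in for real gradients.
    self._params = self._update(self._params, grads)
    return {"loss": jnp.sum(utils.get_first(self._params) ** 2)}

  def evaluate(self, *, global_step, rng, writer):
    return {"param_norm": jnp.linalg.norm(utils.get_first(self._params))}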
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for Jaxline experiments."""
import collections
from concurrent import futures
import contextlib
import copy
import enum
import functools
import pdb
import queue
import sys
import threading
from typing import Any, Callable, Dict, Generator, Iterable, Mapping, Optional, Sequence, TypeVar
from absl import flags
from absl import logging
import chex
import jax
import jax.numpy as jnp
from ml_collections import config_dict
from typing_extensions import Protocol
import wrapt
_JAXLINE_POST_MORTEM = flags.DEFINE_bool(
name="jaxline_post_mortem",
default=False,
help="Whether to enter into post-mortem after an exception. ",
)
_JAXLINE_DISABLE_PMAP_JIT = flags.DEFINE_bool(
name="jaxline_disable_pmap_jit",
default=False,
help=("Whether to disable all pmaps and jits, making it easier to inspect "
"and trace code in a debugger."),
)
def _get_function_name(function: Callable[..., Any]) -> str:
if isinstance(function, functools.partial):
return f"partial({function.func.__name__})"
return function.__name__
# TODO(jaslanides): Replace these with typing.NamedTuple.
SnapshotNT = collections.namedtuple("SnapshotNT", ["id", "pickle_nest"])
CheckpointNT = collections.namedtuple("CheckpointNT", ["active", "history"])
T = TypeVar("T")
F = TypeVar("F", bound=Callable)
class Writer(Protocol):
"""Interface for writers/loggers."""
def write_scalars(self, global_step: int, scalars: Mapping[str, Any]):
"""Writes a dictionary of scalars."""
class Checkpointer(Protocol):
"""An interface for checkpointer objects."""
def save(self, ckpt_series: str) -> None:
"""Saves the checkpoint."""
def restore(self, ckpt_series: str) -> None:
"""Restores the checkpoint."""
def get_experiment_state(self, ckpt_series: str):
"""Returns the experiment state for a given checkpoint series."""
def restore_path(self, ckpt_series: str) -> Optional[str]:
"""Returns the restore path for the checkpoint, or None."""
def can_be_restored(self, ckpt_series: str) -> bool:
"""Returns whether or not a given checkpoint series can be restored."""
def wait_for_checkpointing_to_finish(self) -> None:
"""Waits for any async checkpointing to complete."""
def py_prefetch(
iterable_function: Callable[[], Iterable[T]],
buffer_size: int = 5,
*,
thread_name: Optional[str] = None,
) -> Generator[T, None, None]:
"""Performs prefetching of elements from an iterable in a separate thread.
Args:
iterable_function: A python function that when called with no arguments
returns an iterable. This is used to build a fresh iterable for each
thread (crucial if working with tensorflow datasets because tf.graph
objects are thread local).
buffer_size (int): Number of elements to keep in the prefetch buffer.
thread_name (str): Optional name for the producer thread.
Yields:
Prefetched elements from the original iterable.
Raises:
ValueError if the buffer_size <= 1.
Any error thrown by the iterable_function. Note this is not raised inside
the producer, but after it finishes executing.
"""
if buffer_size <= 1:
raise ValueError("the buffer_size should be > 1")
buffer = queue.Queue(maxsize=(buffer_size - 1))
producer_error = []
end = object()
def producer():
"""Enques items from iterable on a given thread."""
try:
# Build a new iterable for each thread. This is crucial if working with
# tensorflow datasets because tf.graph objects are thread local.
iterable = iterable_function()
for item in iterable:
buffer.put(item)
except Exception as e: # pylint: disable=broad-except
logging.exception("Error in producer thread for %s",
_get_function_name(iterable_function))
producer_error.append(e)
finally:
buffer.put(end)
threading.Thread(target=producer, daemon=True, name=thread_name).start()
# Consumer.
while True:
value = buffer.get()
if value is end:
break
yield value
if producer_error:
raise producer_error[0]
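# Illustrative usage sketch (not part of the library API): `py_prefetch` takes
# a *factory* returning an iterable, so a fresh iterator is constructed inside
# the producer thread; the consumer below runs while later items are buffered.
def _example_py_prefetch_usage():
  def make_toy_dataset():
    for i in range(10):
      yield {"x": jnp.ones([8]) * i}  # Stand-in for an expensive data loader.
  return [float(batch["x"].sum())
          for batch in py_prefetch(make_toy_dataset, buffer_size=4)]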
# TODO(tomhennigan) Remove this alias.
tree_psum = jax.lax.psum
def double_buffer_on_gpu(ds):
if jax.default_backend() == "gpu":
# This keeps two batches per-device in memory at all times, allowing
# h2d transfers to overlap with execution (see b/173483287 for details).
return double_buffer(ds)
else:
return ds
def _device_put_sharded(sharded_tree, devices):
leaves, treedef = jax.tree_util.tree_flatten(sharded_tree)
n = leaves[0].shape[0]
return jax.device_put_sharded(
[jax.tree_util.tree_unflatten(
treedef, [l[i] for l in leaves]) for i in range(n)],
devices)
def double_buffer(ds: Iterable[T]) -> Generator[T, None, None]:
"""Keeps at least two batches on the accelerator.
The current GPU allocator design reuses previous allocations. For a training
loop this means batches will (typically) occupy the same region of memory as
the previous batch. An issue with this is that it means we cannot overlap a
host->device copy for the next batch until the previous step has finished and
the previous batch has been freed.
By double buffering we ensure that there are always two batches on the device.
This means that a given batch waits on the N-2'th step to finish and free,
meaning that it can allocate and copy the next batch to the accelerator in
parallel with the N-1'th step being executed.
Args:
ds: Iterable of batches of numpy arrays.
Yields:
Batches of sharded device arrays.
"""
batch = None
devices = jax.local_devices()
device_put_sharded = make_async("async_device_put_sharded")( # pylint: disable=no-value-for-parameter
_device_put_sharded)
for next_batch in ds:
assert next_batch is not None
next_batch = device_put_sharded(next_batch, devices) # pylint: disable=not-callable
if batch is not None:
yield batch.result()
batch = next_batch
if batch is not None:
yield batch.result()
def get_first(xs):
"""Gets values from the first device."""
return jax.tree_util.tree_map(lambda x: x[0], xs)
def bcast_local_devices(value):
"""Broadcasts an object to all local devices."""
devices = jax.local_devices()
return jax.tree_util.tree_map(
lambda v: jax.device_put_sharded(len(devices) * [v], devices), value)
def make_async(thread_name_prefix: str = ""):
"""Returns a decorator that runs any function it wraps in a background thread.
When called, the decorated function will immediately return a future
representing its result.
The function being decorated can be an instance method or normal function.
Consecutive calls to the decorated function are guaranteed to be in order
and non overlapping.
An error raised by the decorated function will be raised in the background
thread at call-time. Raising the error in the main thread is deferred until
the next call, so as to be non-blocking.
All subsequent calls to the decorated function after an error has been raised
will not run (regardless of whether the arguments have changed); instead
they will re-raise the original error in the main thread.
Args:
thread_name_prefix: Str prefix for the background thread, for easier
debugging.
Returns:
decorator that runs any function it wraps in a background thread, and
handles any errors raised.
"""
# We have a single thread pool per wrapped function to ensure that calls to
# the function are run in order (but in a background thread).
pool = futures.ThreadPoolExecutor(max_workers=1,
thread_name_prefix=thread_name_prefix)
errors = []
@wrapt.decorator
def decorator(wrapped, instance, args, kwargs):
"""Runs wrapped in a background thread so result is non-blocking.
Args:
wrapped: A function to wrap and execute in background thread.
Can be instance method or normal function.
instance: The object to which the wrapped function was bound when it was
called (None if wrapped is a normal function).
args: List of position arguments supplied when wrapped function
was called.
kwargs: Dict of keyword arguments supplied when the wrapped function was
called.
Returns:
A future representing the result of calling wrapped.
Raises:
Exception object caught in background thread, if call to wrapped fails.
Exception object with stacktrace in main thread, if the previous call to
wrapped failed.
"""
def trap_errors(*args, **kwargs):
"""Wraps wrapped to trap any errors thrown."""
if errors:
# Do not execute wrapped if previous call errored.
return
try:
return wrapped(*args, **kwargs)
except Exception as e:
errors.append(sys.exc_info())
logging.exception("Error in producer thread for %s",
thread_name_prefix)
raise e
if errors:
# Previous call had an error, re-raise in main thread.
exc_info = errors[-1]
raise exc_info[1].with_traceback(exc_info[2])
del instance
return pool.submit(trap_errors, *args, **kwargs)
return decorator
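# Illustrative usage sketch (not part of the library API): decorating a
# function via `make_async()` makes each call return a `concurrent.futures`
# future immediately; consecutive calls run serially on one background thread.
def _example_make_async_usage():
  @make_async("example")
  def slow_add(a, b):
    return a + b
  future = slow_add(1, 2)  # Returns immediately with a future.
  return future.result()   # Blocks until the background call completes; -> 3.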
def kwargs_only(f):
@functools.wraps(f)
def wrapped(**kwargs):
return f(**kwargs)
return wrapped
@contextlib.contextmanager
def log_activity(activity_name: str):
logging.info("[jaxline] %s starting...", activity_name)
try:
yield
finally:
if sys.exc_info()[0] is not None:
logging.exception("[jaxline] %s failed with error.", activity_name)
else:
logging.info("[jaxline] %s finished.", activity_name)
class DistributedRNGMode(enum.Enum):
"""Enumeration of the allowed modes for distributed rng handling."""
UNIQUE_HOST_UNIQUE_DEVICE = "unique_host_unique_device"
UNIQUE_HOST_SAME_DEVICE = "unique_host_same_device"
SAME_HOST_UNIQUE_DEVICE = "same_host_unique_device"
SAME_HOST_SAME_DEVICE = "same_host_same_device"
@property
def unique_host(self):
return self in {DistributedRNGMode.UNIQUE_HOST_UNIQUE_DEVICE,
DistributedRNGMode.UNIQUE_HOST_SAME_DEVICE}
@property
def unique_device(self):
return self in {DistributedRNGMode.UNIQUE_HOST_UNIQUE_DEVICE,
DistributedRNGMode.SAME_HOST_UNIQUE_DEVICE}
def host_id_devices_for_rng(mode="unique_host_unique_device"):
if not DistributedRNGMode(mode).unique_host:
return None
return jnp.broadcast_to(jax.process_index(), (jax.local_device_count(),))
def specialize_rng_host_device(
rng: jax.Array,
host_id: Optional[int],
axis_name: str,
mode: str = "unique_host_unique_device",
) -> jax.Array:
"""Specializes a rng to the host/device we are on.
Must be called from within a pmapped function.
Args:
rng: a jax.random.PRNGKey.
host_id: the host ID to fold in, or None. Must be specified (not None) for
the "unique_host_*" modes.
axis_name: the axis of the devices we are specializing across.
mode: str mode. Must be one of "unique_host_unique_device",
"unique_host_same_device", "same_host_unique_device",
"same_host_same_device".
Returns:
jax.random.PRNGKey specialized to host/device.
"""
# Will throw an error if mode is not a valid enumeration.
enum_mode = DistributedRNGMode(mode)
if enum_mode.unique_host:
# Note that we intentionally do NOT call `jax.process_index()` here, taking
# it as an input instead. This is because we don't want to (effectively) use
# a hard-coded Python int inside a potentially `pmap`ped context as that
# results in different executable fingerprints across hosts.
if host_id is None:
raise ValueError(f"host_id must be given in RNG mode: {enum_mode}")
rng = jax.random.fold_in(rng, host_id)
if enum_mode.unique_device:
rng = jax.random.fold_in(rng, jax.lax.axis_index(axis_name))
return rng
def rendezvous() -> None:
"""Forces all hosts to check in."""
with log_activity("rendezvous"):
x = jnp.ones([jax.local_device_count()])
x = jax.device_get(jax.pmap(lambda x: jax.lax.psum(x, "i"), "i")(x))
if x[0] != jax.device_count():
raise ValueError(f"Expected {jax.device_count()} got {x}")
class PeriodicAction:
"""An action that executes periodically (e.g. logging)."""
def __init__(
self,
fn: Callable[[int, Dict[str, float]], None],
interval_type: str,
interval: float,
start_time: float = 0.0,
start_step: int = 0,
run_async: bool = True,
log_all_data: bool = False,
logging_growth_ratios: Optional[Sequence[float]] = None,
end_step_to_action: Optional[int] = None,
):
"""Initializes attributes for periodic action.
Args:
fn: Function representing the action to be run periodically. Takes global
step and scalars returned by `Experiment.step` as arguments.
interval_type: "secs" or "steps".
interval: Interval between function calls.
start_time: The start epoch time as a float to calculate time intervals
with respect to.
start_step: The start step number to calculate step intervals with respect
to.
run_async: boolean whether to run this periodic action in a background
thread.
log_all_data: boolean whether to accumulate scalar_outputs at each step.
logging_growth_ratios: optional sequence of ratios of steps that will
get logged in terms of powers of ten. For example, if [1, 2, 5, 10] is passed,
then steps [1, 2, 5, 10, 20, 50, ...] are logged in *addition* to the
standard intervals. Only used for interval_type='steps'. This is useful
to allow for more logging early in training, and less later on.
end_step_to_action: If not None, then it is the final step of training, on
which we will call the action regardless.
"""
if interval_type not in ["secs", "steps"]:
raise ValueError(f"Unrecognized interval type {interval_type}.")
self._fn = fn
self._interval_type = interval_type
self._interval = interval
self._init_interval = interval
self._prev_time = start_time
self._prev_step = start_step
self._end_step_to_action = end_step_to_action
self._apply_fn_future = None
if run_async:
self._apply_fn = make_async(self._fn.__name__)(self._apply_fn) # pylint: disable=no-value-for-parameter
self.log_all_data = log_all_data
self.log = {}
self._logging_growth_ratios = logging_growth_ratios
# This is only supported when interval_type='steps'
if self._interval_type != "steps":
assert self._logging_growth_ratios is None
def _apply_fn(self, step, steps_per_sec, scalar_outputs):
"""Runs periodic action, optionally dumping all intermediate logged data."""
# Add data for this step to the log.
self.log[step] = scalar_outputs
# Note device_get copies from device to host, so it is expensive.
# However, JAX's DeviceArray has a cache which can be reused for all
# subsequent PeriodicActions that make the same call. Also, in async mode
# this function runs in a background thread.
log = jax.device_get(self.log)
# Reset self.log here to prevent async weirdness
self.log = {}
# Steps per sec must be added after device-get
log[step]["steps_per_sec"] = steps_per_sec
for logged_step, logged_scalars in log.items():
self._fn(logged_step, logged_scalars)
def _apply_condition(self, t: float, step: int):
"""Checks to see if we should perform the periodic action."""
if self._end_step_to_action and step == self._end_step_to_action:
return True
if self._interval_type == "secs":
return t - self._prev_time >= self._interval
else:
assert self._interval_type == "steps"  # error should have been caught in init
# Check for logging intervals that come at growth ratios
growth_log = False
if self._logging_growth_ratios:
exponent = jnp.floor(jnp.log10(jnp.maximum(1, step)))
valid_ratios = jnp.round(jnp.array(
[ratio * 10**exponent for ratio in self._logging_growth_ratios]))
growth_log = any(jnp.isclose(step, valid_ratios))
# Log if a growth logging step *or* divisible by interval
interval_log = step % self._interval == 0
return growth_log or interval_log
def update_time(self, t: float, step: int):
"""Updates the internal time measurements."""
self._prev_time = t
self._prev_step = step
def wait_to_finish(self):
"""Waits for any periodic actions running in own threads to complete."""
if not (self._apply_fn_future is None or self._apply_fn_future.done()):
logging.info("Waiting for a periodic action to finish...")
self._apply_fn_future.result()
def __call__(
self,
t: float,
step: int,
scalar_outputs: Dict[str, jnp.ndarray],
) -> None:
"""Calls periodic action if interval since last call sufficiently large.
Args:
t: The current epoch time as a float.
step: The current step number.
scalar_outputs: Scalars to be processed by the periodic action.
"""
if self._apply_condition(t, step):
steps_per_sec = (step - self._prev_step) / (t - self._prev_time)
self._apply_fn_future = self._apply_fn( # pylint: disable=not-callable
step, steps_per_sec, scalar_outputs)
self.update_time(t, step)
elif self.log_all_data:
# Log data for dumping at next interval.
self.log[step] = scalar_outputs
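# Illustrative usage sketch (not part of the original library): a synchronous
# PeriodicAction that logs scalars every 100 steps. `log_fn` and
# `_example_periodic_logging` are made-up names for the example.
def _example_periodic_logging(start_time):
  def log_fn(step, scalars):
    logging.info("step %d: %s", step, scalars)
  action = PeriodicAction(
      log_fn, interval_type="steps", interval=100, start_time=start_time,
      run_async=False)
  # Inside a training loop one would then call, once per step:
  #   action(current_time, step, {"loss": loss_value})
  return action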
def debugger_fallback(f: F) -> F:
"""Maybe wraps f with a pdb-callback."""
@functools.wraps(f)
def inner_wrapper(*args, **kwargs):
"""Main entry function."""
try:
return f(*args, **kwargs)
# KeyboardInterrupt and SystemExit derive from BaseException rather than
# Exception, so they are not caught here and do not trigger the post-mortem.
except Exception as e: # pylint: disable=broad-except
if _JAXLINE_POST_MORTEM.value:
pdb.post_mortem(e.__traceback__)
raise
return inner_wrapper
# TODO(b/205109371): Remove support for `evaluate` that doesn't return a dict.
def evaluate_should_return_dict(f: F) -> F:
"""Prints a deprecation warning for old-usage of evaluate.
The `evaluate` method on an experiment should
return a dictionary of scalars to be logged, just like the step method.
`evaluate` is currently allowed to return nothing (the
older behavior). Soon, returning nothing will be an error.
Please update old code. If you do not wish Jaxline to log anything for you,
return an empty dictionary. Otherwise a dictionary of scalars may be returned
like `step`.
Args:
f: The evaluate method.
Returns:
The evaluate function wrapped with a deprecation warning.
"""
none_return_is_deprecated_msg = (
"Your experiment\'s evaluate function returned no output, this is "
"deprecated behavior. `evaluate` should now return a dictionary of "
"scalars to log, just like `step`. Please update your code. "
"We will soon update this code and returning None will error.")
@functools.wraps(f)
def evaluate_with_warning(*args, **kwargs):
evaluate_out = f(*args, **kwargs)
if evaluate_out is None:
logging.log_first_n(logging.WARNING, none_return_is_deprecated_msg, 1)
return {}
return evaluate_out
return evaluate_with_warning
# We use a global dictionary so that multiple different checkpoints can share
# underlying data.
GLOBAL_CHECKPOINT_DICT = {}
class InMemoryCheckpointer:
"""A Checkpointer reliant on an in-memory global dictionary."""
def __init__(self, config, mode: str):
self._max_checkpoints_to_keep = config.max_checkpoints_to_keep
del mode
def _override_or_insert(self, current_state, snapshot):
"""Update the current state based on a snapshot."""
for sk, sv in snapshot.items():
# Duck-typing for "is this a Jaxline Experiment class?".
if (sk in current_state
and hasattr(current_state[sk], "CHECKPOINT_ATTRS")
and hasattr(current_state[sk], "NON_BROADCAST_CHECKPOINT_ATTRS")):
for kk in sv.CHECKPOINT_ATTRS:
setattr(current_state[sk], kk, getattr(sv, kk))
for kk in sv.NON_BROADCAST_CHECKPOINT_ATTRS:
setattr(
current_state[sk], kk,
jax.tree_util.tree_map(copy.copy, getattr(sv, kk)))
else:
current_state[sk] = sv
def get_experiment_state(self, ckpt_series: str):
"""Returns the experiment state for a given checkpoint series."""
if ckpt_series not in GLOBAL_CHECKPOINT_DICT:
active = threading.local()
new_series = CheckpointNT(active, [])
GLOBAL_CHECKPOINT_DICT[ckpt_series] = new_series
if not hasattr(GLOBAL_CHECKPOINT_DICT[ckpt_series].active, "state"):
GLOBAL_CHECKPOINT_DICT[ckpt_series].active.state = (
config_dict.ConfigDict())
return GLOBAL_CHECKPOINT_DICT[ckpt_series].active.state
def save(self, ckpt_series: str) -> None:
"""Saves the checkpoint."""
series = GLOBAL_CHECKPOINT_DICT[ckpt_series]
active_state = self.get_experiment_state(ckpt_series)
id_ = 0 if not series.history else series.history[-1].id + 1
snapshot = copy.copy(active_state)
series.history.append(SnapshotNT(id_, snapshot))
if len(series.history) > self._max_checkpoints_to_keep:
GLOBAL_CHECKPOINT_DICT[ckpt_series] = series._replace(
history=series.history[-self._max_checkpoints_to_keep:])
logging.info("Saved checkpoint %s with id %s.", ckpt_series, id_)
def can_be_restored(self, ckpt_series: str) -> bool:
"""Returns whether or not a given checkpoint series can be restored."""
return bool(ckpt_series in GLOBAL_CHECKPOINT_DICT and
            GLOBAL_CHECKPOINT_DICT[ckpt_series].history)
def restore(self, ckpt_series: str) -> None:
"""Restores the checkpoint."""
snapshot = GLOBAL_CHECKPOINT_DICT[ckpt_series].history[-1].pickle_nest
current_state = self.get_experiment_state(ckpt_series)
self._override_or_insert(current_state, snapshot)
logging.info("Returned checkpoint %s with id %s.", ckpt_series,
GLOBAL_CHECKPOINT_DICT[ckpt_series].history[-1].id)
def restore_path(self, ckpt_series: str) -> Optional[str]:
"""Returns the restore path for the checkpoint, or None."""
if not self.can_be_restored(ckpt_series):
return None
return GLOBAL_CHECKPOINT_DICT[ckpt_series].history[-1].id
def wait_for_checkpointing_to_finish(self) -> None:
"""Waits for any async checkpointing to complete."""
def disable_pmap_jit(fn: F) -> F:
"""Disables pmaps/jits inside a function if `--jaxline_disable_pmap_jit=True`.
Args:
fn: function to be wrapped, with arbitrary call-signature and return type.
Returns:
A function that when called, calls fn within a chex context that strips out
all pmaps and jits if `--jaxline_disable_pmap_jit=True`, and otherwise calls
fn unmodified.
"""
@functools.wraps(fn)
def inner_wrapper(*args, **kwargs):
if _JAXLINE_DISABLE_PMAP_JIT.value:
with chex.fake_pmap_and_jit():
return fn(*args, **kwargs)
else:
return fn(*args, **kwargs)
return inner_wrapper
| jaxline-master | jaxline/utils.py |
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training script to run Jaxline experiments.
Your experiment file must implement the API in experiment.py to be
compatible with this pipeline.
"""
import functools
import inspect
import time
from typing import Optional
from absl import flags
from absl import logging
import chex
import jax
import jax.numpy as jnp
from jaxline import utils
FLAGS = flags.FLAGS
def _log_outputs(step, scalar_values):
# f_list for less verbosity; e.g., "4." instead of "array(4., dtype=float32)".
array_types = (chex.Array, chex.ArrayNumpy)
f_list = (lambda x: x.tolist() if isinstance(x, array_types) else x)
logging.info("global_step: %d, %s", step,
jax.tree_util.tree_map(f_list, scalar_values))
def _initialize_experiment(experiment_class, mode, rng, experiment_kwargs):
"""Initializes experiment catching old style init methods."""
init_args = inspect.signature(experiment_class).parameters
if "init_rng" in init_args:
experiment = experiment_class(
mode, init_rng=rng, **experiment_kwargs)
else:
# TODO(b/205109371): Make init_rng non-optional.
logging.warning(
"You should add init_rng to your Experiment"
" constructor, which we will use to pass you"
" jaxline.base_config.random_seed. Please deprecate any use of"
" experiment_kwargs.config.random_seed: model initialization should"
" be performed with init_rng and any sweeps directly with"
" jaxline.base_config.random_seed. The use of"
" experiment_kwargs.config.random_seed was flawed design introduced"
" by some of our JAXline examples and meant that the rng used for"
" initialization (and sweeps) was decoupled from that used by the"
" step function. This will soon become unsupported behaviour.")
experiment = experiment_class(mode, **experiment_kwargs)
return experiment
@utils.disable_pmap_jit
def train(
experiment_class,
config,
checkpointer: utils.Checkpointer,
writer: Optional[utils.Writer],
periodic_actions=(),
):
"""Main training loop."""
logging.info("Training with config:\n%s", config)
is_chief = jax.process_index() == 0
is_checkpointer = config.train_checkpoint_all_hosts or is_chief
rng = jax.random.PRNGKey(config.random_seed)
with utils.log_activity("experiment init"):
experiment = _initialize_experiment(
experiment_class, "train", rng, config.experiment_kwargs)
state = checkpointer.get_experiment_state("latest")
state.global_step = 0
state.experiment_module = experiment
state.train_step_rng = experiment.initialize_train_step_rng(rng)
if checkpointer.can_be_restored("latest"):
with utils.log_activity("checkpoint restore"):
checkpointer.restore("latest")
periodic_actions += (
utils.PeriodicAction(
_log_outputs,
interval_type=config.logging_interval_type or config.interval_type,
interval=config.log_tensors_interval,
logging_growth_ratios=config.periodic_action_growth_ratios,
run_async=config.log_async),
)
if is_checkpointer:
if config.save_checkpoint_interval > 0:
periodic_actions += (
utils.PeriodicAction(
lambda *_: checkpointer.save("latest"),
interval_type=(config.checkpoint_interval_type
or config.interval_type),
interval=config.save_checkpoint_interval,
logging_growth_ratios=config.periodic_action_growth_ratios,
run_async=False),) # run_async True would not be thread-safe.
if is_chief or config.log_all_hosts:
if writer is not None:
def write_scalars(global_step: int, scalar_values):
writer.write_scalars(global_step, scalar_values)
periodic_actions += (utils.PeriodicAction(
write_scalars,
interval_type=(config.logging_interval_type or config.interval_type),
interval=config.log_train_data_interval,
logging_growth_ratios=config.periodic_action_growth_ratios,
log_all_data=config.log_all_train_data,
end_step_to_action=config.training_steps),)
for pa in periodic_actions:
pa.update_time(time.time(), state.global_step)
if (is_checkpointer and config.save_initial_train_checkpoint and
not checkpointer.can_be_restored("latest")):
with utils.log_activity("first checkpoint"):
checkpointer.save("latest")
experiment.train_loop(config, state, periodic_actions, writer)
if is_checkpointer:
with utils.log_activity("final checkpoint"):
checkpointer.save("latest")
checkpointer.wait_for_checkpointing_to_finish()
# Join all async periodic actions that are unfinished.
for pa in periodic_actions:
pa.wait_to_finish()
# We occasionally see errors when the final checkpoint is being written if
# the other hosts exit. Here we force all hosts to participate in one final
# collective so the non-master hosts cannot exit before the master writes out
# the final checkpoint.
utils.rendezvous()
@utils.disable_pmap_jit
def evaluate(
experiment_class,
config,
checkpointer: utils.Checkpointer,
writer: Optional[utils.Writer],
jaxline_mode: Optional[str] = None,
):
"""Main evaluation loop."""
if jaxline_mode is None:
jaxline_mode = FLAGS.jaxline_mode
logging.info("Evaluating with config:\n%s", config)
global_step = 0
eval_rng = jax.random.PRNGKey(config.random_seed)
experiment = _initialize_experiment(
experiment_class, jaxline_mode, eval_rng, config.experiment_kwargs)
should_save_best_checkpoint = config.best_model_eval_metric and (
config.best_checkpoint_all_hosts or jax.process_index() == 0)
if should_save_best_checkpoint:
# Initialize best state.
best_state = checkpointer.get_experiment_state("best")
if config.best_model_eval_metric_higher_is_better:
best_state.best_eval_metric_value = float("-inf")
eval_metric_is_better_op = jnp.greater
eval_metric_comparison_str = ">"
else:
best_state.best_eval_metric_value = float("inf")
eval_metric_is_better_op = jnp.less
eval_metric_comparison_str = "<"
best_state.best_model_eval_metric = config.best_model_eval_metric
best_state.experiment_module = experiment
# Restore to preserve 'best_eval_metric_value' if evaluator was preempted.
if checkpointer.can_be_restored("best"):
with utils.log_activity("best checkpoint restore"):
checkpointer.restore("best")
# Will evaluate the latest checkpoint in the directory.
state = checkpointer.get_experiment_state("latest")
state.global_step = global_step
state.experiment_module = experiment
state.train_step_rng = None
eval_rng = jnp.broadcast_to(
eval_rng, (jax.local_device_count(),) + eval_rng.shape)
host_id_devices = utils.host_id_devices_for_rng(config.random_mode_eval)
eval_rng = jax.pmap(functools.partial(
utils.specialize_rng_host_device, axis_name="i",
mode=config.random_mode_eval), axis_name="i")(eval_rng, host_id_devices)
old_checkpoint_path = None
initial_weights_are_evaluated = False
while True:
checkpoint_path = checkpointer.restore_path("latest")
if config.one_off_evaluate:
if checkpointer.can_be_restored("latest"):
with utils.log_activity("one off evaluate checkpoint restore"):
checkpointer.restore("latest")
elif config.eval_initial_weights:
logging.info("Evaluating initial weights for one_off_evaluate.")
else:
raise ValueError(
"Checkpoint invalid and eval_initial_weights set to False")
elif (checkpoint_path is None and config.eval_initial_weights
and not initial_weights_are_evaluated):
# Skip restoring a checkpoint and directly call evaluate if
# `config.eval_initial_weights`, but don't do it more than once.
initial_weights_are_evaluated = True
else:
if (checkpoint_path in (None, old_checkpoint_path) or
not checkpointer.can_be_restored("latest")):
logging.info("Checkpoint %s invalid or already evaluated, waiting 10s.",
checkpoint_path)
time.sleep(10)
continue
checkpointer.restore("latest")
global_step_devices = utils.bcast_local_devices(
jnp.asarray(state.global_step))
scalar_values = utils.evaluate_should_return_dict(experiment.evaluate)(
global_step=global_step_devices, rng=eval_rng, writer=writer)
if writer is not None:
writer.write_scalars(state.global_step, scalar_values)
old_checkpoint_path = checkpoint_path
# Decide whether to save a "best checkpoint".
if should_save_best_checkpoint:
if config.best_model_eval_metric not in scalar_values:
raise ValueError(f"config.best_model_eval_metric has been specified "
f"as {config.best_model_eval_metric}, but this key "
f"was not returned by the evaluate method. Got: "
f"{scalar_values.keys()}")
current_eval_metric_value = scalar_values[config.best_model_eval_metric]
old_eval_metric_value = best_state.best_eval_metric_value
if eval_metric_is_better_op(current_eval_metric_value,
old_eval_metric_value):
logging.info("%s: %s %s %s, saving new best checkpoint.",
config.best_model_eval_metric, current_eval_metric_value,
eval_metric_comparison_str, old_eval_metric_value)
best_state.global_step = state.global_step
best_state.experiment_module = experiment
best_state.best_eval_metric_value = current_eval_metric_value
best_state.train_step_rng = state.train_step_rng
# Optional best model processing defined by the experiment.
experiment.on_new_best_model(best_state)
checkpointer.save("best")
if config.one_off_evaluate or not experiment.should_run_step(
state.global_step, config):
logging.info("Last checkpoint (iteration %d) evaluated, exiting.",
state.global_step)
break
| jaxline-master | jaxline/train.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setuptools installation script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
description = """Graph Nets is DeepMind's library for building graph networks in
Tensorflow and Sonnet.
"""
setup(
name="graph_nets",
version="1.1.1.dev",
description="Library for building graph networks in Tensorflow and Sonnet.",
long_description=description,
author="DeepMind",
license="Apache License, Version 2.0",
keywords=["graph networks", "tensorflow", "sonnet", "machine learning"],
url="https://github.com/deepmind/graph-nets",
packages=find_packages(),
# Additional "tensorflow" and "tensorflow_probability" requirements should
# be installed separately (See README).
install_requires=[
"absl-py",
"dm-sonnet",
"dm-tree",
"future",
"networkx",
"numpy",
"setuptools",
"six",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| graph_nets-master | setup.py |
# Lint as: python2, python3
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for modules, defined depending on the Sonnet version.
The strategy for compatibility with both Sonnet 1 and Sonnet 2 works as follows:
- Dynamically decide which version we are using.
- Create an adapter base class, with a unified API, that would allow child
classes to implement interfaces similar to Sonnet 1.
- All GraphNet modules and networks inherit from that same base class, and work with
either Sonnet 1 or Sonnet 2, depending on how the library is configured.
We do not recommend that users inherit from this base class, as we only adapt the
functionality for the GraphNets use cases.
We also define a `WrappedModelFnModule`. This is similar to `sonnet.v1.Module`,
except that it receives a callable that returns the build method, rather than
receiving the build method directly. We need this because:
- There is no analogous to `sonnet.v1.Module` in Sonnet 2.
- `sonnet.v1.Module` relies on `get_variable` to always return the same
variables in subsequent calls to the Sonnet module. This means that passing
a single build method that builds submodules inside of it, yields the right
variable sharing when called multiple times, thanks to custom variable
getters. This mechanism does not work in Sonnet 2, and it would lead to
separate variables/submodules being instantiated every time the module is
connected. This is why our `WrappedModelFnModule` instead, takes a callable
that can be called in the `__init__` similarly to how `*_model_fn` arguments
work in `blocks.py` and `modules.py`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import six
import sonnet as snt
_sonnet_version = snt.__version__
_sonnet_major_version = int(_sonnet_version.split(".")[0])
if _sonnet_major_version == 1:
AbstractModule = snt.AbstractModule
elif _sonnet_major_version == 2:
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(snt.Module):
"""Makes Sonnet1-style childs from this look like a Sonnet2 module."""
def __init__(self, *args, **kwargs):
super(AbstractModule, self).__init__(*args, **kwargs)
self.__call__.__func__.__doc__ = self._build.__doc__ # pytype: disable=attribute-error
# In snt2 calls to `_enter_variable_scope` are ignored.
@contextlib.contextmanager
def _enter_variable_scope(self, *args, **kwargs):
yield None
def __call__(self, *args, **kwargs):
return self._build(*args, **kwargs)
@abc.abstractmethod
def _build(self, *args, **kwargs):
"""Similar to Sonnet 1 ._build method."""
else:
raise RuntimeError(
"Unexpected sonnet major version %d" % (_sonnet_major_version))
class WrappedModelFnModule(AbstractModule):
"""Wraps a model_fn as a Sonnet module with a name.
Following `blocks.py` convention, a `model_fn` is a callable that, when called
with no arguments, returns a callable similar to a Sonnet module instance.
"""
def __init__(self, model_fn, name):
"""Inits the module.
Args:
model_fn: callable that, when called with no arguments, returns a callable
similar to a Sonnet module instance.
name: Name for the wrapper module.
"""
super(WrappedModelFnModule, self).__init__(name=name)
with self._enter_variable_scope():
self._model = model_fn()
def _build(self, *args, **kwargs):
return self._model(*args, **kwargs)
| graph_nets-master | graph_nets/_base.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Auxiliary methods that operate on graph structured data.
This module contains functions to convert between Python data structures
representing graphs and `graphs.GraphsTuple` containing numpy arrays.
In particular:
- `networkx_to_data_dict` and `data_dict_to_networkx` convert from/to an
instance of `networkx.OrderedMultiDiGraph` from/to a data dictionary;
- `networkxs_to_graphs_tuple` and `graphs_tuple_to_networkxs` convert
from instances of `networkx.OrderedMultiDiGraph` to `graphs.GraphsTuple`;
- `data_dicts_to_graphs_tuple` and `graphs_tuple_to_data_dicts` convert to and
from lists of data dictionaries and `graphs.GraphsTuple`;
- `get_graph` allows to index or slice a `graphs.GraphsTuple` to extract a
subgraph or a subbatch of graphs.
The functions in this module are able to deal with graphs containing `None`
fields (e.g. featureless nodes, featureless edges, or no edges).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from graph_nets import graphs
import networkx as nx
import numpy as np
from six.moves import range
from six.moves import zip # pylint: disable=redefined-builtin
NODES = graphs.NODES
EDGES = graphs.EDGES
GLOBALS = graphs.GLOBALS
RECEIVERS = graphs.RECEIVERS
SENDERS = graphs.SENDERS
GLOBALS = graphs.GLOBALS
N_NODE = graphs.N_NODE
N_EDGE = graphs.N_EDGE
GRAPH_DATA_FIELDS = graphs.GRAPH_DATA_FIELDS
GRAPH_NUMBER_FIELDS = graphs.GRAPH_NUMBER_FIELDS
ALL_FIELDS = graphs.ALL_FIELDS
GRAPH_NX_FEATURES_KEY = "features"
def _check_valid_keys(keys):
if any([x in keys for x in [EDGES, RECEIVERS, SENDERS]]):
if not (RECEIVERS in keys and SENDERS in keys):
raise ValueError("If edges are present, senders and receivers should "
"both be defined.")
def _defined_keys(dict_):
return {k for k, v in dict_.items() if v is not None}
def _check_valid_sets_of_keys(dicts):
"""Checks that all dictionaries have exactly the same valid key sets."""
prev_keys = None
for dict_ in dicts:
current_keys = _defined_keys(dict_)
_check_valid_keys(current_keys)
if prev_keys and current_keys != prev_keys:
raise ValueError(
"Different set of keys found when iterating over data dictionaries "
"({} vs {})".format(prev_keys, current_keys))
prev_keys = current_keys
def _compute_stacked_offsets(sizes, repeats):
"""Computes offsets to add to indices of stacked np arrays.
When a set of np arrays are stacked, the indices of those from the second on
must be offset in order to be able to index into the stacked np array. This
computes those offsets.
Args:
sizes: A 1D sequence of np arrays of the sizes per graph.
repeats: A 1D sequence of np arrays of the number of repeats per graph.
Returns:
The index offset per graph.
"""
return np.repeat(np.cumsum(np.hstack([0, sizes[:-1]])), repeats)
def _check_key(node_index, key):
if node_index != key:
raise ValueError(
"Nodes of the networkx.OrderedMultiDiGraph must have sequential "
"integer keys consistent with the order of the nodes (e.g. "
"`list(graph_nx.nodes)[i] == i`), found node with index {} and key {}"
.format(node_index, key))
return True
def networkx_to_data_dict(graph_nx,
node_shape_hint=None,
edge_shape_hint=None,
data_type_hint=np.float32):
"""Returns a data dict of Numpy data from a networkx graph.
The networkx graph should be set up such that, for fixed shapes `node_shape`,
`edge_shape` and `global_shape`:
- `graph_nx.nodes(data=True)[i][-1]["features"]` is, for any node index i, a
tensor of shape `node_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["features"]` is, for any edge index i, a
tensor of shape `edge_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["index"]`, if present, defines the order
in which the edges will be sorted in the resulting `data_dict`;
- `graph_nx.graph["features"]` is a tensor of shape `global_shape`, or
`None`.
The `data_type_hint` argument provides the numpy dtype used when the `NODES`
or `EDGES` fields have to be autocompleted.
The output data is a sequence of data dicts with fields:
NODES, EDGES, RECEIVERS, SENDERS, GLOBALS, N_NODE, N_EDGE.
Args:
graph_nx: A `networkx.OrderedMultiDiGraph`. The node keys must be sequential
integer values following the order in which nodes are added to the graph
starting from zero. That is `list(graph_nx.nodes)[i] == i`.
node_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain nodes, the trailing shape for the created `NODES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one node.
edge_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain edges, the trailing shape for the created `EDGES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one edge.
data_type_hint: (numpy dtype, default=`np.float32`) If the `NODES` or
`EDGES` fields are autocompleted, their type.
Returns:
The data `dict` of Numpy data.
Raises:
TypeError: If `graph_nx` is not an instance of networkx.
KeyError: If `graph_nx` contains at least one node without the "features"
key in its attribute dictionary, or at least one edge without the
"features" key in its attribute dictionary.
ValueError: If `graph_nx` contains at least one node with a `None`
"features" attribute and one least one node with a non-`None` "features"
attribute; or if `graph_nx` contains at least one edge with a `None`
"features" attribute and one least one edge with a non-`None` "features"
attribute.
ValueError: If the nodes have keys that are not consistent with the order
of the nodes.
"""
nodes = None
try:
number_of_nodes = graph_nx.number_of_nodes()
except ValueError as e:
raise TypeError("Argument `graph_nx` of wrong type {}".format(
type(graph_nx))) from e
if number_of_nodes == 0:
if node_shape_hint is not None:
nodes = np.zeros([0] + list(node_shape_hint), dtype=data_type_hint)
else:
try:
nodes_data = [
data[GRAPH_NX_FEATURES_KEY]
for node_i, (key, data) in enumerate(graph_nx.nodes(data=True))
if _check_key(node_i, key) and data[GRAPH_NX_FEATURES_KEY] is not None
]
if nodes_data:
if len(nodes_data) != number_of_nodes:
raise ValueError(
"Either all the nodes should have features, or none of them")
nodes = np.array(nodes_data)
except KeyError as e:
raise KeyError("Missing 'features' field from the graph nodes. "
"This could be due to the node having been silently added "
"as a consequence of an edge addition when creating the "
"networkx instance") from e
edges = None
number_of_edges = graph_nx.number_of_edges()
if number_of_edges == 0:
senders = np.zeros(0, dtype=np.int32)
receivers = np.zeros(0, dtype=np.int32)
if edge_shape_hint is not None:
edges = np.zeros([0] + list(edge_shape_hint), dtype=data_type_hint)
else:
if "index" in list(graph_nx.edges(data=True))[0][2]:
senders, receivers, edge_attr_dicts = zip(
*sorted(graph_nx.edges(data=True), key=lambda x: x[2]["index"]))
else:
senders, receivers, edge_attr_dicts = zip(*graph_nx.edges(data=True))
senders = np.array(senders, dtype=np.int32)
receivers = np.array(receivers, dtype=np.int32)
edges_data = [
x[GRAPH_NX_FEATURES_KEY]
for x in edge_attr_dicts
if x[GRAPH_NX_FEATURES_KEY] is not None
]
if edges_data:
if len(edges_data) != number_of_edges:
raise ValueError(
"Either all the edges should have features, or none of them")
edges = np.array(edges_data)
globals_ = None
if GRAPH_NX_FEATURES_KEY in graph_nx.graph:
globals_ = graph_nx.graph[GRAPH_NX_FEATURES_KEY]
return {
NODES: nodes,
EDGES: edges,
RECEIVERS: receivers,
SENDERS: senders,
GLOBALS: globals_,
N_NODE: number_of_nodes,
N_EDGE: number_of_edges,
}
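# Illustrative usage sketch (not part of the library): build a two-node,
# one-edge `networkx.OrderedMultiDiGraph` with "features" attributes and
# convert it to a data dict, as described in the docstring above.
def _example_networkx_to_data_dict():
  graph_nx = nx.OrderedMultiDiGraph()
  graph_nx.add_node(0, features=np.array([0., 1.], dtype=np.float32))
  graph_nx.add_node(1, features=np.array([2., 3.], dtype=np.float32))
  graph_nx.add_edge(0, 1, features=np.array([5.], dtype=np.float32))
  graph_nx.graph["features"] = np.array([7.], dtype=np.float32)
  return networkx_to_data_dict(graph_nx)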
def _unstack(array):
"""Similar to `tf.unstack`."""
num_splits = int(array.shape[0])
return [np.squeeze(x, 0) for x in np.split(array, num_splits, axis=0)]
def data_dict_to_networkx(data_dict):
"""Returns a networkx graph that contains the stored data.
Depending on the content of `data_dict`, the returned `networkx` instance has
the following properties:
- The nodes feature are placed in the nodes attribute dictionary under the
"features" key. If the `NODES` fields is `None`, a `None` value is placed
here;
- If the `RECEIVERS` field is `None`, no edges are added to the graph.
Otherwise, edges are added with the order in which they appeared in
`data_dict` stored in the "index" field of their attributes dictionary;
- The edges features are placed in the edges attribute dictionary under the
"features" key. If the `EDGES` field is `None`, a `None` value is placed;
- The global feature are placed under the key "features" of the graph
property of the returned instance. If the `GLOBALS` field is `None`, a
`None` global property is created.
Args:
data_dict: A graph `dict` of Numpy data.
Returns:
The `networkx.OrderedMultiDiGraph`. The node keys will be the data_dict
integer node indices.
Raises:
ValueError: If the `NODES` field of `data_dict` contains `None`, and
`data_dict` does not have a `N_NODE` field.
"""
graph_nx = nx.OrderedMultiDiGraph()
data_dict = _populate_number_fields(data_dict)
graph_nx.graph[GRAPH_NX_FEATURES_KEY] = data_dict[GLOBALS]
if data_dict[NODES] is not None:
if data_dict[NODES].shape[0] > 0:
nodes_list = _unstack(data_dict[NODES])
for i, x in enumerate(nodes_list):
graph_nx.add_node(i, **{GRAPH_NX_FEATURES_KEY: x})
elif data_dict[N_NODE] is not None:
for i in range(data_dict[N_NODE]):
graph_nx.add_node(i, **{GRAPH_NX_FEATURES_KEY: None})
else:
raise ValueError("Cannot create a graph with unspecified number of nodes")
if data_dict[EDGES] is not None and data_dict[EDGES].shape[0] > 0:
edges_features = [{ # pylint: disable=g-complex-comprehension
"index": i,
GRAPH_NX_FEATURES_KEY: x
} for i, x in enumerate(_unstack(data_dict[EDGES]))]
edges_data = zip(data_dict[SENDERS], data_dict[RECEIVERS], edges_features)
graph_nx.add_edges_from(edges_data)
elif data_dict[RECEIVERS] is not None and data_dict[RECEIVERS].shape[0] > 0:
edges_features = [{ # pylint: disable=g-complex-comprehension
"index": i,
GRAPH_NX_FEATURES_KEY: None
} for i in range(data_dict[RECEIVERS].shape[0])]
edges_data = zip(data_dict[SENDERS], data_dict[RECEIVERS], edges_features)
graph_nx.add_edges_from(edges_data)
return graph_nx
def networkxs_to_graphs_tuple(graph_nxs,
node_shape_hint=None,
edge_shape_hint=None,
data_type_hint=np.float32):
"""Constructs an instance from an iterable of networkx graphs.
The networkx graph should be set up such that, for fixed shapes `node_shape`,
`edge_shape` and `global_shape`:
- `graph_nx.nodes(data=True)[i][-1]["features"]` is, for any node index i, a
tensor of shape `node_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["features"]` is, for any edge index i, a
tensor of shape `edge_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["index"]`, if present, defines the order
in which the edges will be sorted in the resulting `data_dict`;
- `graph_nx.graph["features"]` is a tensor of shape `global_shape`, or
`None`.
The output data is a sequence of data dicts with fields:
NODES, EDGES, RECEIVERS, SENDERS, GLOBALS, N_NODE, N_EDGE.
Args:
graph_nxs: A container of `networkx.OrderedMultiDiGraph`s. The node keys
must be sequential integer values following the order in which nodes are
added to the graph starting from zero. That is
`list(graph_nx.nodes)[i] == i`.
node_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain nodes, the trailing shape for the created `NODES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one node.
edge_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain edges, the trailing shape for the created `EDGES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one edge.
data_type_hint: (numpy dtype, default=`np.float32`) If the `NODES` or
`EDGES` fields are autocompleted, their type.
Returns:
The instance.
Raises:
ValueError: If `graph_nxs` is not an iterable of networkx instances.
"""
data_dicts = []
try:
for graph_nx in graph_nxs:
data_dict = networkx_to_data_dict(graph_nx, node_shape_hint,
edge_shape_hint, data_type_hint)
data_dicts.append(data_dict)
except TypeError as e:
raise ValueError("Could not convert some elements of `graph_nxs`. "
"Did you pass an iterable of networkx instances?") from e
return data_dicts_to_graphs_tuple(data_dicts)
def graphs_tuple_to_networkxs(graphs_tuple):
"""Converts a `graphs.GraphsTuple` to a sequence of networkx graphs.
Args:
graphs_tuple: A `graphs.GraphsTuple` instance containing numpy arrays.
Returns:
The list of `networkx.OrderedMultiDiGraph`s. The node keys will be the data
dict integer node indices.
"""
return [
data_dict_to_networkx(x) for x in graphs_tuple_to_data_dicts(graphs_tuple)
]
def data_dicts_to_graphs_tuple(data_dicts):
"""Constructs a `graphs.GraphsTuple` from an iterable of data dicts.
The graphs represented by the `data_dicts` argument are batched to form a
single instance of `graphs.GraphsTuple` containing numpy arrays.
Args:
data_dicts: An iterable of dictionaries with keys `GRAPH_DATA_FIELDS`, plus,
potentially, a subset of `GRAPH_NUMBER_FIELDS`. The NODES and EDGES fields
should be numpy arrays of rank at least 2, while the RECEIVERS and SENDERS
fields are numpy arrays of rank 1 whose dimension matches the first
dimension of the EDGES field. The GLOBALS field is a numpy array of rank at
least 1.
Returns:
An instance of `graphs.GraphsTuple` containing numpy arrays. The
`RECEIVERS`, `SENDERS`, `N_NODE` and `N_EDGE` fields are cast to `np.int32`
type.
"""
data_dicts = [dict(d) for d in data_dicts]
for key in graphs.GRAPH_DATA_FIELDS:
for data_dict in data_dicts:
data_dict.setdefault(key, None)
_check_valid_sets_of_keys(data_dicts)
data_dicts = _to_compatible_data_dicts(data_dicts)
return graphs.GraphsTuple(**_concatenate_data_dicts(data_dicts))
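# Illustrative usage sketch (not part of the library): batch two copies of a
# small graph dict into a single `graphs.GraphsTuple` of numpy arrays. Shapes
# and feature values are arbitrary choices for the example.
def _example_data_dicts_to_graphs_tuple():
  single_graph = {
      NODES: np.zeros((3, 2), dtype=np.float32),
      EDGES: np.zeros((2, 1), dtype=np.float32),
      SENDERS: np.array([0, 1]),
      RECEIVERS: np.array([1, 2]),
      GLOBALS: np.zeros((4,), dtype=np.float32),
  }
  return data_dicts_to_graphs_tuple([single_graph, single_graph])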
def graphs_tuple_to_data_dicts(graph):
"""Splits the stored data into a list of individual data dicts.
Each list is a dictionary with fields NODES, EDGES, GLOBALS, RECEIVERS,
SENDERS.
Args:
graph: A `graphs.GraphsTuple` instance containing numpy arrays.
Returns:
A list of the graph data dictionaries. The GLOBALS field is a tensor of
rank at least 1, as are the RECEIVERS and SENDERS fields (which have
integer values). The NODES and EDGES fields have rank at least 2.
"""
offset = _compute_stacked_offsets(graph.n_node, graph.n_edge)
nodes_splits = np.cumsum(graph.n_node[:-1])
edges_splits = np.cumsum(graph.n_edge[:-1])
graph_of_lists = collections.defaultdict(lambda: [])
if graph.nodes is not None:
graph_of_lists[NODES] = np.split(graph.nodes, nodes_splits)
if graph.edges is not None:
graph_of_lists[EDGES] = np.split(graph.edges, edges_splits)
if graph.receivers is not None:
graph_of_lists[RECEIVERS] = np.split(graph.receivers - offset, edges_splits)
graph_of_lists[SENDERS] = np.split(graph.senders - offset, edges_splits)
if graph.globals is not None:
graph_of_lists[GLOBALS] = _unstack(graph.globals)
n_graphs = graph.n_node.shape[0]
# Make all fields the same length.
for k in GRAPH_DATA_FIELDS:
graph_of_lists[k] += [None] * (n_graphs - len(graph_of_lists[k]))
graph_of_lists[N_NODE] = graph.n_node
graph_of_lists[N_EDGE] = graph.n_edge
result = []
for index in range(n_graphs):
result.append({field: graph_of_lists[field][index] for field in ALL_FIELDS})
return result
def _to_compatible_data_dicts(data_dicts):
"""Converts the content of `data_dicts` to arrays of the right type.
All fields are converted to numpy arrays. The index fields (`SENDERS` and
`RECEIVERS`) and number fields (`N_NODE`, `N_EDGE`) are cast to `np.int32`.
Args:
data_dicts: An iterable of dictionaries with keys `ALL_FIELDS` and values
either `None`s, or quantities that can be converted to numpy arrays.
Returns:
A list of dictionaries containing numpy arrays or `None`s.
"""
results = []
for data_dict in data_dicts:
result = {}
for k, v in data_dict.items():
if v is None:
result[k] = None
else:
dtype = np.int32 if k in [SENDERS, RECEIVERS, N_NODE, N_EDGE] else None
result[k] = np.asarray(v, dtype)
results.append(result)
return results
def _populate_number_fields(data_dict):
"""Returns a dict with the number fields N_NODE, N_EDGE filled in.
The N_NODE field is filled if the graph contains a non-None NODES field;
otherwise, it is set to 0.
The N_EDGE field is filled if the graph contains a non-None RECEIVERS field;
otherwise, it is set to 0.
Args:
data_dict: An input `dict`.
Returns:
The data `dict` with number fields.
"""
dct = data_dict.copy()
for number_field, data_field in [[N_NODE, NODES], [N_EDGE, RECEIVERS]]:
if dct.get(number_field) is None:
if dct[data_field] is not None:
dct[number_field] = np.array(
np.shape(dct[data_field])[0], dtype=np.int32)
else:
dct[number_field] = np.array(0, dtype=np.int32)
return dct
def _concatenate_data_dicts(data_dicts):
"""Concatenate a list of data dicts to create the equivalent batched graph.
Args:
data_dicts: An iterable of data dictionaries with keys `GRAPH_DATA_FIELDS`,
plus, potentially, a subset of `GRAPH_NUMBER_FIELDS`. Each dictionary is
representing a single graph.
Returns:
A data dictionary with the keys `GRAPH_DATA_FIELDS + GRAPH_NUMBER_FIELDS`,
representing the concatenated graphs.
"""
# Create a single dict with fields that contain sequences of graph tensors.
concatenated_dicts = collections.defaultdict(lambda: [])
for data_dict in data_dicts:
data_dict = _populate_number_fields(data_dict)
for k, v in data_dict.items():
if v is not None:
concatenated_dicts[k].append(v)
else:
concatenated_dicts[k] = None
concatenated_dicts = dict(concatenated_dicts)
for field, arrays in concatenated_dicts.items():
if arrays is None:
concatenated_dicts[field] = None
elif field in list(GRAPH_NUMBER_FIELDS) + [GLOBALS]:
concatenated_dicts[field] = np.stack(arrays)
else:
concatenated_dicts[field] = np.concatenate(arrays, axis=0)
if concatenated_dicts[RECEIVERS] is not None:
offset = _compute_stacked_offsets(concatenated_dicts[N_NODE],
concatenated_dicts[N_EDGE])
for field in (RECEIVERS, SENDERS):
concatenated_dicts[field] += offset
return concatenated_dicts
def get_graph(input_graphs, index):
"""Indexes into a graph.
Given a `graphs.GraphsTuple` containing numpy arrays and an index (either
an `int` or a `slice`), indexes into the nodes, edges and globals to extract
the graphs specified by the index, and returns them in another instance of
`graphs.GraphsTuple` containing numpy arrays.
Args:
input_graphs: A `graphs.GraphsTuple` containing numpy arrays.
index: An `int` or a `slice`, to index into `input_graphs`. `index` should
be compatible with the number of graphs in `input_graphs`.
Returns:
A `graphs.GraphsTuple` containing numpy arrays, made of the extracted
graph(s).
Raises:
TypeError: if `index` is not an `int` or a `slice`.
"""
if isinstance(index, int):
graph_slice = slice(index, index + 1)
elif isinstance(index, slice):
graph_slice = index
else:
raise TypeError("unsupported type: %s" % type(index))
data_dicts = graphs_tuple_to_data_dicts(input_graphs)[graph_slice]
return graphs.GraphsTuple(**_concatenate_data_dicts(data_dicts))
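# Illustrative usage sketch (not part of the library): extract the first graph
# and then the first two graphs from a batched GraphsTuple.
def _example_get_graph(batched_graphs_tuple):
  first_graph = get_graph(batched_graphs_tuple, 0)
  first_two_graphs = get_graph(batched_graphs_tuple, slice(0, 2))
  return first_graph, first_two_graphs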
| graph_nets-master | graph_nets/utils_np.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph networks library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import modules
from graph_nets import utils_np
from graph_nets import utils_tf
| graph_nets-master | graph_nets/__init__.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common Graph Network architectures.
The modules in this files are Sonnet modules that:
- take a `graphs.GraphsTuple` containing `Tensor`s as input, with possibly
`None` fields (depending on the module);
- return a `graphs.GraphsTuple` with updated values for some fields
(depending on the module).
The provided modules are:
- `GraphNetwork`: a general purpose Graph Network composed of configurable
`EdgeBlock`, `NodeBlock` and `GlobalBlock` from `blocks.py`;
- `GraphIndependent`: a Graph Network producing updated edges (resp. nodes,
globals) based on the input's edges (resp. nodes, globals) only;
- `InteractionNetwork` (from https://arxiv.org/abs/1612.00222): a
network propagating information on the edges and nodes of a graph;
- RelationNetwork (from https://arxiv.org/abs/1706.01427): a network
updating the global property based on the relation between the input's
nodes properties;
- DeepSets (from https://arxiv.org/abs/1703.06114): a network that operates on
sets (graphs without edges);
- CommNet (from https://arxiv.org/abs/1605.07736 and
https://arxiv.org/abs/1706.06122): a network updating nodes based on their
previous features and the features of the adjacent nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from graph_nets import _base
from graph_nets import blocks
import tensorflow as tf
_DEFAULT_EDGE_BLOCK_OPT = {
"use_edges": True,
"use_receiver_nodes": True,
"use_sender_nodes": True,
"use_globals": True,
}
_DEFAULT_NODE_BLOCK_OPT = {
"use_received_edges": True,
"use_sent_edges": False,
"use_nodes": True,
"use_globals": True,
}
_DEFAULT_GLOBAL_BLOCK_OPT = {
"use_edges": True,
"use_nodes": True,
"use_globals": True,
}
class InteractionNetwork(_base.AbstractModule):
"""Implementation of an Interaction Network.
An interaction network computes interactions on the edges based on the
previous edge features, and on the features of the nodes sending into those
edges. It then updates the nodes based on the incoming updated edges.
See https://arxiv.org/abs/1612.00222 for more details.
This model does not update the graph globals, and they are allowed to be
`None`.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
reducer=tf.math.unsorted_segment_sum,
name="interaction_network"):
"""Initializes the InteractionNetwork module.
Args:
edge_model_fn: A callable that will be passed to `EdgeBlock` to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see `blocks.EdgeBlock` for details), and the shape of the
output of this module must match the one of the input nodes, but for the
first and last axis.
node_model_fn: A callable that will be passed to `NodeBlock` to perform
per-node computations. The callable must return a Sonnet module (or
equivalent; see `blocks.NodeBlock` for details).
reducer: Reducer to be used by NodeBlock to aggregate edges. Defaults to
tf.math.unsorted_segment_sum.
name: The module name.
"""
super(InteractionNetwork, self).__init__(name=name)
with self._enter_variable_scope():
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn, use_globals=False)
self._node_block = blocks.NodeBlock(
node_model_fn=node_model_fn,
use_sent_edges=False,
use_globals=False,
received_edges_reducer=reducer)
def _build(self,
graph,
edge_model_kwargs=None,
node_model_kwargs=None):
"""Connects the InterationNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s. `graph.globals` can be
`None`. The features of each node and edge of `graph` must be
concatenable on the last axis (i.e., the shapes of `graph.nodes` and
`graph.edges` must match but for their first and last axis).
edge_model_kwargs: Optional keyword arguments to pass to
the edge block model.
node_model_kwargs: Optional keyword arguments to pass to
the node block model.
Returns:
An output `graphs.GraphsTuple` with updated edges and nodes.
Raises:
ValueError: If any of `graph.nodes`, `graph.edges`, `graph.receivers` or
`graph.senders` is `None`.
"""
return self._node_block(
self._edge_block(graph, edge_model_kwargs), node_model_kwargs)
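# Illustrative usage sketch (not part of the library): an InteractionNetwork
# whose edge and node models are two-layer Sonnet MLPs. The output sizes are
# arbitrary choices for the example.
def _example_interaction_network():
  import sonnet as snt  # Local import; this module does not import Sonnet.
  return InteractionNetwork(
      edge_model_fn=lambda: snt.nets.MLP([32, 32]),
      node_model_fn=lambda: snt.nets.MLP([32, 32]))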
class RelationNetwork(_base.AbstractModule):
"""Implementation of a Relation Network.
See https://arxiv.org/abs/1706.01427 for more details.
The global and edges features of the input graph are not used, and are
allowed to be `None` (the receivers and senders properties must be present).
The output graph has updated, non-`None`, globals.
"""
def __init__(self,
edge_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_sum,
name="relation_network"):
"""Initializes the RelationNetwork module.
Args:
edge_model_fn: A callable that will be passed to EdgeBlock to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see EdgeBlock for details).
global_model_fn: A callable that will be passed to GlobalBlock to perform
per-global computations. The callable must return a Sonnet module (or
equivalent; see GlobalBlock for details).
reducer: Reducer to be used by GlobalBlock to aggregate edges. Defaults to
tf.math.unsorted_segment_sum.
name: The module name.
"""
super(RelationNetwork, self).__init__(name=name)
with self._enter_variable_scope():
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=False)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn,
use_edges=True,
use_nodes=False,
use_globals=False,
edges_reducer=reducer)
def _build(self,
graph,
edge_model_kwargs=None,
global_model_kwargs=None):
"""Connects the RelationNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, except for the edges
and global properties which may be `None`.
edge_model_kwargs: Optional keyword arguments to pass to
the edge block model.
global_model_kwargs: Optional keyword arguments to pass to
the global block model.
Returns:
A `graphs.GraphsTuple` with updated globals.
Raises:
ValueError: If any of `graph.nodes`, `graph.receivers` or `graph.senders`
is `None`.
"""
output_graph = self._global_block(
self._edge_block(graph, edge_model_kwargs), global_model_kwargs)
return graph.replace(globals=output_graph.globals)
def _make_default_edge_block_opt(edge_block_opt):
"""Default options to be used in the EdgeBlock of a generic GraphNetwork."""
edge_block_opt = dict(edge_block_opt.items()) if edge_block_opt else {}
for k, v in _DEFAULT_EDGE_BLOCK_OPT.items():
edge_block_opt[k] = edge_block_opt.get(k, v)
return edge_block_opt
def _make_default_node_block_opt(node_block_opt, default_reducer):
"""Default options to be used in the NodeBlock of a generic GraphNetwork."""
node_block_opt = dict(node_block_opt.items()) if node_block_opt else {}
for k, v in _DEFAULT_NODE_BLOCK_OPT.items():
node_block_opt[k] = node_block_opt.get(k, v)
for key in ["received_edges_reducer", "sent_edges_reducer"]:
node_block_opt[key] = node_block_opt.get(key, default_reducer)
return node_block_opt
def _make_default_global_block_opt(global_block_opt, default_reducer):
"""Default options to be used in the GlobalBlock of a generic GraphNetwork."""
global_block_opt = dict(global_block_opt.items()) if global_block_opt else {}
for k, v in _DEFAULT_GLOBAL_BLOCK_OPT.items():
global_block_opt[k] = global_block_opt.get(k, v)
for key in ["edges_reducer", "nodes_reducer"]:
global_block_opt[key] = global_block_opt.get(key, default_reducer)
return global_block_opt
class GraphNetwork(_base.AbstractModule):
"""Implementation of a Graph Network.
See https://arxiv.org/abs/1806.01261 for more details.
"""
def __init__(self,
edge_model_fn,
node_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_sum,
edge_block_opt=None,
node_block_opt=None,
global_block_opt=None,
name="graph_network"):
"""Initializes the GraphNetwork module.
Args:
edge_model_fn: A callable that will be passed to EdgeBlock to perform
per-edge computations. The callable must return a Sonnet module (or
equivalent; see EdgeBlock for details).
node_model_fn: A callable that will be passed to NodeBlock to perform
per-node computations. The callable must return a Sonnet module (or
equivalent; see NodeBlock for details).
global_model_fn: A callable that will be passed to GlobalBlock to perform
per-global computations. The callable must return a Sonnet module (or
equivalent; see GlobalBlock for details).
reducer: Reducer to be used by NodeBlock and GlobalBlock to aggregate
nodes and edges. Defaults to tf.math.unsorted_segment_sum. This will be
overridden by the reducers specified in `node_block_opt` and
`global_block_opt`, if any.
edge_block_opt: Additional options to be passed to the EdgeBlock. Can
contain keys `use_edges`, `use_receiver_nodes`, `use_sender_nodes`,
`use_globals`. By default, these are all True.
node_block_opt: Additional options to be passed to the NodeBlock. Can
contain the keys `use_received_edges`, `use_nodes`, `use_globals` (all
set to True by default), `use_sent_edges` (defaults to False), and
`received_edges_reducer`, `sent_edges_reducer` (default to `reducer`).
global_block_opt: Additional options to be passed to the GlobalBlock. Can
contain the keys `use_edges`, `use_nodes`, `use_globals` (all set to
True by default), and `edges_reducer`, `nodes_reducer` (defaults to
`reducer`).
name: The module name.
"""
super(GraphNetwork, self).__init__(name=name)
edge_block_opt = _make_default_edge_block_opt(edge_block_opt)
node_block_opt = _make_default_node_block_opt(node_block_opt, reducer)
global_block_opt = _make_default_global_block_opt(global_block_opt, reducer)
with self._enter_variable_scope():
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn, **edge_block_opt)
self._node_block = blocks.NodeBlock(
node_model_fn=node_model_fn, **node_block_opt)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn, **global_block_opt)
def _build(self,
graph,
edge_model_kwargs=None,
node_model_kwargs=None,
global_model_kwargs=None):
"""Connects the GraphNetwork.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s. Depending on the block
options, `graph` may contain `None` fields; but with the default
configuration, no `None` field is allowed. Moreover, when using the
        default configuration, the features of the nodes, edges and globals of
`graph` should be concatenable on the last dimension.
edge_model_kwargs: Optional keyword arguments to pass to
the edge block model.
node_model_kwargs: Optional keyword arguments to pass to
the node block model.
global_model_kwargs: Optional keyword arguments to pass to
the global block model.
Returns:
An output `graphs.GraphsTuple` with updated edges, nodes and globals.
"""
node_input = self._edge_block(graph, edge_model_kwargs)
global_input = self._node_block(node_input, node_model_kwargs)
return self._global_block(global_input, global_model_kwargs)
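# Editorial usage sketch (an assumption, not part of the original file): wires
# a full GraphNetwork from three small Sonnet MLPs and applies it to an input
# `graphs.GraphsTuple` with concatenable node, edge and global features. The
# MLP sizes and the helper name are illustrative only.
def _example_graph_network(input_graph):
  import sonnet as snt  # Assumed available, as elsewhere in this repository.
  graph_network = GraphNetwork(
      edge_model_fn=lambda: snt.nets.MLP([32, 32]),
      node_model_fn=lambda: snt.nets.MLP([32, 32]),
      global_model_fn=lambda: snt.nets.MLP([32, 32]))
  # Returns a `graphs.GraphsTuple` with updated edges, nodes and globals.
  return graph_network(input_graph)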
class GraphIndependent(_base.AbstractModule):
"""A graph block that applies models to the graph elements independently.
The inputs and outputs are graphs. The corresponding models are applied to
each element of the graph (edges, nodes and globals) in parallel and
independently of the other elements. It can be used to encode or
decode the elements of a graph.
"""
def __init__(self,
edge_model_fn=None,
node_model_fn=None,
global_model_fn=None,
name="graph_independent"):
"""Initializes the GraphIndependent module.
Args:
edge_model_fn: A callable that returns an edge model function. The
callable must return a Sonnet module (or equivalent). If passed `None`,
will pass through inputs (the default).
node_model_fn: A callable that returns a node model function. The callable
must return a Sonnet module (or equivalent). If passed `None`, will pass
through inputs (the default).
global_model_fn: A callable that returns a global model function. The
callable must return a Sonnet module (or equivalent). If passed `None`,
will pass through inputs (the default).
name: The module name.
"""
super(GraphIndependent, self).__init__(name=name)
with self._enter_variable_scope():
      # The use of `_base.WrappedModelFnModule` below ensures that the ops and
      # variables resulting from the edge/node/global_model_fns are scoped
      # analogously to how the Edge/Node/GlobalBlock classes scope theirs.
if edge_model_fn is None:
self._edge_model = lambda x: x
else:
self._edge_model = _base.WrappedModelFnModule(
edge_model_fn, name="edge_model")
if node_model_fn is None:
self._node_model = lambda x: x
else:
self._node_model = _base.WrappedModelFnModule(
node_model_fn, name="node_model")
if global_model_fn is None:
self._global_model = lambda x: x
else:
self._global_model = _base.WrappedModelFnModule(
global_model_fn, name="global_model")
def _build(self,
graph,
edge_model_kwargs=None,
node_model_kwargs=None,
global_model_kwargs=None):
"""Connects the GraphIndependent.
Args:
graph: A `graphs.GraphsTuple` containing non-`None` edges, nodes and
globals.
edge_model_kwargs: Optional keyword arguments to pass to
the edge block model.
node_model_kwargs: Optional keyword arguments to pass to
the node block model.
global_model_kwargs: Optional keyword arguments to pass to
the global block model.
Returns:
An output `graphs.GraphsTuple` with updated edges, nodes and globals.
"""
if edge_model_kwargs is None:
edge_model_kwargs = {}
if node_model_kwargs is None:
node_model_kwargs = {}
if global_model_kwargs is None:
global_model_kwargs = {}
return graph.replace(
edges=self._edge_model(graph.edges, **edge_model_kwargs),
nodes=self._node_model(graph.nodes, **node_model_kwargs),
globals=self._global_model(graph.globals, **global_model_kwargs))
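# Editorial usage sketch (an assumption, not part of the original file): uses
# GraphIndependent as a simple encoder that maps edge, node and global features
# through separate linear layers, with no message passing between elements.
def _example_graph_independent(input_graph):
  import sonnet as snt  # Assumed available.
  encoder = GraphIndependent(
      edge_model_fn=lambda: snt.Linear(16),
      node_model_fn=lambda: snt.Linear(16),
      global_model_fn=lambda: snt.Linear(16))
  return encoder(input_graph)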
class DeepSets(_base.AbstractModule):
"""DeepSets module.
Implementation for the model described in https://arxiv.org/abs/1703.06114
(M. Zaheer, S. Kottur, S. Ravanbakhsh, B. Poczos, R. Salakhutdinov, A. Smola).
See also PointNet (https://arxiv.org/abs/1612.00593, C. Qi, H. Su, K. Mo,
L. J. Guibas) for a related model.
This module operates on sets, which can be thought of as graphs without
  edges. The node features are first updated based on their values and the
  global features, and new global features are then computed based on the
  updated node features.
Note that in the original model, only the globals are updated in the returned
graph, while this implementation also returns updated nodes.
The original model can be reproduced by writing:
```
  deep_sets = DeepSets(node_model_fn, global_model_fn)
  output = deep_sets(input_graph)
  output = input_graph.replace(globals=output.globals)
```
This module does not use the edges data or the information contained in the
receivers or senders; the output graph has the same value in those fields as
the input graph. Those fields can also have `None` values in the input
`graphs.GraphsTuple`.
"""
def __init__(self,
node_model_fn,
global_model_fn,
reducer=tf.math.unsorted_segment_sum,
name="deep_sets"):
"""Initializes the DeepSets module.
Args:
node_model_fn: A callable to be passed to NodeBlock. The callable must
return a Sonnet module (or equivalent; see NodeBlock for details). The
shape of this module's output must equal the shape of the input graph's
global features, but for the first and last axis.
global_model_fn: A callable to be passed to GlobalBlock. The callable must
return a Sonnet module (or equivalent; see GlobalBlock for details).
reducer: Reduction to be used when aggregating the nodes in the globals.
This should be a callable whose signature matches
tf.math.unsorted_segment_sum.
name: The module name.
"""
super(DeepSets, self).__init__(name=name)
with self._enter_variable_scope():
self._node_block = blocks.NodeBlock(
node_model_fn=node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=True)
self._global_block = blocks.GlobalBlock(
global_model_fn=global_model_fn,
use_edges=False,
use_nodes=True,
use_globals=False,
nodes_reducer=reducer)
def _build(self,
graph,
node_model_kwargs=None,
global_model_kwargs=None):
"""Connects the DeepSets network.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, whose edges, senders
or receivers properties may be `None`. The features of every node and
global of `graph` should be concatenable on the last axis (i.e. the
shapes of `graph.nodes` and `graph.globals` must match but for their
first and last axis).
node_model_kwargs: Optional keyword arguments to pass to
the node block model.
global_model_kwargs: Optional keyword arguments to pass to
the global block model.
Returns:
An output `graphs.GraphsTuple` with updated globals.
"""
return self._global_block(
self._node_block(graph, node_model_kwargs), global_model_kwargs)
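# Editorial usage sketch (an assumption, not part of the original file): runs
# DeepSets on a batch of sets encoded as a `graphs.GraphsTuple`; the edges,
# senders and receivers fields may be `None`, and nodes are mean-pooled into
# the globals here.
def _example_deep_sets(set_graph):
  import sonnet as snt  # Assumed available.
  deep_sets = DeepSets(
      node_model_fn=lambda: snt.nets.MLP([32, 32]),
      global_model_fn=lambda: snt.nets.MLP([32, 32]),
      reducer=tf.math.unsorted_segment_mean)
  return deep_sets(set_graph)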
class CommNet(_base.AbstractModule):
"""CommNet module.
Implementation for the model originally described in
https://arxiv.org/abs/1605.07736 (S. Sukhbaatar, A. Szlam, R. Fergus), in the
version presented in https://arxiv.org/abs/1706.06122 (Y. Hoshen).
This module internally creates edge features based on the features from the
nodes sending to that edge, and independently learns an embedding for each
node. It then uses these edges and nodes features to compute updated node
features.
  This module does not use the global features nor the edge features of the
  input, but it does use the receivers and senders information. The output
  graph has the same values in the edge and global fields as the input graph.
  The edge and global feature fields may have a `None` value in the input
  `graphs.GraphsTuple`.
"""
def __init__(self,
edge_model_fn,
node_encoder_model_fn,
node_model_fn,
reducer=tf.math.unsorted_segment_sum,
name="comm_net"):
"""Initializes the CommNet module.
Args:
edge_model_fn: A callable to be passed to EdgeBlock. The callable must
return a Sonnet module (or equivalent; see EdgeBlock for details).
node_encoder_model_fn: A callable to be passed to the NodeBlock
responsible for the first encoding of the nodes. The callable must
return a Sonnet module (or equivalent; see NodeBlock for details). The
shape of this module's output should match the shape of the module built
by `edge_model_fn`, but for the first and last dimension.
node_model_fn: A callable to be passed to NodeBlock. The callable must
return a Sonnet module (or equivalent; see NodeBlock for details).
reducer: Reduction to be used when aggregating the edges in the nodes.
This should be a callable whose signature matches
tf.math.unsorted_segment_sum.
name: The module name.
"""
super(CommNet, self).__init__(name=name)
with self._enter_variable_scope():
# Computes $\Psi_{com}(x_j)$ in Eq. (2) of 1706.06122
self._edge_block = blocks.EdgeBlock(
edge_model_fn=edge_model_fn,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=True,
use_globals=False)
# Computes $\Phi(x_i)$ in Eq. (2) of 1706.06122
self._node_encoder_block = blocks.NodeBlock(
node_model_fn=node_encoder_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer,
name="node_encoder_block")
# Computes $\Theta(..)$ in Eq.(2) of 1706.06122
self._node_block = blocks.NodeBlock(
node_model_fn=node_model_fn,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
def _build(self,
graph,
edge_model_kwargs=None,
node_encoder_model_kwargs=None,
node_model_kwargs=None):
"""Connects the CommNet network.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, with non-`None` nodes,
receivers and senders.
edge_model_kwargs: Optional keyword arguments to pass to
the edge block model.
node_encoder_model_kwargs: Optional keyword arguments to pass to
        the node block encoder model.
node_model_kwargs: Optional keyword arguments to pass to
the node block model.
Returns:
An output `graphs.GraphsTuple` with updated nodes.
Raises:
ValueError: if any of `graph.nodes`, `graph.receivers` or `graph.senders`
is `None`.
"""
node_input = self._node_encoder_block(
self._edge_block(graph, edge_model_kwargs), node_encoder_model_kwargs)
return graph.replace(
nodes=self._node_block(node_input, node_model_kwargs).nodes)
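# Editorial usage sketch (an assumption, not part of the original file): builds
# a CommNet; only node features plus senders/receivers are required, so the
# edge and global features of `input_graph` may be `None`.
def _example_comm_net(input_graph):
  import sonnet as snt  # Assumed available.
  comm_net = CommNet(
      edge_model_fn=lambda: snt.Linear(32),
      node_encoder_model_fn=lambda: snt.Linear(32),
      node_model_fn=lambda: snt.Linear(32))
  return comm_net(input_graph)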
def _unsorted_segment_softmax(data,
segment_ids,
num_segments,
name="unsorted_segment_softmax"):
"""Performs an elementwise softmax operation along segments of a tensor.
The input parameters are analogous to `tf.math.unsorted_segment_sum`. It
produces an output of the same shape as the input data, after performing an
  elementwise softmax operation between all of the rows with common segment id.
Args:
data: A tensor with at least one dimension.
segment_ids: A tensor of indices segmenting `data` across the first
dimension.
num_segments: A scalar tensor indicating the number of segments. It should
be at least `max(segment_ids) + 1`.
name: A name for the operation (optional).
Returns:
A tensor with the same shape as `data` after applying the softmax operation.
"""
with tf.name_scope(name):
segment_maxes = tf.math.unsorted_segment_max(data, segment_ids,
num_segments)
maxes = tf.gather(segment_maxes, segment_ids)
# Possibly refactor to `tf.stop_gradient(maxes)` for better performance.
data -= maxes
exp_data = tf.exp(data)
segment_sum_exp_data = tf.math.unsorted_segment_sum(exp_data, segment_ids,
num_segments)
sum_exp_data = tf.gather(segment_sum_exp_data, segment_ids)
return exp_data / sum_exp_data
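# Editorial sketch (an assumption, not part of the original file): a tiny
# concrete check of `_unsorted_segment_softmax`. Rows 0 and 2 share segment 0,
# so their outputs form a softmax over {1.0, 3.0}; row 1 is alone in segment 1
# and therefore maps to 1.0.
def _example_unsorted_segment_softmax():
  data = tf.constant([1.0, 2.0, 3.0])
  segment_ids = tf.constant([0, 1, 0])
  return _unsorted_segment_softmax(data, segment_ids, num_segments=2)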
def _received_edges_normalizer(graph,
normalizer,
name="received_edges_normalizer"):
"""Performs elementwise normalization for all received edges by a given node.
Args:
graph: A graph containing edge information.
normalizer: A normalizer function following the signature of
`modules._unsorted_segment_softmax`.
name: A name for the operation (optional).
Returns:
A tensor with the resulting normalized edges.
"""
with tf.name_scope(name):
return normalizer(
data=graph.edges,
segment_ids=graph.receivers,
num_segments=tf.reduce_sum(graph.n_node))
class SelfAttention(_base.AbstractModule):
"""Multi-head self-attention module.
The module is based on the following three papers:
* A simple neural network module for relational reasoning (RNs):
https://arxiv.org/abs/1706.01427
* Non-local Neural Networks: https://arxiv.org/abs/1711.07971.
* Attention Is All You Need (AIAYN): https://arxiv.org/abs/1706.03762.
  The input to the module consists of a graph containing values for each node
and connectivity between them, a tensor containing keys for each node
and a tensor containing queries for each node.
  The self-attention step consists of updating the node values, with each new
  node value computed in a two-step process:
- Computing the attention weights between each node and all of its senders
nodes, by calculating sum(sender_key*receiver_query) and using the softmax
operation on all attention weights for each node.
- For each receiver node, compute the new node value as the weighted average
of the values of the sender nodes, according to the attention weights.
  - Nodes with no received edges get an updated value of 0.
Values, keys and queries contain a "head" axis to compute independent
self-attention for each of the heads.
"""
def __init__(self, name="self_attention"):
"""Inits the module.
Args:
name: The module name.
"""
super(SelfAttention, self).__init__(name=name)
self._normalizer = _unsorted_segment_softmax
def _build(self, node_values, node_keys, node_queries, attention_graph):
"""Connects the multi-head self-attention module.
The self-attention is only computed according to the connectivity of the
input graphs, with receiver nodes attending to sender nodes.
Args:
node_values: Tensor containing the values associated to each of the nodes.
The expected shape is [total_num_nodes, num_heads, key_size].
node_keys: Tensor containing the key associated to each of the nodes. The
expected shape is [total_num_nodes, num_heads, key_size].
node_queries: Tensor containing the query associated to each of the nodes.
The expected shape is [total_num_nodes, num_heads, query_size]. The
query size must be equal to the key size.
attention_graph: Graph containing connectivity information between nodes
        via the senders and receivers fields. Node A will only attempt to
        attend to Node B if `attention_graph` contains an edge sent by Node B
        and received by Node A.
Returns:
An output `graphs.GraphsTuple` with updated nodes containing the
aggregated attended value for each of the nodes with shape
[total_num_nodes, num_heads, value_size].
Raises:
ValueError: if the input graph does not have edges.
"""
# Sender nodes put their keys and values in the edges.
# [total_num_edges, num_heads, query_size]
sender_keys = blocks.broadcast_sender_nodes_to_edges(
attention_graph.replace(nodes=node_keys))
# [total_num_edges, num_heads, value_size]
sender_values = blocks.broadcast_sender_nodes_to_edges(
attention_graph.replace(nodes=node_values))
# Receiver nodes put their queries in the edges.
# [total_num_edges, num_heads, key_size]
receiver_queries = blocks.broadcast_receiver_nodes_to_edges(
attention_graph.replace(nodes=node_queries))
# Attention weight for each edge.
# [total_num_edges, num_heads]
attention_weights_logits = tf.reduce_sum(
sender_keys * receiver_queries, axis=-1)
normalized_attention_weights = _received_edges_normalizer(
attention_graph.replace(edges=attention_weights_logits),
normalizer=self._normalizer)
# Attending to sender values according to the weights.
# [total_num_edges, num_heads, embedding_size]
    attended_edges = sender_values * normalized_attention_weights[..., None]
# Summing all of the attended values from each node.
# [total_num_nodes, num_heads, embedding_size]
received_edges_aggregator = blocks.ReceivedEdgesToNodesAggregator(
reducer=tf.math.unsorted_segment_sum)
aggregated_attended_values = received_edges_aggregator(
        attention_graph.replace(edges=attended_edges))
return attention_graph.replace(nodes=aggregated_attended_values)
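# Editorial usage sketch (an assumption, not part of the original file):
# applies multi-head self-attention over the connectivity of
# `attention_graph`, assumed to be a `graphs.GraphsTuple` with `num_nodes`
# nodes in total and non-`None` senders and receivers.
def _example_self_attention(attention_graph, num_nodes=5, num_heads=2,
                            key_size=4):
  node_values = tf.random.normal([num_nodes, num_heads, key_size])
  node_keys = tf.random.normal([num_nodes, num_heads, key_size])
  node_queries = tf.random.normal([num_nodes, num_heads, key_size])
  return SelfAttention()(node_values, node_keys, node_queries, attention_graph)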
| graph_nets-master | graph_nets/modules.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Building blocks for Graph Networks.
This module contains elementary building blocks of graph networks:
- `broadcast_{field_1}_to_{field_2}` propagates the features from `field_1`
onto the relevant elements of `field_2`;
- `{field_1}To{field_2}Aggregator` propagates and then reduces the features
from `field_1` onto the relevant elements of `field_2`;
- the `EdgeBlock`, `NodeBlock` and `GlobalBlock` are elementary graph networks
that only update the edges (resp. the nodes, the globals) of their input
graph (as described in https://arxiv.org/abs/1806.01261).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from graph_nets import _base
from graph_nets import graphs
from graph_nets import utils_tf
import tensorflow as tf
NODES = graphs.NODES
EDGES = graphs.EDGES
GLOBALS = graphs.GLOBALS
RECEIVERS = graphs.RECEIVERS
SENDERS = graphs.SENDERS
N_NODE = graphs.N_NODE
N_EDGE = graphs.N_EDGE
def _validate_graph(graph, mandatory_fields, additional_message=None):
for field in mandatory_fields:
if getattr(graph, field) is None:
message = "`{}` field cannot be None".format(field)
if additional_message:
message += " " + format(additional_message)
message += "."
raise ValueError(message)
def _validate_broadcasted_graph(graph, from_field, to_field):
additional_message = "when broadcasting {} to {}".format(from_field, to_field)
_validate_graph(graph, [from_field, to_field], additional_message)
def _get_static_num_nodes(graph):
"""Returns the static total number of nodes in a batch or None."""
return None if graph.nodes is None else graph.nodes.shape.as_list()[0]
def _get_static_num_edges(graph):
"""Returns the static total number of edges in a batch or None."""
return None if graph.senders is None else graph.senders.shape.as_list()[0]
def broadcast_globals_to_edges(graph, name="broadcast_globals_to_edges",
num_edges_hint=None):
"""Broadcasts the global features to the edges of a graph.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, with globals features of
shape `[n_graphs] + global_shape`, and `N_EDGE` field of shape
`[n_graphs]`.
name: (string, optional) A name for the operation.
num_edges_hint: Integer indicating the total number of edges, if known.
Returns:
A tensor of shape `[n_edges] + global_shape`, where
`n_edges = sum(graph.n_edge)`. The i-th element of this tensor is given by
`globals[j]`, where j is the index of the graph the i-th edge belongs to
(i.e. is such that
`sum_{k < j} graphs.n_edge[k] <= i < sum_{k <= j} graphs.n_edge[k]`).
Raises:
ValueError: If either `graph.globals` or `graph.n_edge` is `None`.
"""
_validate_broadcasted_graph(graph, GLOBALS, N_EDGE)
with tf.name_scope(name):
return utils_tf.repeat(graph.globals, graph.n_edge, axis=0,
sum_repeats_hint=num_edges_hint)
def broadcast_globals_to_nodes(graph, name="broadcast_globals_to_nodes",
num_nodes_hint=None):
"""Broadcasts the global features to the nodes of a graph.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, with globals features of
shape `[n_graphs] + global_shape`, and `N_NODE` field of shape
`[n_graphs]`.
name: (string, optional) A name for the operation.
num_nodes_hint: Integer indicating the total number of nodes, if known.
Returns:
A tensor of shape `[n_nodes] + global_shape`, where
`n_nodes = sum(graph.n_node)`. The i-th element of this tensor is given by
`globals[j]`, where j is the index of the graph the i-th node belongs to
(i.e. is such that
`sum_{k < j} graphs.n_node[k] <= i < sum_{k <= j} graphs.n_node[k]`).
Raises:
ValueError: If either `graph.globals` or `graph.n_node` is `None`.
"""
_validate_broadcasted_graph(graph, GLOBALS, N_NODE)
with tf.name_scope(name):
return utils_tf.repeat(graph.globals, graph.n_node, axis=0,
sum_repeats_hint=num_nodes_hint)
def broadcast_sender_nodes_to_edges(
graph, name="broadcast_sender_nodes_to_edges"):
"""Broadcasts the node features to the edges they are sending into.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, with nodes features of
shape `[n_nodes] + node_shape`, and `senders` field of shape
`[n_edges]`.
name: (string, optional) A name for the operation.
Returns:
A tensor of shape `[n_edges] + node_shape`, where
`n_edges = sum(graph.n_edge)`. The i-th element is given by
`graph.nodes[graph.senders[i]]`.
Raises:
ValueError: If either `graph.nodes` or `graph.senders` is `None`.
"""
_validate_broadcasted_graph(graph, NODES, SENDERS)
with tf.name_scope(name):
return tf.gather(graph.nodes, graph.senders)
def broadcast_receiver_nodes_to_edges(
graph, name="broadcast_receiver_nodes_to_edges"):
"""Broadcasts the node features to the edges they are receiving from.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, with nodes features of
shape `[n_nodes] + node_shape`, and receivers of shape `[n_edges]`.
name: (string, optional) A name for the operation.
Returns:
A tensor of shape `[n_edges] + node_shape`, where
`n_edges = sum(graph.n_edge)`. The i-th element is given by
`graph.nodes[graph.receivers[i]]`.
Raises:
ValueError: If either `graph.nodes` or `graph.receivers` is `None`.
"""
_validate_broadcasted_graph(graph, NODES, RECEIVERS)
with tf.name_scope(name):
return tf.gather(graph.nodes, graph.receivers)
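# Editorial sketch (an assumption, not part of the original file): a tiny
# single-graph `GraphsTuple` used to illustrate the broadcast helpers above.
# The graph has 3 nodes and 2 edges (1 -> 0 and 0 -> 2), so
# `broadcast_sender_nodes_to_edges` returns the features of nodes [1, 0] and
# `broadcast_globals_to_edges` repeats the single global once per edge.
def _example_broadcasts():
  graph = graphs.GraphsTuple(
      nodes=tf.constant([[1.0], [2.0], [3.0]]),
      edges=None,
      receivers=tf.constant([0, 2]),
      senders=tf.constant([1, 0]),
      globals=tf.constant([[10.0]]),
      n_node=tf.constant([3]),
      n_edge=tf.constant([2]))
  return (broadcast_sender_nodes_to_edges(graph),  # [[2.], [1.]]
          broadcast_globals_to_edges(graph))  # [[10.], [10.]]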
class EdgesToGlobalsAggregator(_base.AbstractModule):
"""Aggregates all edges into globals."""
def __init__(self, reducer, name="edges_to_globals_aggregator"):
"""Initializes the EdgesToGlobalsAggregator module.
The reducer is used for combining per-edge features (one set of edge
feature vectors per graph) to give per-graph features (one feature
vector per graph). The reducer should take a `Tensor` of edge features, a
`Tensor` of segment indices, and a number of graphs. It should be invariant
under permutation of edge features within each graph.
Examples of compatible reducers are:
* tf.math.unsorted_segment_sum
* tf.math.unsorted_segment_mean
* tf.math.unsorted_segment_prod
* unsorted_segment_min_or_zero
* unsorted_segment_max_or_zero
Args:
reducer: A function for reducing sets of per-edge features to individual
per-graph features.
name: The module name.
"""
super(EdgesToGlobalsAggregator, self).__init__(name=name)
self._reducer = reducer
def _build(self, graph):
_validate_graph(graph, (EDGES,),
additional_message="when aggregating from edges.")
num_graphs = utils_tf.get_num_graphs(graph)
graph_index = tf.range(num_graphs)
indices = utils_tf.repeat(graph_index, graph.n_edge, axis=0,
sum_repeats_hint=_get_static_num_edges(graph))
return self._reducer(graph.edges, indices, num_graphs)
class NodesToGlobalsAggregator(_base.AbstractModule):
"""Aggregates all nodes into globals."""
def __init__(self, reducer, name="nodes_to_globals_aggregator"):
"""Initializes the NodesToGlobalsAggregator module.
The reducer is used for combining per-node features (one set of node
feature vectors per graph) to give per-graph features (one feature
vector per graph). The reducer should take a `Tensor` of node features, a
`Tensor` of segment indices, and a number of graphs. It should be invariant
under permutation of node features within each graph.
Examples of compatible reducers are:
* tf.math.unsorted_segment_sum
* tf.math.unsorted_segment_mean
* tf.math.unsorted_segment_prod
* unsorted_segment_min_or_zero
* unsorted_segment_max_or_zero
Args:
reducer: A function for reducing sets of per-node features to individual
per-graph features.
name: The module name.
"""
super(NodesToGlobalsAggregator, self).__init__(name=name)
self._reducer = reducer
def _build(self, graph):
_validate_graph(graph, (NODES,),
additional_message="when aggregating from nodes.")
num_graphs = utils_tf.get_num_graphs(graph)
graph_index = tf.range(num_graphs)
indices = utils_tf.repeat(graph_index, graph.n_node, axis=0,
sum_repeats_hint=_get_static_num_nodes(graph))
return self._reducer(graph.nodes, indices, num_graphs)
class _EdgesToNodesAggregator(_base.AbstractModule):
"""Agregates sent or received edges into the corresponding nodes."""
def __init__(self, reducer, use_sent_edges=False,
name="edges_to_nodes_aggregator"):
super(_EdgesToNodesAggregator, self).__init__(name=name)
self._reducer = reducer
self._use_sent_edges = use_sent_edges
def _build(self, graph):
_validate_graph(graph, (EDGES, SENDERS, RECEIVERS,),
additional_message="when aggregating from edges.")
# If the number of nodes are known at graph construction time (based on the
# shape) then use that value to make the model compatible with XLA/TPU.
if graph.nodes is not None and graph.nodes.shape.as_list()[0] is not None:
num_nodes = graph.nodes.shape.as_list()[0]
else:
num_nodes = tf.reduce_sum(graph.n_node)
indices = graph.senders if self._use_sent_edges else graph.receivers
return self._reducer(graph.edges, indices, num_nodes)
class SentEdgesToNodesAggregator(_EdgesToNodesAggregator):
"""Agregates sent edges into the corresponding sender nodes."""
def __init__(self, reducer, name="sent_edges_to_nodes_aggregator"):
"""Constructor.
The reducer is used for combining per-edge features (one set of edge
feature vectors per node) to give per-node features (one feature
vector per node). The reducer should take a `Tensor` of edge features, a
`Tensor` of segment indices, and a number of nodes. It should be invariant
under permutation of edge features within each segment.
Examples of compatible reducers are:
* tf.math.unsorted_segment_sum
* tf.math.unsorted_segment_mean
* tf.math.unsorted_segment_prod
* unsorted_segment_min_or_zero
* unsorted_segment_max_or_zero
Args:
reducer: A function for reducing sets of per-edge features to individual
per-node features.
name: The module name.
"""
super(SentEdgesToNodesAggregator, self).__init__(
use_sent_edges=True,
reducer=reducer,
name=name)
class ReceivedEdgesToNodesAggregator(_EdgesToNodesAggregator):
"""Agregates received edges into the corresponding receiver nodes."""
def __init__(self, reducer, name="received_edges_to_nodes_aggregator"):
"""Constructor.
The reducer is used for combining per-edge features (one set of edge
feature vectors per node) to give per-node features (one feature
vector per node). The reducer should take a `Tensor` of edge features, a
`Tensor` of segment indices, and a number of nodes. It should be invariant
under permutation of edge features within each segment.
Examples of compatible reducers are:
* tf.math.unsorted_segment_sum
* tf.math.unsorted_segment_mean
* tf.math.unsorted_segment_prod
* unsorted_segment_min_or_zero
* unsorted_segment_max_or_zero
Args:
reducer: A function for reducing sets of per-edge features to individual
per-node features.
name: The module name.
"""
super(ReceivedEdgesToNodesAggregator, self).__init__(
use_sent_edges=False, reducer=reducer, name=name)
def _unsorted_segment_reduction_or_zero(reducer, values, indices, num_groups):
"""Common code for unsorted_segment_{min,max}_or_zero (below)."""
reduced = reducer(values, indices, num_groups)
present_indices = tf.math.unsorted_segment_max(
tf.ones_like(indices, dtype=reduced.dtype), indices, num_groups)
present_indices = tf.clip_by_value(present_indices, 0, 1)
present_indices = tf.reshape(
present_indices, [num_groups] + [1] * (reduced.shape.ndims - 1))
reduced *= present_indices
return reduced
def unsorted_segment_min_or_zero(values, indices, num_groups,
name="unsorted_segment_min_or_zero"):
"""Aggregates information using elementwise min.
Segments with no elements are given a "min" of zero instead of the most
positive finite value possible (which is what `tf.math.unsorted_segment_min`
would do).
Args:
values: A `Tensor` of per-element features.
indices: A 1-D `Tensor` whose length is equal to `values`' first dimension.
num_groups: A `Tensor`.
name: (string, optional) A name for the operation.
Returns:
A `Tensor` of the same type as `values`.
"""
with tf.name_scope(name):
return _unsorted_segment_reduction_or_zero(
tf.math.unsorted_segment_min, values, indices, num_groups)
def unsorted_segment_max_or_zero(values, indices, num_groups,
name="unsorted_segment_max_or_zero"):
"""Aggregates information using elementwise max.
Segments with no elements are given a "max" of zero instead of the most
negative finite value possible (which is what `tf.math.unsorted_segment_max`
would do).
Args:
values: A `Tensor` of per-element features.
indices: A 1-D `Tensor` whose length is equal to `values`' first dimension.
num_groups: A `Tensor`.
name: (string, optional) A name for the operation.
Returns:
A `Tensor` of the same type as `values`.
"""
with tf.name_scope(name):
return _unsorted_segment_reduction_or_zero(
tf.math.unsorted_segment_max, values, indices, num_groups)
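# Editorial sketch (an assumption, not part of the original file): segment 1
# receives no elements below, so `unsorted_segment_max_or_zero` assigns it 0
# instead of the most negative finite float that
# `tf.math.unsorted_segment_max` would produce.
def _example_unsorted_segment_max_or_zero():
  values = tf.constant([1.0, -2.0, 3.0])
  indices = tf.constant([0, 0, 2])
  # Expected result: [1.0, 0.0, 3.0].
  return unsorted_segment_max_or_zero(values, indices, num_groups=3)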
class EdgeBlock(_base.AbstractModule):
"""Edge block.
A block that updates the features of each edge in a batch of graphs based on
(a subset of) the previous edge features, the features of the adjacent nodes,
and the global features of the corresponding graph.
See https://arxiv.org/abs/1806.01261 for more details.
"""
def __init__(self,
edge_model_fn,
use_edges=True,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=True,
name="edge_block"):
"""Initializes the EdgeBlock module.
Args:
edge_model_fn: A callable that will be called in the variable scope of
this EdgeBlock and should return a Sonnet module (or equivalent
callable) to be used as the edge model. The returned module should take
a `Tensor` (of concatenated input features for each edge) and return a
`Tensor` (of output features for each edge). Typically, this module
        would input and output `Tensor`s of rank 2, but it may also input or
        output `Tensor`s of larger rank. See the `_build` method documentation
        for more details on the acceptable inputs to this module in that case.
use_edges: (bool, default=True). Whether to condition on edge attributes.
use_receiver_nodes: (bool, default=True). Whether to condition on receiver
node attributes.
use_sender_nodes: (bool, default=True). Whether to condition on sender
node attributes.
use_globals: (bool, default=True). Whether to condition on global
attributes.
name: The module name.
Raises:
ValueError: When fields that are required are missing.
"""
super(EdgeBlock, self).__init__(name=name)
if not (use_edges or use_sender_nodes or use_receiver_nodes or use_globals):
raise ValueError("At least one of use_edges, use_sender_nodes, "
"use_receiver_nodes or use_globals must be True.")
self._use_edges = use_edges
self._use_receiver_nodes = use_receiver_nodes
self._use_sender_nodes = use_sender_nodes
self._use_globals = use_globals
with self._enter_variable_scope():
self._edge_model = edge_model_fn()
def _build(self, graph, edge_model_kwargs=None):
"""Connects the edge block.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, whose individual edges
features (if `use_edges` is `True`), individual nodes features (if
`use_receiver_nodes` or `use_sender_nodes` is `True`) and per graph
globals (if `use_globals` is `True`) should be concatenable on the last
axis.
edge_model_kwargs: Optional keyword arguments to pass to the `edge_model`.
Returns:
An output `graphs.GraphsTuple` with updated edges.
Raises:
ValueError: If `graph` does not have non-`None` receivers and senders, or
if `graph` has `None` fields incompatible with the selected `use_edges`,
`use_receiver_nodes`, `use_sender_nodes`, or `use_globals` options.
"""
if edge_model_kwargs is None:
edge_model_kwargs = {}
_validate_graph(
graph, (SENDERS, RECEIVERS, N_EDGE), " when using an EdgeBlock")
edges_to_collect = []
if self._use_edges:
_validate_graph(graph, (EDGES,), "when use_edges == True")
edges_to_collect.append(graph.edges)
if self._use_receiver_nodes:
edges_to_collect.append(broadcast_receiver_nodes_to_edges(graph))
if self._use_sender_nodes:
edges_to_collect.append(broadcast_sender_nodes_to_edges(graph))
if self._use_globals:
num_edges_hint = _get_static_num_edges(graph)
edges_to_collect.append(
broadcast_globals_to_edges(graph, num_edges_hint=num_edges_hint))
collected_edges = tf.concat(edges_to_collect, axis=-1)
updated_edges = self._edge_model(collected_edges, **edge_model_kwargs)
return graph.replace(edges=updated_edges)
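# Editorial usage sketch (an assumption, not part of the original file): an
# EdgeBlock that updates edge features from the previous edges and the two
# incident nodes only, so `graph.globals` may be left `None`.
def _example_edge_block(graph):
  import sonnet as snt  # Assumed available, as elsewhere in this repository.
  edge_block = EdgeBlock(
      edge_model_fn=lambda: snt.Linear(16),
      use_edges=True,
      use_receiver_nodes=True,
      use_sender_nodes=True,
      use_globals=False)
  return edge_block(graph)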
class NodeBlock(_base.AbstractModule):
"""Node block.
A block that updates the features of each node in batch of graphs based on
(a subset of) the previous node features, the aggregated features of the
adjacent edges, and the global features of the corresponding graph.
See https://arxiv.org/abs/1806.01261 for more details.
"""
def __init__(self,
node_model_fn,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=True,
received_edges_reducer=tf.math.unsorted_segment_sum,
sent_edges_reducer=tf.math.unsorted_segment_sum,
name="node_block"):
"""Initializes the NodeBlock module.
Args:
node_model_fn: A callable that will be called in the variable scope of
this NodeBlock and should return a Sonnet module (or equivalent
callable) to be used as the node model. The returned module should take
a `Tensor` (of concatenated input features for each node) and return a
`Tensor` (of output features for each node). Typically, this module
        would input and output `Tensor`s of rank 2, but it may also input or
        output `Tensor`s of larger rank. See the `_build` method documentation
        for more details on the acceptable inputs to this module in that case.
use_received_edges: (bool, default=True) Whether to condition on
aggregated edges received by each node.
use_sent_edges: (bool, default=False) Whether to condition on aggregated
edges sent by each node.
use_nodes: (bool, default=True) Whether to condition on node attributes.
use_globals: (bool, default=True) Whether to condition on global
attributes.
received_edges_reducer: Reduction to be used when aggregating received
edges. This should be a callable whose signature matches
`tf.math.unsorted_segment_sum`.
sent_edges_reducer: Reduction to be used when aggregating sent edges.
This should be a callable whose signature matches
`tf.math.unsorted_segment_sum`.
name: The module name.
Raises:
ValueError: When fields that are required are missing.
"""
super(NodeBlock, self).__init__(name=name)
if not (use_nodes or use_sent_edges or use_received_edges or use_globals):
raise ValueError("At least one of use_received_edges, use_sent_edges, "
"use_nodes or use_globals must be True.")
self._use_received_edges = use_received_edges
self._use_sent_edges = use_sent_edges
self._use_nodes = use_nodes
self._use_globals = use_globals
with self._enter_variable_scope():
self._node_model = node_model_fn()
if self._use_received_edges:
if received_edges_reducer is None:
raise ValueError(
"If `use_received_edges==True`, `received_edges_reducer` "
"should not be None.")
self._received_edges_aggregator = ReceivedEdgesToNodesAggregator(
received_edges_reducer)
if self._use_sent_edges:
if sent_edges_reducer is None:
raise ValueError(
"If `use_sent_edges==True`, `sent_edges_reducer` "
"should not be None.")
self._sent_edges_aggregator = SentEdgesToNodesAggregator(
sent_edges_reducer)
def _build(self, graph, node_model_kwargs=None):
"""Connects the node block.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, whose individual edges
features (if `use_received_edges` or `use_sent_edges` is `True`),
individual nodes features (if `use_nodes` is True) and per graph globals
(if `use_globals` is `True`) should be concatenable on the last axis.
node_model_kwargs: Optional keyword arguments to pass to the `node_model`.
Returns:
An output `graphs.GraphsTuple` with updated nodes.
"""
if node_model_kwargs is None:
node_model_kwargs = {}
nodes_to_collect = []
if self._use_received_edges:
nodes_to_collect.append(self._received_edges_aggregator(graph))
if self._use_sent_edges:
nodes_to_collect.append(self._sent_edges_aggregator(graph))
if self._use_nodes:
_validate_graph(graph, (NODES,), "when use_nodes == True")
nodes_to_collect.append(graph.nodes)
if self._use_globals:
# The hint will be an integer if the graph has node features and the total
# number of nodes is known at tensorflow graph definition time, or None
# otherwise.
num_nodes_hint = _get_static_num_nodes(graph)
nodes_to_collect.append(
broadcast_globals_to_nodes(graph, num_nodes_hint=num_nodes_hint))
collected_nodes = tf.concat(nodes_to_collect, axis=-1)
updated_nodes = self._node_model(collected_nodes, **node_model_kwargs)
return graph.replace(nodes=updated_nodes)
class GlobalBlock(_base.AbstractModule):
"""Global block.
A block that updates the global features of each graph in a batch based on
(a subset of) the previous global features, the aggregated features of the
edges of the graph, and the aggregated features of the nodes of the graph.
See https://arxiv.org/abs/1806.01261 for more details.
"""
def __init__(self,
global_model_fn,
use_edges=True,
use_nodes=True,
use_globals=True,
nodes_reducer=tf.math.unsorted_segment_sum,
edges_reducer=tf.math.unsorted_segment_sum,
name="global_block"):
"""Initializes the GlobalBlock module.
Args:
global_model_fn: A callable that will be called in the variable scope of
this GlobalBlock and should return a Sonnet module (or equivalent
callable) to be used as the global model. The returned module should
take a `Tensor` (of concatenated input features) and return a `Tensor`
(the global output features). Typically, this module would input and
output `Tensor`s of rank 2, but it may also input or output larger
ranks. See the `_build` method documentation for more details on the
acceptable inputs to this module in that case.
use_edges: (bool, default=True) Whether to condition on aggregated edges.
use_nodes: (bool, default=True) Whether to condition on node attributes.
use_globals: (bool, default=True) Whether to condition on global
attributes.
nodes_reducer: Reduction to be used when aggregating nodes. This should
be a callable whose signature matches tf.math.unsorted_segment_sum.
edges_reducer: Reduction to be used when aggregating edges. This should
be a callable whose signature matches tf.math.unsorted_segment_sum.
name: The module name.
Raises:
ValueError: When fields that are required are missing.
"""
super(GlobalBlock, self).__init__(name=name)
if not (use_nodes or use_edges or use_globals):
raise ValueError("At least one of use_edges, "
"use_nodes or use_globals must be True.")
self._use_edges = use_edges
self._use_nodes = use_nodes
self._use_globals = use_globals
with self._enter_variable_scope():
self._global_model = global_model_fn()
if self._use_edges:
if edges_reducer is None:
raise ValueError(
"If `use_edges==True`, `edges_reducer` should not be None.")
self._edges_aggregator = EdgesToGlobalsAggregator(
edges_reducer)
if self._use_nodes:
if nodes_reducer is None:
raise ValueError(
"If `use_nodes==True`, `nodes_reducer` should not be None.")
self._nodes_aggregator = NodesToGlobalsAggregator(
nodes_reducer)
def _build(self, graph, global_model_kwargs=None):
"""Connects the global block.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s, whose individual edges
(if `use_edges` is `True`), individual nodes (if `use_nodes` is True)
and per graph globals (if `use_globals` is `True`) should be
concatenable on the last axis.
      global_model_kwargs: Optional keyword arguments to pass to
the `global_model`.
Returns:
An output `graphs.GraphsTuple` with updated globals.
"""
if global_model_kwargs is None:
global_model_kwargs = {}
globals_to_collect = []
if self._use_edges:
_validate_graph(graph, (EDGES,), "when use_edges == True")
globals_to_collect.append(self._edges_aggregator(graph))
if self._use_nodes:
_validate_graph(graph, (NODES,), "when use_nodes == True")
globals_to_collect.append(self._nodes_aggregator(graph))
if self._use_globals:
_validate_graph(graph, (GLOBALS,), "when use_globals == True")
globals_to_collect.append(graph.globals)
collected_globals = tf.concat(globals_to_collect, axis=-1)
updated_globals = self._global_model(
collected_globals, **global_model_kwargs)
return graph.replace(globals=updated_globals)
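# Editorial usage sketch (an assumption, not part of the original file): a
# GlobalBlock that summarises each graph by mean-pooling its nodes and edges
# into new global features, without conditioning on the previous globals.
def _example_global_block(graph):
  import sonnet as snt  # Assumed available.
  global_block = GlobalBlock(
      global_model_fn=lambda: snt.Linear(16),
      use_edges=True,
      use_nodes=True,
      use_globals=False,
      nodes_reducer=tf.math.unsorted_segment_mean,
      edges_reducer=tf.math.unsorted_segment_mean)
  return global_block(graph)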
| graph_nets-master | graph_nets/blocks.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A class that defines graph-structured data.
The main purpose of the `GraphsTuple` is to represent multiple graphs with
different shapes and sizes in a way that supports batched processing.
This module first defines the string constants which are used to represent
graph(s) as tuples or dictionaries: `N_NODE, N_EDGE, NODES, EDGES, RECEIVERS,
SENDERS, GLOBALS`.
This representation could typically take the following form, for a batch of
`n_graphs` graphs stored in a `GraphsTuple` called graph:
- N_NODE: The number of nodes per graph. It is a vector of integers with shape
`[n_graphs]`, such that `graph.N_NODE[i]` is the number of nodes in the i-th
graph.
- N_EDGE: The number of edges per graph. It is a vector of integers with shape
`[n_graphs]`, such that `graph.N_EDGE[i]` is the number of edges in the i-th
graph.
- NODES: The nodes features. It is either `None` (the graph has no node
features), or a vector of shape `[n_nodes] + node_shape`, where
`n_nodes = sum(graph.N_NODE)` is the total number of nodes in the batch of
graphs, and `node_shape` represents the shape of the features of each node.
The relative index of a node from the batched version can be recovered from
the `graph.N_NODE` property. For instance, the second node of the third
graph will have its features in the
`1 + graph.N_NODE[0] + graph.N_NODE[1]`-th slot of graph.NODES.
Observe that having a `None` value for this field does not mean that the
graphs have no nodes, only that they do not have node features.
- EDGES: The edges features. It is either `None` (the graph has no edge
features), or a vector of shape `[n_edges] + edge_shape`, where
`n_edges = sum(graph.N_EDGE)` is the total number of edges in the batch of
graphs, and `edge_shape` represents the shape of the features of each edge.
The relative index of an edge from the batched version can be recovered from
the `graph.N_EDGE` property. For instance, the third edge of the third
graph will have its features in the `2 + graph.N_EDGE[0] + graph.N_EDGE[1]`-
th slot of graph.EDGES.
Observe that having a `None` value for this field does not necessarily mean
that the graph has no edges, only that they do not have edge features.
- RECEIVERS: The indices of the receiver nodes, for each edge. It is either
`None` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that `graph.RECEIVERS[i]` is the index of the node
receiving from the i-th edge.
Observe that the index is absolute (in other words, cumulative), i.e.
  `graphs.RECEIVERS` takes values in `[0, n_nodes]`. For instance, an edge
connecting the vertices with relative indices 2 and 3 in the second graph of
the batch would have a `RECEIVERS` value of `3 + graph.N_NODE[0]`.
If `graphs.RECEIVERS` is `None`, then `graphs.EDGES` and `graphs.SENDERS`
should also be `None`.
- SENDERS: The indices of the sender nodes, for each edge. It is either
`None` (if the graph has no edges), or a vector of integers of shape
`[n_edges]`, such that `graph.SENDERS[i]` is the index of the node
sending from the i-th edge.
  Observe that the index is absolute, i.e. `graphs.SENDERS` takes values in
`[0, n_nodes]`. For instance, an edge connecting the vertices with relative
indices 1 and 3 in the third graph of the batch would have a `SENDERS` value
of `1 + graph.N_NODE[0] + graph.N_NODE[1]`.
If `graphs.SENDERS` is `None`, then `graphs.EDGES` and `graphs.RECEIVERS`
should also be `None`.
- GLOBALS: The global features of the graph. It is either `None` (the graph
has no global features), or a vector of shape `[n_graphs] + global_shape`
representing graph level features.
The `utils_np` and `utils_tf` modules provide convenience methods to work with
graph that contain numpy and tensorflow data, respectively: conversion,
batching, unbatching, indexing, among others.
The `GraphsTuple` class, however, is not restricted to storing vectors, and can
be used to store attributes of graphs as well (for instance, types or shapes).
The only assertions it makes are that the `None` fields are compatible with the
definition of a graph given above, namely:
- the N_NODE and N_EDGE fields cannot be `None`;
- if RECEIVERS is None, then SENDERS must be `None` (and vice-versa);
- if RECEIVERS and SENDERS are `None`, then `EDGES` must be `None`.
Those assumptions are checked both upon initialization and when replacing a
field by calling the `replace` or `map` method.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
NODES = "nodes"
EDGES = "edges"
RECEIVERS = "receivers"
SENDERS = "senders"
GLOBALS = "globals"
N_NODE = "n_node"
N_EDGE = "n_edge"
GRAPH_FEATURE_FIELDS = (NODES, EDGES, GLOBALS)
GRAPH_INDEX_FIELDS = (RECEIVERS, SENDERS)
GRAPH_DATA_FIELDS = (NODES, EDGES, RECEIVERS, SENDERS, GLOBALS)
GRAPH_NUMBER_FIELDS = (N_NODE, N_EDGE)
ALL_FIELDS = (NODES, EDGES, RECEIVERS, SENDERS, GLOBALS, N_NODE, N_EDGE)
class GraphsTuple(
collections.namedtuple("GraphsTuple",
GRAPH_DATA_FIELDS + GRAPH_NUMBER_FIELDS)):
"""Default namedtuple describing `Graphs`s.
  A child class of `collections.namedtuple`, which allows it to be directly
  input to and output from `tensorflow.Session.run()` calls.
An instance of this class can be constructed as
```
GraphsTuple(nodes=nodes,
edges=edges,
globals=globals,
receivers=receivers,
senders=senders,
n_node=n_node,
n_edge=n_edge)
```
where `nodes`, `edges`, `globals`, `receivers`, `senders`, `n_node` and
`n_edge` are arbitrary, but are typically numpy arrays, tensors, or `None`;
see module's documentation for a more detailed description of which fields
can be left `None`.
"""
def _validate_none_fields(self):
"""Asserts that the set of `None` fields in the instance is valid."""
if self.n_node is None:
raise ValueError("Field `n_node` cannot be None")
if self.n_edge is None:
raise ValueError("Field `n_edge` cannot be None")
if self.receivers is None and self.senders is not None:
raise ValueError(
"Field `senders` must be None as field `receivers` is None")
if self.senders is None and self.receivers is not None:
raise ValueError(
"Field `receivers` must be None as field `senders` is None")
if self.receivers is None and self.edges is not None:
raise ValueError(
"Field `edges` must be None as field `receivers` and `senders` are "
"None")
def __init__(self, *args, **kwargs):
del args, kwargs
# The fields of a `namedtuple` are filled in the `__new__` method.
# `__init__` does not accept parameters.
super(GraphsTuple, self).__init__()
self._validate_none_fields()
def replace(self, **kwargs):
output = self._replace(**kwargs)
output._validate_none_fields() # pylint: disable=protected-access
return output
def map(self, field_fn, fields=GRAPH_FEATURE_FIELDS):
"""Applies `field_fn` to the fields `fields` of the instance.
`field_fn` is applied exactly once per field in `fields`. The result must
satisfy the `GraphsTuple` requirement w.r.t. `None` fields, i.e. the
`SENDERS` cannot be `None` if the `EDGES` or `RECEIVERS` are not `None`,
etc.
Args:
field_fn: A callable that take a single argument.
fields: (iterable of `str`). An iterable of the fields to apply
`field_fn` to.
Returns:
A copy of the instance, with the fields in `fields` replaced by the result
of applying `field_fn` to them.
"""
return self.replace(**{k: field_fn(getattr(self, k)) for k in fields})
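# Editorial sketch (an assumption, not part of the original file): builds a
# single 3-node, 2-edge graph as a `GraphsTuple` of numpy arrays, then uses
# `map` to cast the feature fields. numpy is assumed available even though
# this module itself does not depend on it.
def _example_graphs_tuple():
  import numpy as np
  graph = GraphsTuple(
      nodes=np.zeros([3, 2]),  # 3 nodes with 2 features each.
      edges=np.zeros([2, 1]),  # 2 edges with 1 feature each.
      receivers=np.array([1, 2]),
      senders=np.array([0, 1]),
      globals=np.zeros([1, 4]),  # 1 graph with 4 global features.
      n_node=np.array([3]),
      n_edge=np.array([2]))
  # `map` applies the function to the NODES, EDGES and GLOBALS fields by
  # default.
  return graph.map(lambda x: x.astype(np.float32))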
| graph_nets-master | graph_nets/graphs.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow ops and helpers useful to manipulate graphs.
This module contains utility functions to operate with `Tensor` representations
of graphs, in particular:
- `placeholders_from_data_dicts` and `placeholders_from_networkx`
create placeholder structures to represent graphs;
  - `get_feed_dict` allows creating a `feed_dict` from a `graphs.GraphsTuple`
    containing numpy arrays and, potentially, `None` values;
- `data_dicts_to_graphs_tuple` converts between data dictionaries and
`graphs.GraphsTuple`;
- `fully_connect_graph_static` (resp. `fully_connect_graph_dynamic`) adds
edges to a `graphs.GraphsTuple` in a fully-connected manner, in the case
where the number of nodes per graph is known at graph construction time and
is the same for all graphs (resp. only known at runtime and may depend on
the graph);
- `set_zero_node_features`, `set_zero_edge_features` and
`set_zero_global_features` complete a `graphs.GraphsTuple` with a `Tensor`
of zeros for the nodes, edges and globals;
- `concat` batches `graphs.GraphsTuple` together (when using `axis=0`), or
concatenates them along their data dimension;
  - `repeat` is a convenient utility to broadcast globals to edges or nodes of
a graph;
- `get_graph` indexes or slices a `graphs.GraphsTuple` to extract a subgraph
or a subbatch of graphs;
- `stop_gradients` stops the gradients flowing through a graph;
- `identity` applies a `tf.identity` to every field of a graph;
  - `make_runnable_in_session` allows running a graph containing `None` fields
through a Tensorflow session.
The functions in these modules are able to deal with graphs containing `None`
fields (e.g. featureless nodes, featureless edges, or no edges).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl import logging
from graph_nets import graphs
from graph_nets import utils_np
import six
from six.moves import range
import tensorflow as tf
import tree
NODES = graphs.NODES
EDGES = graphs.EDGES
RECEIVERS = graphs.RECEIVERS
SENDERS = graphs.SENDERS
GLOBALS = graphs.GLOBALS
N_NODE = graphs.N_NODE
N_EDGE = graphs.N_EDGE
GRAPH_DATA_FIELDS = graphs.GRAPH_DATA_FIELDS
GRAPH_NUMBER_FIELDS = graphs.GRAPH_NUMBER_FIELDS
ALL_FIELDS = graphs.ALL_FIELDS
def _get_shape(tensor):
"""Returns the tensor's shape.
Each shape element is either:
- an `int`, when static shape values are available, or
- a `tf.Tensor`, when the shape is dynamic.
Args:
tensor: A `tf.Tensor` to get the shape of.
Returns:
The `list` which contains the tensor's shape.
"""
shape_list = tensor.shape.as_list()
if all(s is not None for s in shape_list):
return shape_list
shape_tensor = tf.shape(tensor)
return [shape_tensor[i] if s is None else s for i, s in enumerate(shape_list)]
def _build_placeholders_from_specs(dtypes,
shapes,
force_dynamic_num_graphs=True):
"""Creates a `graphs.GraphsTuple` of placeholders with `dtypes` and `shapes`.
The dtypes and shapes arguments are instances of `graphs.GraphsTuple` that
contain dtypes and shapes, or `None` values for the fields for which no
  placeholder should be created. The leading dimensions of the nodes and edges
  are
dynamic because the numbers of nodes and edges can vary.
If `force_dynamic_num_graphs` is True, then the number of graphs is assumed to
be dynamic and all fields leading dimensions are set to `None`.
If `force_dynamic_num_graphs` is False, then `N_NODE`, `N_EDGE` and `GLOBALS`
leading dimensions are statically defined.
Args:
dtypes: A `graphs.GraphsTuple` that contains `tf.dtype`s or `None`s.
shapes: A `graphs.GraphsTuple` that contains `list`s of integers,
`tf.TensorShape`s, or `None`s.
force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
dynamic. Defaults to `True`.
Returns:
A `graphs.GraphsTuple` containing placeholders.
Raises:
ValueError: The `None` fields in `dtypes` and `shapes` do not match.
"""
dct = {}
for field in ALL_FIELDS:
dtype = getattr(dtypes, field)
shape = getattr(shapes, field)
if dtype is None or shape is None:
if not (shape is None and dtype is None):
raise ValueError(
"only one of dtype and shape are None for field {}".format(field))
dct[field] = None
elif not shape:
raise ValueError("Shapes must have at least rank 1")
else:
shape = list(shape)
if field not in [N_NODE, N_EDGE, GLOBALS] or force_dynamic_num_graphs:
shape[0] = None
dct[field] = tf.placeholder(dtype, shape=shape, name=field)
return graphs.GraphsTuple(**dct)
def _placeholders_from_graphs_tuple(graph, force_dynamic_num_graphs=True):
"""Creates a `graphs.GraphsTuple` of placeholders that matches a numpy graph.
Args:
graph: A `graphs.GraphsTuple` that contains numpy data.
force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
dynamic. Defaults to `True`.
Returns:
A `graphs.GraphsTuple` containing placeholders.
"""
graph_dtypes = graph.map(
lambda v: tf.as_dtype(v.dtype) if v is not None else None, ALL_FIELDS)
graph_shapes = graph.map(lambda v: list(v.shape) if v is not None else None,
ALL_FIELDS)
return _build_placeholders_from_specs(
graph_dtypes,
graph_shapes,
force_dynamic_num_graphs=force_dynamic_num_graphs)
def get_feed_dict(placeholders, graph):
"""Feeds a `graphs.GraphsTuple` of numpy arrays or `None` into `placeholders`.
When feeding a fully defined graph (no `None` field) into a session, this
method is not necessary as one can directly do:
```
_ = sess.run(_, {placeholders: graph})
```
However, if the placeholders contain `None`, the above construction would
  fail. This method allows replacing the above call with
```
  _ = sess.run(_, get_feed_dict(placeholders, graph))
```
restoring the correct behavior.
Args:
placeholders: A `graphs.GraphsTuple` containing placeholders.
    graph: A `graphs.GraphsTuple` containing placeholder-compatible values,
or `None`s.
Returns:
A dictionary with key placeholders and values the fed in values.
Raises:
ValueError: If the `None` fields in placeholders and `graph` do not exactly
match.
"""
feed_dict = {}
for field in ALL_FIELDS:
placeholder = getattr(placeholders, field)
feed_value = getattr(graph, field)
if placeholder is None or feed_value is None:
if not (placeholder is None and feed_value is None):
raise ValueError("Field {} should be `None` in either none or both of "
"the placeholders and feed values.".format(field))
else:
feed_dict[placeholder] = feed_value
return feed_dict
def placeholders_from_data_dicts(data_dicts,
force_dynamic_num_graphs=True,
name="placeholders_from_data_dicts"):
"""Constructs placeholders compatible with a list of data dicts.
Args:
data_dicts: An iterable of data dicts containing numpy arrays.
force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
dynamic. Defaults to `True`.
name: (string, optional) A name for the operation.
Returns:
    An instance of `graphs.GraphsTuple` placeholders compatible with the
dimensions of the dictionaries in `data_dicts`.
"""
with tf.name_scope(name):
graph = data_dicts_to_graphs_tuple(data_dicts)
return _placeholders_from_graphs_tuple(
graph, force_dynamic_num_graphs=force_dynamic_num_graphs)
def placeholders_from_networkxs(graph_nxs,
node_shape_hint=None,
edge_shape_hint=None,
data_type_hint=tf.float32,
force_dynamic_num_graphs=True,
name="placeholders_from_networkxs"):
"""Constructs placeholders compatible with a list of networkx instances.
Given a list of networkxs instances, constructs placeholders compatible with
the shape of those graphs.
The networkx graph should be set up such that, for fixed shapes `node_shape`,
`edge_shape` and `global_shape`:
- `graph_nx.nodes(data=True)[i][-1]["features"]` is, for any node index i, a
tensor of shape `node_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["features"]` is, for any edge index i, a
tensor of shape `edge_shape`, or `None`;
- `graph_nx.edges(data=True)[i][-1]["index"]`, if present, defines the order
in which the edges will be sorted in the resulting `data_dict`;
- `graph_nx.graph["features"] is a tensor of shape `global_shape` or `None`.
Args:
graph_nxs: A container of `networkx.MultiDiGraph`s.
node_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain nodes, the trailing shape for the created `NODES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one node.
edge_shape_hint: (iterable of `int` or `None`, default=`None`) If the graph
does not contain edges, the trailing shape for the created `EDGES` field.
If `None` (the default), this field is left `None`. This is not used if
`graph_nx` contains at least one edge.
data_type_hint: (Tensorflow dtype, default=`tf.float32`) If the `NODES` or
`EDGES` fields are autocompleted, their type.
force_dynamic_num_graphs: A `bool` that forces the batch dimension to be
dynamic. Defaults to `True`.
name: (string, optional) A name for the operation.
Returns:
An instance of `graphs.GraphsTuple` placeholders compatible with the
dimensions of the graph_nxs.
"""
with tf.name_scope(name):
graph = utils_np.networkxs_to_graphs_tuple(graph_nxs, node_shape_hint,
edge_shape_hint,
data_type_hint.as_numpy_dtype)
return _placeholders_from_graphs_tuple(
graph, force_dynamic_num_graphs=force_dynamic_num_graphs)
def _compute_stacked_offsets(sizes, repeats):
"""Computes offsets to add to indices of stacked tensors (Tensorflow).
When a set of tensors are stacked, the indices of those from the second on
must be offset in order to be able to index into the stacked tensor. This
computes those offsets.
Args:
sizes: A 1D `Tensor` of the sizes per graph.
repeats: A 1D `Tensor` of the number of repeats per graph.
Returns:
A 1D `Tensor` containing the index offset per graph.
"""
sizes = tf.cast(tf.convert_to_tensor(sizes[:-1]), tf.int32)
offset_values = tf.cumsum(tf.concat([[0], sizes], 0))
return repeat(offset_values, repeats)
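# Editor's note: a small worked example of `_compute_stacked_offsets`, with
# illustrative values. For per-graph node counts `sizes = [2, 3, 4]` and
# per-graph edge counts `repeats = [1, 2, 2]`, the cumulative node offsets are
# [0, 2, 5] and the result is [0, 2, 2, 5, 5], i.e. the amount to add to each
# edge's node indices once the graphs are stacked:
#   _compute_stacked_offsets([2, 3, 4], [1, 2, 2])  # -> [0, 2, 2, 5, 5]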
def _nested_concatenate(input_graphs, field_name, axis):
"""Concatenates a possibly nested feature field of a list of input graphs."""
features_list = [getattr(gr, field_name) for gr in input_graphs
if getattr(gr, field_name) is not None]
if not features_list:
return None
if len(features_list) < len(input_graphs):
raise ValueError(
"All graphs or no graphs must contain {} features.".format(field_name))
name = "concat_" + field_name
return tree.map_structure(lambda *x: tf.concat(x, axis, name), *features_list)
def concat(input_graphs, axis, name="graph_concat"):
"""Returns an op that concatenates graphs along a given axis.
In all cases, the NODES, EDGES and GLOBALS dimension are concatenated
along `axis` (if a field is `None`, the concatenation is just a `None`).
If `axis` == 0, then the graphs are concatenated along the (underlying) batch
dimension, i.e. the RECEIVERS, SENDERS, N_NODE and N_EDGE fields of the tuples
are also concatenated together.
If `axis` != 0, then there is an underlying assumption that the RECEIVERS,
SENDERS, N_NODE and N_EDGE fields of the graphs in `input_graphs` should all
match, but this is not checked by this op.
The graphs in `input_graphs` should have the same set of keys for which the
corresponding field is not `None`.
Args:
input_graphs: A list of `graphs.GraphsTuple` objects containing `Tensor`s
and satisfying the constraints outlined above.
axis: An axis to concatenate on.
name: (string, optional) A name for the operation.
Returns:
An op that returns the concatenated graphs.
Raises:
ValueError: If `input_graphs` is an empty list, or if the fields which are
`None`
in `input_graphs` are not the same for all the graphs.
"""
if not input_graphs:
raise ValueError("List argument `input_graphs` is empty")
utils_np._check_valid_sets_of_keys([gr._asdict() for gr in input_graphs]) # pylint: disable=protected-access
if len(input_graphs) == 1:
return input_graphs[0]
with tf.name_scope(name):
nodes = _nested_concatenate(input_graphs, NODES, axis)
edges = _nested_concatenate(input_graphs, EDGES, axis)
globals_ = _nested_concatenate(input_graphs, GLOBALS, axis)
output = input_graphs[0].replace(nodes=nodes, edges=edges, globals=globals_)
if axis != 0:
return output
n_node_per_tuple = tf.stack(
[tf.reduce_sum(gr.n_node) for gr in input_graphs])
n_edge_per_tuple = tf.stack(
[tf.reduce_sum(gr.n_edge) for gr in input_graphs])
offsets = _compute_stacked_offsets(n_node_per_tuple, n_edge_per_tuple)
n_node = tf.concat(
[gr.n_node for gr in input_graphs], axis=0, name="concat_n_node")
n_edge = tf.concat(
[gr.n_edge for gr in input_graphs], axis=0, name="concat_n_edge")
receivers = [
gr.receivers for gr in input_graphs if gr.receivers is not None
]
receivers = receivers or None
if receivers:
receivers = tf.concat(receivers, axis, name="concat_receivers") + offsets
senders = [gr.senders for gr in input_graphs if gr.senders is not None]
senders = senders or None
if senders:
senders = tf.concat(senders, axis, name="concat_senders") + offsets
return output.replace(
receivers=receivers, senders=senders, n_node=n_node, n_edge=n_edge)
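# Editor's note: a minimal usage sketch for `concat`; `graphs_a` and `graphs_b`
# are hypothetical `GraphsTuple`s of tensors:
#   merged = concat([graphs_a, graphs_b], axis=0)
# Along axis 0 the two batches become one larger batch; the `receivers` and
# `senders` of the second batch are shifted by the total node count of the
# first, so each edge keeps pointing at its own graph's nodes.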
def stop_gradient(graph,
stop_edges=True,
stop_nodes=True,
stop_globals=True,
name="graph_stop_gradient"):
"""Stops the gradient flow through a graph.
Args:
graph: An instance of `graphs.GraphsTuple` containing `Tensor`s.
stop_edges: (bool, default=True) indicates whether to stop gradients for
the edges.
stop_nodes: (bool, default=True) indicates whether to stop gradients for
the nodes.
stop_globals: (bool, default=True) indicates whether to stop gradients for
the globals.
name: (string, optional) A name for the operation.
Returns:
GraphsTuple after stopping the gradients according to the provided
parameters.
Raises:
ValueError: If attempting to stop gradients through a field which has a
`None` value in `graph`.
"""
base_err_msg = "Cannot stop gradient through {0} if {0} are None"
fields_to_stop = []
if stop_globals:
if graph.globals is None:
raise ValueError(base_err_msg.format(GLOBALS))
fields_to_stop.append(GLOBALS)
if stop_nodes:
if graph.nodes is None:
raise ValueError(base_err_msg.format(NODES))
fields_to_stop.append(NODES)
if stop_edges:
if graph.edges is None:
raise ValueError(base_err_msg.format(EDGES))
fields_to_stop.append(EDGES)
with tf.name_scope(name):
return graph.map(tf.stop_gradient, fields_to_stop)
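# Editor's note: usage sketch for `stop_gradient`; stop gradients only through
# the node features while letting them flow through edges and globals (the
# corresponding fields must be non-`None`):
#   graph = stop_gradient(graph, stop_edges=False, stop_nodes=True,
#                         stop_globals=False)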
def identity(graph, name="graph_identity"):
"""Pass each element of a graph through a `tf.identity`.
This allows, for instance, to push a name scope on the graph by writing:
```
with tf.name_scope("encoder"):
graph = utils_tf.identity(graph)
```
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s. `None` values are passed
through.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` `graphs_output` such that for any field `x` in NODES,
EDGES, GLOBALS, RECEIVERS, SENDERS, N_NODE, N_EDGE, if `graph.x` was
`None`, `graph_output.x` is `None`, and otherwise
`graph_output.x = tf.identity(graph.x)`
"""
non_none_fields = [k for k in ALL_FIELDS if getattr(graph, k) is not None]
with tf.name_scope(name):
return graph.map(tf.identity, non_none_fields)
def make_runnable_in_session(graph, name="make_graph_runnable_in_session"):
"""Allows a graph containing `None` fields to be run in a `tf.Session`.
The `None` values of `graph` are replaced by `tf.no_op()`. This function is
meant to be called just before a call to `sess.run` on a Tensorflow session
`sess`, as `None` values currently cannot be run through a session.
Args:
graph: A `graphs.GraphsTuple` containing `Tensor`s or `None` values.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` `graph_output` such that, for any field `x` in NODES,
EDGES, GLOBALS, RECEIVERS, SENDERS, N_NODE, N_EDGE, and a Tensorflow session
`sess`, if `graph.x` was `None`, `sess.run(graph_output).x` is `None`, and
otherwise it evaluates to the value of `graph.x`.
"""
none_fields = [k for k in ALL_FIELDS if getattr(graph, k) is None]
with tf.name_scope(name):
return graph.map(lambda _: tf.no_op(), none_fields)
def repeat(tensor, repeats, axis=0, name="repeat", sum_repeats_hint=None):
"""Repeats a `tf.Tensor`'s elements along an axis by custom amounts.
Equivalent to Numpy's `np.repeat`.
`tensor` and `repeats` must have the same number of elements along `axis`.
Args:
tensor: A `tf.Tensor` to repeat.
repeats: A 1D sequence of the number of repeats per element.
axis: An axis to repeat along. Defaults to 0.
name: (string, optional) A name for the operation.
sum_repeats_hint: Integer with the total sum of repeats in case it is
known at graph definition time.
Returns:
The `tf.Tensor` with repeated values.
"""
with tf.name_scope(name):
if sum_repeats_hint is not None:
sum_repeats = sum_repeats_hint
else:
sum_repeats = tf.reduce_sum(repeats)
# This is TPU compatible.
# Create a tensor consistent with output size indicating where the splits
# between the different repeats are. For example:
# repeats = [2, 3, 6]
# with cumsum(exclusive=True):
# scatter_indices = [0, 2, 5]
# with scatter_nd:
# block_split_indicators = [1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0]
# cumsum(exclusive=False) - 1
# gather_indices = [0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 2]
# Note that scatter_nd accumulates for duplicated indices, so for
# repeats = [2, 0, 6]
# scatter_indices = [0, 2, 2]
# block_split_indicators = [1, 0, 2, 0, 0, 0, 0, 0]
# gather_indices = [0, 0, 2, 2, 2, 2, 2, 2]
# Sometimes repeats may have zeros in the last groups. E.g.
# for repeats = [2, 3, 0]
# scatter_indices = [0, 2, 5]
# However, the `gather_nd` only goes up to (sum_repeats - 1) index. (4 in
# the example). And this would throw an error due to trying to index
# outside the shape. Instead we let the scatter_nd have one more element
# and we trim it from the output.
scatter_indices = tf.cumsum(repeats, exclusive=True)
block_split_indicators = tf.scatter_nd(
indices=tf.expand_dims(scatter_indices, axis=1),
updates=tf.ones_like(scatter_indices),
shape=[sum_repeats + 1])[:-1]
gather_indices = tf.cumsum(block_split_indicators, exclusive=False) - 1
# An alternative implementation of the same, where block split indicators
# does not have an indicator for the first group, and requires less ops
# but requires creating a matrix of size [len(repeats), sum_repeats] is:
# cumsum_repeats = tf.cumsum(repeats, exclusive=False)
# block_split_indicators = tf.reduce_sum(
# tf.one_hot(cumsum_repeats, sum_repeats, dtype=tf.int32), axis=0)
# gather_indices = tf.cumsum(block_split_indicators, exclusive=False)
# Now simply gather the tensor along the correct axis.
repeated_tensor = tf.gather(tensor, gather_indices, axis=axis)
shape = tensor.shape.as_list()
shape[axis] = sum_repeats_hint
repeated_tensor.set_shape(shape)
return repeated_tensor
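# Editor's note: a small worked example of `repeat`, mirroring `np.repeat`:
#   repeat(tf.constant([10, 20, 30]), tf.constant([1, 0, 2]))  # -> [10, 30, 30]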
def _populate_number_fields(data_dict):
"""Returns a dict with the number fields N_NODE, N_EDGE filled in.
The N_NODE field is filled if the graph contains a non-`None` NODES field;
otherwise, it is set to 0.
The N_EDGE field is filled if the graph contains a non-`None` RECEIVERS field;
otherwise, it is set to 0.
Args:
data_dict: An input `dict`.
Returns:
The data `dict` with number fields.
"""
dct = data_dict.copy()
for number_field, data_field in [[N_NODE, NODES], [N_EDGE, RECEIVERS]]:
if dct.get(number_field) is None:
if dct[data_field] is not None:
dct[number_field] = tf.shape(dct[data_field])[0]
else:
dct[number_field] = tf.constant(0, dtype=tf.int32)
return dct
def _to_compatible_data_dicts(data_dicts):
"""Convert the content of `data_dicts` to tensors of the right type.
All fields are converted to `Tensor`s. The index fields (`SENDERS` and
`RECEIVERS`) and number fields (`N_NODE`, `N_EDGE`) are cast to `tf.int32`.
Args:
data_dicts: An iterable of dictionaries with keys in `ALL_FIELDS` and
values either `None`s, or quantities that can be converted to `Tensor`s.
Returns:
A list of dictionaries containing `Tensor`s or `None`s.
"""
results = []
for data_dict in data_dicts:
result = {}
for k, v in data_dict.items():
if v is None:
result[k] = None
else:
dtype = tf.int32 if k in [SENDERS, RECEIVERS, N_NODE, N_EDGE] else None
result[k] = tf.convert_to_tensor(v, dtype)
results.append(result)
return results
def _concatenate_data_dicts(data_dicts):
"""Concatenate a list of data dicts to create the equivalent batched graph.
Args:
data_dicts: An iterable of data dictionaries with keys a subset of
`GRAPH_DATA_FIELDS`, plus, potentially, a subset of `GRAPH_NUMBER_FIELDS`.
Every element of `data_dicts` has to contain the same set of keys.
Moreover, the key `NODES` or `N_NODE` must be present in every element of
`data_dicts`.
Returns:
A data dictionary with the keys `GRAPH_DATA_FIELDS + GRAPH_NUMBER_FIELDS`,
representing the concatenated graphs.
Raises:
ValueError: If two dictionaries in `data_dicts` have a different set of
keys.
"""
# Go from a list of dict to a dict of lists
dct = collections.defaultdict(lambda: [])
for data_dict in data_dicts:
data_dict = _populate_number_fields(data_dict)
for k, v in data_dict.items():
if v is not None:
dct[k].append(v)
elif k not in dct:
dct[k] = None
dct = dict(dct)
# Concatenate the graphs.
for field, tensors in dct.items():
if tensors is None:
dct[field] = None
elif field in list(GRAPH_NUMBER_FIELDS) + [GLOBALS]:
dct[field] = tf.stack(tensors)
else:
dct[field] = tf.concat(tensors, axis=0)
# Add offsets to the receiver and sender indices.
if dct[RECEIVERS] is not None:
offset = _compute_stacked_offsets(dct[N_NODE], dct[N_EDGE])
dct[RECEIVERS] += offset
dct[SENDERS] += offset
return dct
def _create_complete_edges_from_nodes_static(n_node, exclude_self_edges):
"""Creates complete edges for a graph with `n_node`.
Args:
n_node: (python integer) The number of nodes.
exclude_self_edges: (bool) Excludes self-connected edges.
Returns:
A dict of RECEIVERS, SENDERS and N_EDGE data (`Tensor`s of rank 1).
"""
receivers = []
senders = []
n_edges = 0
for node_1 in range(n_node):
for node_2 in range(n_node):
if not exclude_self_edges or node_1 != node_2:
receivers.append(node_1)
senders.append(node_2)
n_edges += 1
return {
RECEIVERS: tf.constant(receivers, dtype=tf.int32),
SENDERS: tf.constant(senders, dtype=tf.int32),
N_EDGE: tf.constant([n_edges], dtype=tf.int32)
}
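# Editor's note: worked example. For `n_node=3` and `exclude_self_edges=False`
# this returns RECEIVERS [0, 0, 0, 1, 1, 1, 2, 2, 2], SENDERS
# [0, 1, 2, 0, 1, 2, 0, 1, 2] and N_EDGE [9]; with `exclude_self_edges=True`
# the three self-loops are dropped and N_EDGE becomes [6].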
def _create_complete_edges_from_nodes_dynamic(n_node, exclude_self_edges):
"""Creates complete edges for a graph with `n_node`.
Args:
n_node: (integer scalar `Tensor`) The number of nodes.
exclude_self_edges: (bool) Excludes self-connected edges.
Returns:
A dict of RECEIVERS, SENDERS and N_EDGE data (`Tensor`s of rank 1).
"""
rng = tf.range(n_node)
receivers, senders = tf.meshgrid(rng, rng)
n_edge = n_node * n_node
if exclude_self_edges:
ind = tf.cast(1 - tf.eye(n_node), bool)
receivers = tf.boolean_mask(receivers, ind)
senders = tf.boolean_mask(senders, ind)
n_edge -= n_node
receivers = tf.reshape(tf.cast(receivers, tf.int32), [n_edge])
senders = tf.reshape(tf.cast(senders, tf.int32), [n_edge])
n_edge = tf.reshape(n_edge, [1])
return {RECEIVERS: receivers, SENDERS: senders, N_EDGE: n_edge}
def _validate_edge_fields_are_all_none(graph):
if not all(getattr(graph, x) is None for x in [EDGES, RECEIVERS, SENDERS]):
raise ValueError("Can only add fully connected a graph with `None`"
"edges, receivers and senders")
def fully_connect_graph_static(graph,
exclude_self_edges=False,
name="fully_connect_graph_static"):
"""Adds edges to a graph by fully-connecting the nodes.
This method can be used if the number of nodes for each graph in `graph` is
constant and known at graph building time: it will be inferred by dividing
the number of nodes in the batch (the length of `graph.nodes`) by the number of
graphs in the batch (the length of `graph.n_node`). It is an error to call
this method with batches of graphs with dynamic or uneven sizes; in the latter
case, the method may silently yield an incorrect result.
Args:
graph: A `graphs.GraphsTuple` with `None` values for the edges, senders and
receivers.
exclude_self_edges (default=False): Excludes self-connected edges.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` containing `Tensor`s with fully-connected edges.
Raises:
ValueError: If any of the `EDGES`, `RECEIVERS` or `SENDERS` field is not
`None` in `graph`.
ValueError: If the number of graphs (extracted from `graph.n_node` leading
dimension) or number of nodes (extracted from `graph.nodes` leading
dimension) is not known at construction time, or if the former does not
divide the latter (observe that this is only a necessary condition for
the number of nodes per graph being constant).
"""
_validate_edge_fields_are_all_none(graph)
num_graphs = graph.n_node.shape.as_list()[0]
if num_graphs is None:
raise ValueError("Number of graphs must be known at construction time when "
"using `fully_connect_graph_static`. Did you mean to use "
"`fully_connect_graph_dynamic`?")
num_nodes = graph.nodes.shape.as_list()[0]
if num_nodes is None:
raise ValueError("Number of nodes must be known at construction time when "
"using `fully_connect_graph_static`. Did you mean to use "
"`fully_connect_graph_dynamic`?")
if num_nodes % num_graphs != 0:
raise ValueError("Number of nodes must be the same in all graphs when "
"using `fully_connect_graph_static`. Did you mean to use "
"`fully_connect_graph_dynamic`?")
num_nodes_per_graph = num_nodes // num_graphs
with tf.name_scope(name):
one_graph_edges = _create_complete_edges_from_nodes_static(
num_nodes_per_graph, exclude_self_edges)
n_edges = num_nodes_per_graph * (num_nodes_per_graph - 1)
if not exclude_self_edges:
n_edges += num_nodes_per_graph
all_graph_edges = {
k: tf.tile(v, [num_graphs]) for k, v in six.iteritems(one_graph_edges)
}
offsets = [
num_nodes_per_graph * i # pylint: disable=g-complex-comprehension
for i in range(num_graphs)
for _ in range(n_edges)
]
all_graph_edges[RECEIVERS] += offsets
all_graph_edges[SENDERS] += offsets
return graph.replace(**all_graph_edges)
def fully_connect_graph_dynamic(graph,
exclude_self_edges=False,
name="fully_connect_graph_dynamic"):
"""Adds edges to a graph by fully-connecting the nodes.
This method does not require the number of nodes per graph to be constant,
or to be known at graph building time.
Args:
graph: A `graphs.GraphsTuple` with `None` values for the edges, senders and
receivers.
exclude_self_edges (default=False): Excludes self-connected edges.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` containing `Tensor`s with fully-connected edges.
Raises:
ValueError: if any of the `EDGES`, `RECEIVERS` or `SENDERS` field is not
`None` in `graph`.
"""
_validate_edge_fields_are_all_none(graph)
with tf.name_scope(name):
def body(i, senders, receivers, n_edge):
edges = _create_complete_edges_from_nodes_dynamic(graph.n_node[i],
exclude_self_edges)
return (i + 1, senders.write(i, edges[SENDERS]),
receivers.write(i, edges[RECEIVERS]),
n_edge.write(i, edges[N_EDGE]))
num_graphs = get_num_graphs(graph)
loop_condition = lambda i, *_: tf.less(i, num_graphs)
initial_loop_vars = [0] + [
tf.TensorArray(dtype=tf.int32, size=num_graphs, infer_shape=False)
for _ in range(3) # senders, receivers, n_edge
]
_, senders_array, receivers_array, n_edge_array = tf.while_loop(
loop_condition, body, initial_loop_vars, back_prop=False)
n_edge = n_edge_array.concat()
offsets = _compute_stacked_offsets(graph.n_node, n_edge)
senders = senders_array.concat() + offsets
receivers = receivers_array.concat() + offsets
senders.set_shape(offsets.shape)
receivers.set_shape(offsets.shape)
receivers.set_shape([None])
senders.set_shape([None])
num_graphs = graph.n_node.get_shape().as_list()[0]
n_edge.set_shape([num_graphs])
return graph._replace(senders=senders, receivers=receivers, n_edge=n_edge)
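# Editor's note: a minimal usage sketch, assuming a hypothetical `node_features`
# tensor; the input graph must have `None` edges, senders and receivers:
#   graph = data_dicts_to_graphs_tuple([{"nodes": node_features}])
#   graph = fully_connect_graph_dynamic(graph, exclude_self_edges=True)
# The result has `senders`/`receivers` for every ordered pair of distinct nodes
# within each graph, with `n_edge` filled in accordingly; `edges` stays `None`.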
def set_zero_node_features(graph,
node_size,
dtype=tf.float32,
name="set_zero_node_features"):
"""Completes the node state of a graph.
Args:
graph: A `graphs.GraphsTuple` with a `None` node state.
node_size: (int) the dimension for the created node features.
dtype: (tensorflow type) the type for the created nodes features.
name: (string, optional) A name for the operation.
Returns:
The same graph but for the node field, which is a `Tensor` of shape
`[number_of_nodes, node_size]` where `number_of_nodes = sum(graph.n_node)`,
with type `dtype`, filled with zeros.
Raises:
ValueError: If the `NODES` field is not None in `graph`.
ValueError: If `node_size` is None.
"""
if graph.nodes is not None:
raise ValueError(
"Cannot complete node state if the graph already has node features.")
if node_size is None:
raise ValueError("Cannot complete nodes with None node_size")
with tf.name_scope(name):
n_nodes = tf.reduce_sum(graph.n_node)
return graph._replace(
nodes=tf.zeros(shape=[n_nodes, node_size], dtype=dtype))
def set_zero_edge_features(graph,
edge_size,
dtype=tf.float32,
name="set_zero_edge_features"):
"""Completes the edge state of a graph.
Args:
graph: A `graphs.GraphsTuple` with a `None` edge state.
edge_size: (int) the dimension for the created edge features.
dtype: (tensorflow type) the type for the created edge features.
name: (string, optional) A name for the operation.
Returns:
The same graph but for the edge field, which is a `Tensor` of shape
`[number_of_edges, edge_size]`, where `number_of_edges = sum(graph.n_edge)`,
with type `dtype` and filled with zeros.
Raises:
ValueError: If the `EDGES` field is not None in `graph`.
ValueError: If the `RECEIVERS` or `SENDERS` field are None in `graph`.
ValueError: If `edge_size` is None.
"""
if graph.edges is not None:
raise ValueError(
"Cannot complete edge state if the graph already has edge features.")
if graph.receivers is None or graph.senders is None:
raise ValueError(
"Cannot complete edge state if the receivers or senders are None.")
if edge_size is None:
raise ValueError("Cannot complete edges with None edge_size")
with tf.name_scope(name):
senders_leading_size = graph.senders.shape.as_list()[0]
if senders_leading_size is not None:
n_edges = senders_leading_size
else:
n_edges = tf.reduce_sum(graph.n_edge)
return graph._replace(
edges=tf.zeros(shape=[n_edges, edge_size], dtype=dtype))
def set_zero_global_features(graph,
global_size,
dtype=tf.float32,
name="set_zero_global_features"):
"""Completes the global state of a graph.
Args:
graph: A `graphs.GraphsTuple` with a `None` global state.
global_size: (int) the dimension for the created global features.
dtype: (tensorflow type) the type for the created global features.
name: (string, optional) A name for the operation.
Returns:
The same graph but for the global field, which is a `Tensor` of shape
`[num_graphs, global_size]`, type `dtype` and filled with zeros.
Raises:
ValueError: If the `GLOBALS` field of `graph` is not `None`.
ValueError: If `global_size` is `None`.
"""
if graph.globals is not None:
raise ValueError(
"Cannot complete global state if graph already has global features.")
if global_size is None:
raise ValueError("Cannot complete globals with None global_size")
with tf.name_scope(name):
n_graphs = get_num_graphs(graph)
return graph._replace(
globals=tf.zeros(shape=[n_graphs, global_size], dtype=dtype))
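# Editor's note: usage sketch. The three `set_zero_*` helpers are commonly
# chained to replace `None` feature fields, e.g. before building a signature
# with `specs_from_graphs_tuple` (feature sizes below are illustrative; note
# that `set_zero_edge_features` requires senders and receivers to be present):
#   graph = set_zero_node_features(graph, node_size=1)
#   graph = set_zero_edge_features(graph, edge_size=1)
#   graph = set_zero_global_features(graph, global_size=1)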
def data_dicts_to_graphs_tuple(data_dicts, name="data_dicts_to_graphs_tuple"):
"""Creates a `graphs.GraphsTuple` containing tensors from data dicts.
All dictionaries must have exactly the same set of keys with non-`None`
values associated with them. Moreover, this set of keys must define a valid
graph (i.e. if the `EDGES` are `None`, the `SENDERS` and `RECEIVERS` must be
`None`, and `SENDERS` and `RECEIVERS` can only be `None` both at the same
time). The values associated with a key must be convertible to `Tensor`s,
for instance python lists, numpy arrays, or Tensorflow `Tensor`s.
This method may perform a memory copy.
The `RECEIVERS`, `SENDERS`, `N_NODE` and `N_EDGE` fields are cast to
`tf.int32` type.
Args:
data_dicts: An iterable of data dictionaries with keys in `ALL_FIELDS`.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` representing the graphs in `data_dicts`.
"""
data_dicts = [dict(d) for d in data_dicts]
for key in ALL_FIELDS:
for data_dict in data_dicts:
data_dict.setdefault(key, None)
utils_np._check_valid_sets_of_keys(data_dicts) # pylint: disable=protected-access
with tf.name_scope(name):
data_dicts = _to_compatible_data_dicts(data_dicts)
return graphs.GraphsTuple(**_concatenate_data_dicts(data_dicts))
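# Editor's note: a minimal usage sketch; a single triangle graph described as a
# data dict (feature values here are illustrative):
#   data_dict = {
#       "nodes": [[0.], [1.], [2.]],
#       "edges": [[1.], [1.], [1.]],
#       "senders": [0, 1, 2],
#       "receivers": [1, 2, 0],
#       "globals": [0.],
#   }
#   graphs_tuple = data_dicts_to_graphs_tuple([data_dict])
# The `n_node` and `n_edge` fields are filled in automatically ([3] and [3]).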
def _check_valid_index(index, element_name):
"""Verifies if a value with `element_name` is a valid index."""
if isinstance(index, int):
return True
elif isinstance(index, tf.Tensor):
if index.dtype != tf.int32 and index.dtype != tf.int64:
raise TypeError(
"Invalid tensor `{}` parameter. Valid tensor indices must have "
"types tf.int32 or tf.int64, got {}."
.format(element_name, index.dtype))
if index.shape.as_list():
raise TypeError(
"Invalid tensor `{}` parameter. Valid tensor indices must be scalars "
"with shape [], got{}"
.format(element_name, index.shape.as_list()))
return True
else:
raise TypeError(
"Invalid `{}` parameter. Valid tensor indices must be integers "
"or tensors, got {}."
.format(element_name, type(index)))
def get_graph(input_graphs, index, name="get_graph"):
"""Indexes into a graph.
Given a `graphs.GraphsTuple` containing `Tensor`s and an index (either
an `int` or a `slice`), indexes into the nodes, edges and globals to extract
the graphs specified by the index, and returns them in another instance of a
`graphs.GraphsTuple` containing `Tensor`s.
Args:
input_graphs: A `graphs.GraphsTuple` containing `Tensor`s.
index: An `int`, a `slice`, a tensor `int` or a tensor `slice`, to index
into `input_graphs`. `index` should be compatible with the number of graphs in
`input_graphs`. The `step` parameter of `slice` objects must be None.
name: (string, optional) A name for the operation.
Returns:
A `graphs.GraphsTuple` containing `Tensor`s, made of the extracted
graph(s).
Raises:
TypeError: if `index` is not an `int`, a `slice`, or corresponding tensor
types.
ValueError: if `index` is a slice and `index.step` is not None.
"""
def safe_slice_none(value, slice_):
if value is None:
return value
return value[slice_]
if isinstance(index, (int, tf.Tensor)):
_check_valid_index(index, "index")
graph_slice = slice(index, index + 1)
elif (isinstance(index, slice) and
_check_valid_index(index.stop, "index.stop") and
(index.start is None or _check_valid_index(
index.start, "index.start"))):
if index.step is not None:
raise ValueError("slices with step/stride are not supported, got {}"
.format(index))
graph_slice = index
else:
raise TypeError(
"unsupported index type got {} with type {}. Index must be a valid "
"scalar integer (tensor or int) or a slice of such values."
.format(index, type(index)))
start_slice = slice(0, graph_slice.start)
with tf.name_scope(name):
start_node_index = tf.reduce_sum(
input_graphs.n_node[start_slice], name="start_node_index")
start_edge_index = tf.reduce_sum(
input_graphs.n_edge[start_slice], name="start_edge_index")
end_node_index = start_node_index + tf.reduce_sum(
input_graphs.n_node[graph_slice], name="end_node_index")
end_edge_index = start_edge_index + tf.reduce_sum(
input_graphs.n_edge[graph_slice], name="end_edge_index")
nodes_slice = slice(start_node_index, end_node_index)
edges_slice = slice(start_edge_index, end_edge_index)
sliced_graphs_dict = {}
for field in set(GRAPH_NUMBER_FIELDS) | {"globals"}:
sliced_graphs_dict[field] = safe_slice_none(
getattr(input_graphs, field), graph_slice)
field = "nodes"
sliced_graphs_dict[field] = safe_slice_none(
getattr(input_graphs, field), nodes_slice)
for field in {"edges", "senders", "receivers"}:
sliced_graphs_dict[field] = safe_slice_none(
getattr(input_graphs, field), edges_slice)
if (field in {"senders", "receivers"} and
sliced_graphs_dict[field] is not None):
sliced_graphs_dict[field] = sliced_graphs_dict[field] - start_node_index
return graphs.GraphsTuple(**sliced_graphs_dict)
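# Editor's note: usage sketch for `get_graph` on a hypothetical batched
# `GraphsTuple` named `batch`:
#   single = get_graph(batch, 1)           # a batch containing only graph 1
#   pair = get_graph(batch, slice(1, 3))   # a batch containing graphs 1 and 2
# Node indices in `senders`/`receivers` are re-based so that they index into
# the sliced `nodes` tensor.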
def get_num_graphs(input_graphs, name="get_num_graphs"):
"""Returns the number of graphs (i.e. the batch size) in `input_graphs`.
Args:
input_graphs: A `graphs.GraphsTuple` containing tensors.
name: (string, optional) A name for the operation.
Returns:
An `int` (if a static number of graphs is defined) or a `tf.Tensor` (if the
number of graphs is dynamic).
"""
with tf.name_scope(name):
return _get_shape(input_graphs.n_node)[0]
def nest_to_numpy(nest_of_tensors):
"""Converts a nest of eager tensors to a nest of numpy arrays.
Leaves non-`tf.Tensor` elements untouched.
A common use case for this method is to transform a `graphs.GraphsTuple` of
tensors into a `graphs.GraphsTuple` of arrays, or nests containing
`graphs.GraphsTuple`s.
Args:
nest_of_tensors: Nest containing `tf.Tensor`s.
Returns:
A nest with the same structure where `tf.Tensor`s are replaced by numpy
arrays and all other elements are kept the same.
"""
return tree.map_structure(
lambda x: x.numpy() if isinstance(x, tf.Tensor) else x,
nest_of_tensors)
def specs_from_graphs_tuple(
graphs_tuple_sample,
dynamic_num_graphs=False,
dynamic_num_nodes=True,
dynamic_num_edges=True,
description_fn=tf.TensorSpec,
):
"""Returns the `TensorSpec` specification for a given `GraphsTuple`.
This method is often used with `tf.function` in Tensorflow 2 to obtain
improved speed and performance of eager code. For example:
```
example_graphs_tuple = get_graphs_tuple(...)
@tf.function(input_signature=[specs_from_graphs_tuple(example_graphs_tuple)])
def forward_pass(graphs_tuple_input):
graphs_tuple_output = graph_network(graphs_tuple_input)
return graphs_tuple_output
for i in range(num_training_steps):
input = get_graphs_tuple(...)
with tf.GradientTape() as tape:
output = forward_pass(input)
loss = compute_loss(output)
grads = tape.gradient(loss, graph_network.trainable_variables)
optimizer.apply(grads, graph_network.trainable_variables)
```
Args:
graphs_tuple_sample: A `graphs.GraphsTuple` with sample data. `GraphsTuple`s
that have fields with `None` are not accepted since they will create an
invalid signature specification for `tf.function`. If your graph has
`None`s use `utils_tf.set_zero_edge_features`,
`utils_tf.set_zero_node_features` or `utils_tf.set_zero_global_features`.
This method also returns the signature for `GraphsTuple`s with nests of
tensors in the feature fields (`nodes`, `edges`, `globals`), including
empty nests (e.g. empty list, dict, or tuple). Nested node, edge and
global feature tensors, should usually have the same leading dimension as
all other node, edge and global feature tensors respectively.
dynamic_num_graphs: Boolean indicating if the number of graphs in each
`GraphsTuple` will be variable across examples.
dynamic_num_nodes: Boolean indicating if number of nodes per graph will be
variable across examples. Not used if `dynamic_num_graphs` is True, as the
size of the first axis of all `GraphsTuple` fields will be variable, due
to the variable number of graphs.
dynamic_num_edges: Boolean indicating if number of edges per graph will be
variable across examples. Not used if dynamic_num_graphs is True, as the
size of the first axis of all `GraphsTuple` fields will be variable, due
to the variable number of graphs.
description_fn: A callable which accepts the dtype and shape arguments to
describe the shapes and types of tensors. By default uses `tf.TensorSpec`.
Returns:
A `GraphsTuple` with tensors replaced by `TensorSpec` with shape and dtype
of the field contents.
Raises:
ValueError: If a `GraphsTuple` has a field with `None`.
"""
graphs_tuple_description_fields = {}
edge_dim_fields = [graphs.EDGES, graphs.SENDERS, graphs.RECEIVERS]
# Method to get the spec for a single tensor.
def get_tensor_spec(tensor, field_name):
"""Returns the spec of an array or a tensor in the field of a graph."""
shape = list(tensor.shape)
dtype = tensor.dtype
# If the field is not None but has no field shape (i.e. it is a constant)
# then we consider this to be a replaced `None`.
# If dynamic_num_graphs, then all fields have a None first dimension.
# If dynamic_num_nodes, then the "nodes" field needs None first dimension.
# If dynamic_num_edges, then the "edges", "senders" and "receivers" need
# a None first dimension.
if (shape and (
dynamic_num_graphs or
(dynamic_num_nodes and field_name == graphs.NODES) or
(dynamic_num_edges and field_name in edge_dim_fields))):
shape[0] = None
return description_fn(shape=shape, dtype=dtype)
for field_name in graphs.ALL_FIELDS:
field_sample = getattr(graphs_tuple_sample, field_name)
if field_sample is None:
raise ValueError(
"The `GraphsTuple` field `{}` was `None`. All fields of the "
"`GraphsTuple` must be specified to create valid signatures that"
"work with `tf.function`. This can be achieved with `input_graph = "
"utils_tf.set_zero_{{node,edge,global}}_features(input_graph, 0)`"
"to replace None's by empty features in your graph. Alternatively"
"`None`s can be replaced by empty lists by doing `input_graph = "
"input_graph.replace({{nodes,edges,globals}}=[]). To ensure "
"correct execution of the program, it is recommended to restore "
"the None's once inside of the `tf.function` by doing "
"`input_graph = input_graph.replace({{nodes,edges,globals}}=None)"
"".format(field_name))
if field_name in graphs.GRAPH_FEATURE_FIELDS:
field_spec = tree.map_structure(
functools.partial(get_tensor_spec, field_name=field_name),
field_sample)
else:
field_spec = get_tensor_spec(field_sample, field_name=field_name)
graphs_tuple_description_fields[field_name] = field_spec
return graphs.GraphsTuple(**graphs_tuple_description_fields)
# Convenience data container for carrying around padding.
GraphsTupleSize = collections.namedtuple(
"GraphsTupleSize", ["num_nodes", "num_edges", "num_graphs"])
# Mapping indicating the leading size of `GraphsTuple` fields according to the
# number of nodes/edges/graphs in the `GraphsTuple`.
_GRAPH_ATTRIBUTE_TO_SIZE_MAP = {
graphs.NODES: "num_nodes",
graphs.EDGES: "num_edges",
graphs.RECEIVERS: "num_edges",
graphs.SENDERS: "num_edges",
graphs.GLOBALS: "num_graphs",
graphs.N_NODE: "num_graphs",
graphs.N_EDGE: "num_graphs"
}
def _get_field_size_from_size_tuple(size_tuple, graphs_field_name):
field_size_name = _GRAPH_ATTRIBUTE_TO_SIZE_MAP[graphs_field_name]
return getattr(size_tuple, field_size_name)
def _assert_if_space_for_first_padding_graph(
graphs_tuple, graphs_tuple_padded_sizes):
"""Checks if a given graph can fit in the provided padded shape.
Args:
graphs_tuple: A `graphs.GraphsTuple` that is checked for size.
graphs_tuple_padded_sizes: A `GraphsTupleSize` with the sized to pad to.
Returns:
An assertion op indicating whether there is space for the padding graph.
"""
# Padding graph needs to have at least one graph, and at least one node,
# but should not need extra edges, so the number of padded nodes and graphs
# needs to be strictly larger than the input sizes, but it is OK if the
# number of padded edges are equal to the number of input edges.
graphs_tuple_sizes = get_graphs_tuple_size(graphs_tuple)
all_fields_fit = [
tf.less_equal(graphs_tuple_sizes.num_edges,
graphs_tuple_padded_sizes.num_edges),
tf.less(graphs_tuple_sizes.num_nodes,
graphs_tuple_padded_sizes.num_nodes),
tf.less(graphs_tuple_sizes.num_graphs,
graphs_tuple_padded_sizes.num_graphs),
]
all_fields_fit = functools.reduce(tf.math.logical_and, all_fields_fit)
return tf.Assert(all_fields_fit, [
"There is not enough space to pad the GraphsTuple "
" with sizes (#nodes, #edges, #graphs):", graphs_tuple_sizes,
" to padded sizes of :", graphs_tuple_padded_sizes,
"`pad_edges_to` must be larger or equal to the maximum number of edges "
"in any `GraphsTuple` and `pad_nodes_to`/`pad_graphs_to` must be "
"strictly larger than the maximum number of nodes/graphs in any "
"`GraphsTuple`."
])
def get_graphs_tuple_size(graphs_tuple):
"""Calculates the total nodes, edges and graphs in a graph batch.
Args:
graphs_tuple: A `GraphsTuple`.
Returns:
A `GraphsTupleSize` object containing the total number of nodes, edges and
graphs in the `GraphsTuple`. Each value is a scalar integer `tf.Tensor`.
"""
num_nodes = tf.reduce_sum(graphs_tuple.n_node)
num_edges = tf.reduce_sum(graphs_tuple.n_edge)
num_graphs = tf.shape(graphs_tuple.n_node)[0]
return GraphsTupleSize(num_nodes, num_edges, num_graphs)
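# Editor's note: usage sketch; the returned namedtuple is typically used to
# build padding targets or masks (here `pad_nodes_to` is a hypothetical static
# padding size):
#   sizes = get_graphs_tuple_size(graphs_tuple)
#   nodes_mask = get_mask(sizes.num_nodes, pad_nodes_to)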
def _get_required_padding_sizes(graphs_tuple, padded_size):
"""Gets the padding size, given a GraphsTuple and the total padded sizes."""
graph_size = get_graphs_tuple_size(graphs_tuple)
return GraphsTupleSize(*(b - c for b, c in zip(padded_size, graph_size)))
def get_mask(valid_length, full_length):
"""Returns a mask given the valid length of a vector with trailing padding.
This is useful for masking out padded elements from a loss. For example
```
input_graphs_tuple = ...
input_graphs_tuple_size = get_graphs_tuple_size(input_graphs_tuple)
padded_input_graphs_tuple = pad_graphs_tuple(input_graphs_tuple,
pad_nodes_to,...)
per_node_loss # After graph_network computation.
nodes_mask = get_mask(
input_graphs_tuple_size.num_nodes, pad_nodes_to)
masked_per_node_loss = per_node_loss * tf.cast(
nodes_mask, per_node_loss.dtype)
```
Args:
valid_length: Length of the valid elements.
full_length: Full length of the vector after padding.
Returns:
Boolean mask of shape `[full_length]`, where the first `valid_length` values
are set to `True` and the remaining `full_length - valid_length` values are
set to `False`.
"""
valid_length = tf.cast(valid_length, tf.int32)
full_length = tf.cast(full_length, tf.int32)
# This implementation allows for statically sized shapes, rather than
# using concat([tf.ones([valid_length]), tf.zeros([full_length - valid_length])])
# which has intermediate tensors with shapes not known statically.
field_mask = tf.range(full_length)
field_mask = field_mask < valid_length
return field_mask
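# Editor's note: a small worked example of `get_mask`:
#   get_mask(3, 5)  # -> [True, True, True, False, False]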
def remove_graphs_tuple_padding(padded_graphs_tuple, valid_size):
"""Strips a padded `GraphsTuple` of padding.
Given a graph that has been padded by `padding` amount, remove the padding
to recover the original graph.
Often used in the sequence:
```
graphs_tuple_size = get_graphs_tuple_size(graphs_tuple)
padded_graphs_tuple = pad_graphs_tuple(graphs_tuple,
pad_nodes_to=x,
pad_edges_to=y,
pad_graphs_to=z)
unpadded_graphs_tuple = remove_graphs_tuple_padding(padded_graphs_tuple,
graphs_tuple_size)
```
Args:
padded_graphs_tuple: A `graphs.GraphsTuple` that has been padded by
`padding` amount.
valid_size: A `GraphsTupleSize` that represents the size of the valid graph.
Returns:
Returns a `graphs.GraphsTuple` which is padded_graphs_tuple stripped of
padding.
"""
stripped_graph_kwargs = {}
graph_dict = padded_graphs_tuple._asdict() # pylint: disable=protected-access
for field, tensor_nest in graph_dict.items():
field_valid_size = _get_field_size_from_size_tuple(valid_size, field)
strip_fn = lambda x: x[:field_valid_size] # pylint:disable=cell-var-from-loop
stripped_field = tree.map_structure(strip_fn, tensor_nest)
stripped_graph_kwargs[field] = stripped_field
return graphs.GraphsTuple(**stripped_graph_kwargs)
def _pad_tensor(tensor, field, padding_size):
Pads a tensor along its first dimension by the padding size.
Args:
tensor: tf.Tensor of size [batch_dim, x1, ..., xn].
field: Text, the field of `graphs.GraphsTuple` to pad.
padding_size: A tuple representing the size of padding of the graph.
Returns:
A tf.Tensor of size [batch_dim + padding, x1, ..., xn] padded with zeros.
"""
padding = _get_field_size_from_size_tuple(padding_size, field)
padding_tensor = tf.zeros(
[padding] + tensor.shape.as_list()[1:],
dtype=tensor.dtype,
name="pad_zeros_{}".format(field))
return tf.concat((tensor, padding_tensor), axis=0)
def _get_zeros_with_variable_batch_size(feature_tensor, padding_size):
return tf.zeros([padding_size] + feature_tensor.shape.as_list()[1:],
feature_tensor.dtype)
def _get_first_padding_graph(graphs_batch, padding_size,
experimental_unconnected_padding_edges):
"""Gets a dummy graph that pads receivers and senders.
This dummy graph will have `padding_size.num_nodes` nodes and
`padding_size.num_edges` edges. Receivers and
senders will be indexed with all zeros (connecting to the first node in the
dummy graph).
Args:
graphs_batch: the `graphs.GraphsTuple` to be padded.
padding_size: a `GraphsTupleSize` with the padding size.
experimental_unconnected_padding_edges: see `pad_graphs_tuple` for details.
Returns:
A `graphs.GraphsTuple` of a single dummy graph.
"""
# Set the edges to belong to an index corresponding to a node that does not
# exist.
if experimental_unconnected_padding_edges:
logging.log_first_n(
logging.WARNING,
"Using a padding graph with unconnected edges. This is an experimental "
"feature which may stop working in the future, and will lead to out"
"of range errors on tf.scatter if the graph net computation occurs on "
"CPU.", 1)
dummy_senders_and_receivers = (
tf.ones([padding_size.num_edges], tf.int32) * padding_size.num_nodes)
else:
dummy_senders_and_receivers = tf.zeros([padding_size.num_edges], tf.int32)
return graphs.GraphsTuple(
n_node=[padding_size.num_nodes],
n_edge=[padding_size.num_edges],
nodes=tree.map_structure(
functools.partial(
_get_zeros_with_variable_batch_size,
padding_size=padding_size.num_nodes), graphs_batch.nodes),
edges=tree.map_structure(
functools.partial(
_get_zeros_with_variable_batch_size,
padding_size=padding_size.num_edges), graphs_batch.edges),
senders=dummy_senders_and_receivers,
receivers=dummy_senders_and_receivers,
globals=tree.map_structure(
functools.partial(
_get_zeros_with_variable_batch_size, padding_size=1),
graphs_batch.globals))
def pad_graphs_tuple(graphs_tuple,
pad_nodes_to,
pad_edges_to,
pad_graphs_to,
experimental_unconnected_padding_edges=False):
"""Pads a `graphs.GraphsTuple` to fixed number of nodes, edges and graphs.
The Graph Nets library treats zeros as valid parts of a `graphs.GraphsTuple`, so
special padding is required in order to preserve the computation. This
method does so by adding a 'dummy' graph to the batch so that additional
nodes/edges can't interfere with the valid graph.
Args:
graphs_tuple: `graphs.GraphsTuple` batch of graphs.
pad_nodes_to: the size to pad node determined features to.
pad_edges_to: the size to pad edge determined features to.
pad_graphs_to: the size to pad graph determined features to.
experimental_unconnected_padding_edges: Experimental feature to prevent nans
in the padding graph. DISCLAIMER: This feature is extremely experimental,
and setting it to `True` is not recommended unless strictly necessary and
the implications are understood.
If `True`, the padding graph will have `senders` and `receivers` for
the padding edges reference a node which does not exist (one beyond the
size of `nodes`).
This feature can be used to prevent any broadcasting/aggregation ops
between edges and nodes for the padding graph. The reason is that the
sum aggregations in the padding graph, which has a single node with a
very large number of self-edges, sometimes lead to infs or nans,
which may contaminate the gradients of the other valid graphs in the batch
with nans (even if masked out of the loss: this is related to the
`tf.where` issue).
This approach relies on several numerical hacks that do not work on CPU,
but work on GPU and TPU (as covered by our tests):
* `tf.gather` returns zeros when the index is beyond the boundaries. From
https://www.tensorflow.org/api_docs/python/tf/gather
"Note that on CPU, if an out of bound index is found, an error is
returned. On GPU, if an out of bound index is found, a 0 is stored
in the corresponding output value."
* `tf.unsorted_segment_sum` drops values for negative indices. From
https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum
"If the given segment ID is negative, the value is dropped and
will not be added to the sum of the segment."
We have seen empirically that it also ignores values with indices equal
or larger than `num_segments`. While this behavior is tested in our
library, we cannot guarantee that it will work in the future for all
unsorted_segment ops, so use at your own risk.
This fixes the appearance of nans in the node-wise edge aggregation. The
appearance of `nan`s is less likely in the global aggregation because in
the worst case, the number of nodes/edges on the padding graph is not
typically much larger than the number of nodes/edges in other graphs in
the dataset.
A less hacky approach (but more expensive, and requiring modifying model
code) to prevent nan's appearing in the padding graph, is by masking out
the graph features before they are aggregated, although for convenience
we usually find that it is enough to do it after each message passing
layer. E.g.:
```
graphs_tuple_size = get_graphs_tuple_size(graphs_tuple)
padded_graphs_tuple = pad_graphs_tuple(graphs_tuple, ...)
graphs_mask = get_mask(graphs_tuple_size.num_graphs, pad_graphs_to)
nodes_mask = get_mask(graphs_tuple_size.num_nodes, pad_nodes_to)
edges_mask = get_mask(graphs_tuple_size.num_edges, pad_edges_to)
# Some computation that creates intermediate `any_padded_graphs_tuple`s
# after each message passing step.
any_padded_graphs_tuple = any_padded_graphs_tuple.replace(
edges=any_padded_graphs_tuple.edges * tf.cast(
edges_mask, tf.float32)[:, None],
nodes=any_padded_graphs_tuple.nodes * tf.cast(
nodes_mask, tf.float32)[:, None],
globals=any_padded_graphs_tuple.globals * tf.cast(
graphs_mask, tf.float32)[:, None],
)
```
Returns:
A `graphs.GraphsTuple` padded up to the required values.
"""
padded_sizes = GraphsTupleSize(pad_nodes_to, pad_edges_to, pad_graphs_to)
# The strategy goes as follows:
# 0. Make sure our `graphs_tuple` is at least 1 node and 1 graph smaller than
# the padded sizes (this is required for step 1).
# 1. Pad with one graph with at least one node, that contains all padding
# edges, and padding nodes, this will guaranteed preserved computation for
# graphs in the input `GraphsTuple`.
# 2. Pad up to `pad_graphs_to` with graphs with no nodes and no edges.
# 3. Set the shapes of the padded tensors to be statically known. Otherwise
# tensorflow shape inference mechanism is not smart enough to realize that
# at this stage tensors have statically known sizes.
# Step 0.
sufficient_space_assert = _assert_if_space_for_first_padding_graph(
graphs_tuple, padded_sizes)
with tf.control_dependencies([sufficient_space_assert]):
padding_size = _get_required_padding_sizes(graphs_tuple, padded_sizes)
# Step 1.
first_padding_graph = _get_first_padding_graph(
graphs_tuple, padding_size, experimental_unconnected_padding_edges)
graphs_tuple_with_first_padding_graph = concat(
[graphs_tuple, first_padding_graph], axis=0)
# Step 2.
remaining_padding_sizes = _get_required_padding_sizes(
graphs_tuple_with_first_padding_graph, padded_sizes)
padded_batch_kwargs = {}
for field, tensor_dict in (
graphs_tuple_with_first_padding_graph._asdict().items()): # pylint: disable=protected-access
field_pad_fn = functools.partial(
_pad_tensor, padding_size=remaining_padding_sizes, field=field)
padded_batch_kwargs[field] = tree.map_structure(field_pad_fn, tensor_dict)
# Step 3.
def _set_shape(tensor, padded_size):
tensor_shape = tensor.get_shape().as_list()
tensor.set_shape([padded_size] + tensor_shape[1:])
return tensor
for field, tensor_dict in padded_batch_kwargs.items():
padded_size = _get_field_size_from_size_tuple(padded_sizes, field)
set_shape_partial = functools.partial(_set_shape, padded_size=padded_size)
tensor_dict = tree.map_structure(set_shape_partial, tensor_dict)
padded_batch_kwargs[field] = tensor_dict
return graphs.GraphsTuple(**padded_batch_kwargs)
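# Editor's note: a minimal round-trip sketch for padding and unpadding, with
# hypothetical static sizes. `pad_nodes_to` and `pad_graphs_to` must strictly
# exceed the largest batch, while `pad_edges_to` only needs to be at least as
# large:
#   sizes = get_graphs_tuple_size(graphs_tuple)
#   padded = pad_graphs_tuple(graphs_tuple, pad_nodes_to=128,
#                             pad_edges_to=512, pad_graphs_to=17)
#   restored = remove_graphs_tuple_padding(padded, sizes)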
| graph_nets-master | graph_nets/utils_tf.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for `utils_np_test` and `utils_tf_test`.
This provides a base class for tests involving `graphs.GraphsTuple`
containing either numpy or tensorflow data. This base class is populated with
test data and also provides a convenience method for asserting graph equality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
from graph_nets import graphs
from graph_nets import utils_np
import numpy as np
import tensorflow as tf
@contextlib.contextmanager
def assert_new_op_prefixes(test, expected_prefix, assert_some_new_ops=True):
"""Asserts the namescope of tf ops created within the context manager."""
ops_before = [n.name for n in tf.get_default_graph().as_graph_def().node]
yield
ops_after = [n.name for n in tf.get_default_graph().as_graph_def().node]
new_ops = set(ops_after) - set(ops_before)
prefix_length = len(expected_prefix)
if assert_some_new_ops:
test.assertNotEqual(0, len(new_ops))
for op_name in new_ops:
test.assertEqual(expected_prefix, op_name[:prefix_length])
def generate_random_data_dict(
node_shape, edge_shape, globals_shape,
num_nodes_range=(15, 20), num_edges_range=(30, 35)):
num_nodes = np.random.randint(*num_nodes_range)
num_edges = np.random.randint(*num_edges_range)
return {
"nodes": np.random.normal(size=(num_nodes,) + node_shape),
"edges": np.random.normal(size=(num_edges,) + edge_shape),
"globals": np.random.normal(size=globals_shape),
"senders": np.random.randint(num_nodes, size=num_edges),
"receivers": np.random.randint(num_nodes, size=num_edges),
"n_node": num_nodes,
"n_edge": num_edges,
}
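# Editor's note: usage sketch; this generates one random graph dict with, for
# example, 7-dimensional node features, 3-dimensional edge features and a
# 4-dimensional global feature:
#   data_dict = generate_random_data_dict((7,), (3,), (4,))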
def mask_leading_dimension(tensor):
return tf.placeholder_with_default(tensor,
[None] + tensor.get_shape().as_list()[1:])
NODES_DIMS = [7, 11]
EDGES_DIMS = [13, 14]
GLOBALS_DIMS = [5, 3]
class GraphsTest(tf.test.TestCase):
"""A base class for tests that operate on GraphsNP or GraphsTF."""
def _populate_test_data(self, max_size):
"""Populates the class fields with data used for the tests.
This creates a batch of graphs with numbers of nodes from 1 to `max_size`
and numbers of edges from 0 to `max_size`, plus an empty graph with no nodes
and no edges (so that the total number of graphs is
1 + max_size * (max_size + 1)).
The nodes states, edges states and global states of the graphs are
created to have different types and shapes.
Those graphs are stored both as dictionaries (in `self.graphs_dicts_in`,
without `n_node` and `n_edge` information, and in `self.graphs_dicts_out`
with these two fields filled), and a corresponding numpy
`graphs.GraphsTuple` is stored in `self.reference_graph`.
Args:
max_size: The maximum number of nodes and edges (inclusive).
"""
filt = lambda x: (x[0] > 0) or (x[1] == 0)
n_node, n_edge = zip(*list(
filter(filt, itertools.product(
range(max_size + 1), range(max_size + 1)))))
graphs_dicts = []
nodes = []
edges = []
receivers = []
senders = []
globals_ = []
def _make_default_state(shape, dtype):
return np.arange(np.prod(shape)).reshape(shape).astype(dtype)
for i, (n_node_, n_edge_) in enumerate(zip(n_node, n_edge)):
n = _make_default_state([n_node_,] + NODES_DIMS, "f4") + i * 100.
e = _make_default_state(
[n_edge_,] + EDGES_DIMS, np.float64) + i * 100. + 1000.
r = _make_default_state([n_edge_], np.int32) % n_node[i]
s = (_make_default_state([n_edge_], np.int32) + 1) % n_node[i]
g = _make_default_state(GLOBALS_DIMS, "f4") - i * 100. - 1000.
nodes.append(n)
edges.append(e)
receivers.append(r)
senders.append(s)
globals_.append(g)
graphs_dict = dict(nodes=n, edges=e, receivers=r, senders=s, globals=g)
graphs_dicts.append(graphs_dict)
# Graphs dicts without n_node / n_edge (to be used as inputs).
self.graphs_dicts_in = graphs_dicts
# Graphs dicts with n_node / n_edge (to be checked against outputs).
self.graphs_dicts_out = []
for dict_ in self.graphs_dicts_in:
completed_dict = dict_.copy()
completed_dict["n_node"] = completed_dict["nodes"].shape[0]
completed_dict["n_edge"] = completed_dict["edges"].shape[0]
self.graphs_dicts_out.append(completed_dict)
# pylint: disable=protected-access
offset = utils_np._compute_stacked_offsets(n_node, n_edge)
# pylint: enable=protected-access
self.reference_graph = graphs.GraphsTuple(**dict(
nodes=np.concatenate(nodes, axis=0),
edges=np.concatenate(edges, axis=0),
receivers=np.concatenate(receivers, axis=0) + offset,
senders=np.concatenate(senders, axis=0) + offset,
globals=np.stack(globals_),
n_node=np.array(n_node),
n_edge=np.array(n_edge)))
self.graphs_dicts = graphs_dicts
def _assert_graph_equals_np(self, graph0, graph, force_edges_ordering=False):
"""Asserts that all the graph fields of graph0 and graph match."""
if graph0.nodes is None:
self.assertEqual(None, graph.nodes)
else:
self.assertAllClose(graph0.nodes, graph.nodes)
if graph0.globals is None:
self.assertEqual(None, graph.globals)
else:
self.assertAllClose(graph0.globals, graph.globals)
self.assertAllClose(graph0.n_node, graph.n_node.tolist())
if graph0.receivers is None:
self.assertEqual(None, graph.receivers)
self.assertEqual(None, graph.senders)
self.assertEqual(None, graph.edges)
self.assertAllEqual(graph0.n_edge, graph.n_edge)
return
self.assertAllClose(graph0.n_edge, graph.n_edge.tolist())
if not force_edges_ordering:
self.assertAllClose(graph0.receivers, graph.receivers)
self.assertAllClose(graph0.senders, graph.senders)
if graph0.edges is not None:
self.assertAllClose(graph0.edges, graph.edges)
else:
self.assertEqual(None, graph.edges)
return
# To compare edges content, we need to make sure they appear in the same
# order
if graph0.edges is not None:
sorted_receivers0, sorted_senders0, sorted_content0 = zip(
*sorted(zip(graph0.receivers, graph0.senders, graph0.edges.tolist())))
sorted_receivers, sorted_senders, sorted_content = zip(
*sorted(zip(graph.receivers, graph.senders, graph.edges.tolist())))
self.assertAllClose(sorted_content0, sorted_content)
elif graph.receivers is not None:
sorted_receivers0, sorted_senders0 = zip(
*sorted(zip(graph0.receivers, graph0.senders)))
sorted_receivers, sorted_senders = zip(
*sorted(zip(graph.receivers, graph.senders)))
else:
return
self.assertAllClose(sorted_receivers0, sorted_receivers)
self.assertAllClose(sorted_senders0, sorted_senders)
def setUp(self):
self._populate_test_data(max_size=2)
tf.reset_default_graph()
| graph_nets-master | graph_nets/tests/test_utils.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `graphs.py`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from graph_nets import graphs
import tensorflow as tf
class GraphsTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(GraphsTest, self).setUp()
all_fields = graphs.GRAPH_DATA_FIELDS + graphs.GRAPH_NUMBER_FIELDS
self.graph = {k: k for k in all_fields}
@parameterized.named_parameters(
("no n_node", ["n_node"],),
("no n_edge", ["n_edge"],),
("receivers but no senders", ["edges", "senders"],),
("senders but no receivers", ["edges", "receivers"],),
("edges but no senders/receivers", ["receivers", "senders"],),
)
def test_inconsistent_none_fields_raise_error_on_creation(self, none_fields):
for none_field in none_fields:
self.graph[none_field] = None
with self.assertRaisesRegexp(ValueError, none_fields[-1]):
graphs.GraphsTuple(**self.graph)
@parameterized.named_parameters(
("no n_node", ["n_node"],),
("no n_edge", ["n_edge"],),
("receivers but no senders", ["edges", "senders"],),
("senders but no receivers", ["edges", "receivers"],),
("edges but no senders/receivers", ["receivers", "senders"],),
)
def test_inconsistent_none_fields_raise_error_on_replace(self, none_fields):
graph = graphs.GraphsTuple(**self.graph)
with self.assertRaisesRegexp(ValueError, none_fields[-1]):
graph.replace(**{none_field: None for none_field in none_fields})
@parameterized.named_parameters(
("all fields defined", [],),
("no node state", ["nodes"],),
("no edge state", ["edges"],),
("no global state", ["globals"],),
("no state", ["nodes", "edges", "globals"],),
("no graph", ["nodes", "edges", "globals", "receivers", "senders"],),
("no edges", ["edges", "receivers", "senders"],),
)
def test_creation_with_valid_none_fields(self, none_fields):
for none_field in none_fields:
self.graph[none_field] = None
graph = graphs.GraphsTuple(**self.graph)
for k, v in self.graph.items():
self.assertEqual(v, getattr(graph, k))
@parameterized.named_parameters(
("all fields defined", [],),
("no node state", ["nodes"],),
("no edge state", ["edges"],),
("no global state", ["globals"],),
("no state", ["nodes", "edges", "globals"],),
("no graph", ["nodes", "edges", "globals", "receivers", "senders"],),
("no edges", ["edges", "receivers", "senders"],),
)
def test_replace_with_valid_none_fields(self, none_fields):
# Create a graph with different values.
graph = graphs.GraphsTuple(**{k: v + v for k, v in self.graph.items()})
# Update with a graph containing the initial values, or Nones.
for none_field in none_fields:
self.graph[none_field] = None
graph = graph.replace(**self.graph)
for k, v in self.graph.items():
self.assertEqual(v, getattr(graph, k))
@parameterized.parameters(
([],),
(["nodes"],),
(["edges"],),
(["globals"],),
(["receivers"],),
(["senders"],),
(["n_node"],),
(["n_edge"],),
(["receivers", "senders"],),
(["nodes", "edges", "globals"],),
(["nodes", "edges", "globals", "receivers", "senders",
"n_node", "n_edge"],),
)
def test_map_fields_as_expected(self, fields_to_map):
"""Tests that the fields are mapped are as expected."""
graph = graphs.GraphsTuple(**self.graph)
graph = graph.map(lambda v: v + v, fields_to_map)
for field in graphs.ALL_FIELDS:
if field in fields_to_map:
self.assertEqual(field + field, getattr(graph, field))
else:
self.assertEqual(field, getattr(graph, field))
def test_map_field_called_only_once(self):
"""Tests that the mapping function is called exactly once per field."""
graph = graphs.GraphsTuple(**self.graph)
mapped_fields = []
def map_fn(v):
mapped_fields.append(v)
return v
graph = graph.map(map_fn, graphs.ALL_FIELDS)
self.assertCountEqual(mapped_fields, graphs.ALL_FIELDS)
def test_map_field_default_value(self):
"""Tests the default value for the `fields` argument."""
graph = graphs.GraphsTuple(**self.graph)
mapped_fields = []
graph = graph.map(mapped_fields.append)
self.assertCountEqual(
mapped_fields, [graphs.EDGES, graphs.GLOBALS, graphs.NODES])
def test_map_field_is_parallel(self):
"""Tests that fields are mapped parallelaly, not sequentially."""
graph = graphs.GraphsTuple(**self.graph)
graph = graph.map(lambda v: None, ["edges", "receivers", "senders"])
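# Illustrative sketch (not a test): how a GraphsTuple is typically built and
# transformed. Shapes and values below are arbitrary placeholders.
def _example_graphs_tuple_usage():
  """Builds a small GraphsTuple and exercises `replace` and `map`."""
  import numpy as np  # Local import; this sketch is self-contained.
  graph = graphs.GraphsTuple(
      nodes=np.zeros([3, 2]),       # 3 nodes, 2 features each.
      edges=np.zeros([2, 4]),       # 2 edges, 4 features each.
      senders=np.array([0, 1]),
      receivers=np.array([1, 2]),
      globals=np.zeros([5]),
      n_node=np.array([3]),
      n_edge=np.array([2]))
  # `replace` swaps out fields; inconsistent None patterns raise ValueError,
  # as the tests above check.
  stateless = graph.replace(nodes=None, edges=None, globals=None)
  # `map` applies a function to the selected fields only.
  doubled = graph.map(lambda v: v * 2, ["nodes", "edges"])
  return stateless, doubled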
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests/graphs_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph networks library tests."""
| graph_nets-master | graph_nets/tests/__init__.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Test for utils_np."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
from graph_nets import utils_np
from graph_nets.tests import test_utils
import networkx as nx
import numpy as np
from six.moves import range
import tensorflow as tf
class ConcatenationTest(test_utils.GraphsTest, parameterized.TestCase):
def test_compute_stacked_offsets(self):
sizes = np.array([5, 4, 3, 1, 2, 0, 3, 0, 4, 7])
repeats = [2, 2, 0, 2, 1, 3, 2, 0, 3, 2]
offsets0 = utils_np._compute_stacked_offsets(sizes, repeats)
offsets1 = utils_np._compute_stacked_offsets(sizes, np.array(repeats))
expected_offsets = [
0, 0, 5, 5, 12, 12, 13, 15, 15, 15, 15, 15, 18, 18, 18, 22, 22
]
self.assertAllEqual(expected_offsets, offsets0.tolist())
self.assertAllEqual(expected_offsets, offsets1.tolist())
def test_concatenate_data_dicts(self):
cat = utils_np._concatenate_data_dicts(self.graphs_dicts_in)
for k, v in cat.items():
self.assertAllEqual(getattr(self.reference_graph, k), v)
class DataDictsConversionTest(test_utils.GraphsTest, parameterized.TestCase):
@parameterized.parameters(([],),
(["edges"],),
(["globals"],),
(["edges", "receivers", "senders"],))
def test_data_dicts_to_graphs_tuple(self, none_fields):
"""Fields in `none_fields` will be cleared out."""
for field in none_fields:
for graph_dict in self.graphs_dicts_in:
if field in graph_dict:
if field == "nodes":
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict[field] = None
self.reference_graph = self.reference_graph._replace(**{field: None})
if field == "senders":
self.reference_graph = self.reference_graph._replace(
n_edge=np.zeros_like(self.reference_graph.n_edge))
graphs = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for field in none_fields:
self.assertEqual(None, getattr(graphs, field))
self._assert_graph_equals_np(self.reference_graph, graphs)
@parameterized.parameters(("receivers",), ("senders",))
def test_data_dicts_to_graphs_tuple_missing_field_raises(self, none_field):
"""Fields that cannot be missing."""
for graph_dict in self.graphs_dicts_in:
graph_dict[none_field] = None
with self.assertRaisesRegexp(ValueError, none_field):
utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
def test_data_dicts_to_graphs_tuple_infer_n_node(self):
"""Not having nodes is fine if providing the number of nodes."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict["nodes"] = None
out = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
self.assertAllEqual([0, 1, 1, 1, 2, 2, 2], out.n_node)
def test_data_dicts_to_graphs_tuple_cast_types(self):
"""Index and number fields should be cast to numpy arrays."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = np.array(
graph_dict["nodes"].shape[0], dtype=np.int64)
graph_dict["receivers"] = graph_dict["receivers"].astype(np.int16)
graph_dict["senders"] = graph_dict["senders"].astype(np.float64)
graph_dict["nodes"] = graph_dict["nodes"].astype(np.float64)
graph_dict["edges"] = graph_dict["edges"].astype(np.float64)
out = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for key in ["n_node", "n_edge", "receivers", "senders"]:
self.assertEqual(np.int32, getattr(out, key).dtype)
for key in ["nodes", "edges"]:
self.assertEqual(np.float64, getattr(out, key).dtype)
def test_data_dicts_to_graphs_tuple_from_lists(self):
"""Tests creatings a GraphsTuple from python lists."""
for graph_dict in self.graphs_dicts_in:
graph_dict["receivers"] = graph_dict["receivers"].tolist()
graph_dict["senders"] = graph_dict["senders"].tolist()
graphs = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
self._assert_graph_equals_np(self.reference_graph, graphs)
@parameterized.named_parameters(
("all_fields", []),
("no_data", ["nodes", "edges", "globals"]),
("no_edges", ["edges", "receivers", "senders"]))
def test_graphs_tuple_to_data_dicts(self, none_fields):
graphs_tuple = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple.map(lambda _: None, none_fields)
data_dicts = utils_np.graphs_tuple_to_data_dicts(graphs_tuple)
for none_field, data_dict in itertools.product(none_fields, data_dicts):
self.assertEqual(None, data_dict[none_field])
for expected_data_dict, data_dict in zip(self.graphs_dicts_out, data_dicts):
for k, v in expected_data_dict.items():
if k not in none_fields:
self.assertAllClose(v, data_dict[k])
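# Illustrative sketch (not a test): the data-dict round trip exercised above.
# The shapes are arbitrary; they only need to be mutually consistent.
def _example_data_dict_roundtrip():
  """Converts data dicts to a GraphsTuple and back."""
  data_dict = {
      "nodes": np.zeros([3, 2], dtype=np.float32),
      "edges": np.zeros([2, 4], dtype=np.float32),
      "senders": np.array([0, 1]),
      "receivers": np.array([1, 2]),
      "globals": np.zeros([5], dtype=np.float32),
  }
  # `n_node` and `n_edge` are inferred from the nodes and connectivity.
  graphs_tuple = utils_np.data_dicts_to_graphs_tuple([data_dict])
  # Index and count fields come back as int32 arrays, as tested above.
  recovered_dicts = utils_np.graphs_tuple_to_data_dicts(graphs_tuple)
  return recovered_dicts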
def _single_data_dict_to_networkx(data_dict):
graph_nx = nx.OrderedMultiDiGraph()
if data_dict["nodes"].size > 0:
for i, x in enumerate(data_dict["nodes"]):
graph_nx.add_node(i, features=x)
if data_dict["edges"].size > 0:
edge_data = zip(data_dict["senders"], data_dict["receivers"], [{
"features": x
} for x in data_dict["edges"]])
graph_nx.add_edges_from(edge_data)
graph_nx.graph["features"] = data_dict["globals"]
return graph_nx
class NetworkxConversionTest(test_utils.GraphsTest, parameterized.TestCase):
def test_order_preserving(self):
"""Tests that edges order can be preserved when importing from networks."""
graph = nx.DiGraph()
for node_index in range(4):
graph.add_node(node_index, features=np.array([node_index]))
receivers = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
senders = [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]
for edge_index, (receiver, sender) in enumerate(zip(receivers, senders)):
# Removing the "index" key makes this test fail 100%.
edge_data = {"features": np.array([edge_index]), "index": edge_index}
graph.add_edge(sender, receiver, **edge_data)
graph.graph["features"] = np.array([0.])
graphs_graph = utils_np.networkx_to_data_dict(graph)
self.assertAllEqual(receivers, graphs_graph["receivers"])
self.assertAllEqual(senders, graphs_graph["senders"])
self.assertAllEqual([[x] for x in range(4)], graphs_graph["nodes"])
self.assertAllEqual([[x] for x in range(12)], graphs_graph["edges"])
def test_networkxs_to_graphs_tuple_with_none_fields(self):
graph_nx = nx.OrderedMultiDiGraph()
data_dict = utils_np.networkx_to_data_dict(
graph_nx,
node_shape_hint=None,
edge_shape_hint=None)
self.assertEqual(None, data_dict["edges"])
self.assertEqual(None, data_dict["globals"])
self.assertEqual(None, data_dict["nodes"])
graph_nx.add_node(0, features=None)
data_dict = utils_np.networkx_to_data_dict(
graph_nx,
node_shape_hint=1,
edge_shape_hint=None)
self.assertEqual(None, data_dict["nodes"])
graph_nx.add_edge(0, 0, features=None)
data_dict = utils_np.networkx_to_data_dict(
graph_nx,
node_shape_hint=[1],
edge_shape_hint=[1])
self.assertEqual(None, data_dict["edges"])
graph_nx.graph["features"] = None
utils_np.networkx_to_data_dict(graph_nx)
self.assertEqual(None, data_dict["globals"])
def test_networkxs_to_graphs_tuple(self):
graph0 = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graph_nxs = []
for data_dict in self.graphs_dicts_in:
graph_nxs.append(_single_data_dict_to_networkx(data_dict))
hints = {
"edge_shape_hint": data_dict["edges"].shape[1:],
"node_shape_hint": data_dict["nodes"].shape[1:],
"data_type_hint": data_dict["nodes"].dtype,
}
graph = utils_np.networkxs_to_graphs_tuple(graph_nxs, **hints)
self._assert_graph_equals_np(graph0, graph, force_edges_ordering=True)
def test_networkxs_to_data_dict_raises_node_key_error(self):
"""If the nodes have keys not consistent with the order they were added."""
graph_nx = nx.OrderedMultiDiGraph()
graph_nx.add_node(0, features=None)
graph_nx.add_node(1, features=None)
graph_nx.add_node(3, features=None)
with self.assertRaisesRegexp(
ValueError, "found node with index 2 and key 3"):
utils_np.networkx_to_data_dict(graph_nx)
# Check that it is still raised even if there is a node with each key,
# and only the order is wrong.
graph_nx.add_node(2, features=None)
with self.assertRaisesRegexp(
ValueError, "found node with index 2 and key 3"):
utils_np.networkx_to_data_dict(graph_nx)
def test_networkxs_to_graphs_tuple_raises_key_error(self):
"""If the "features" field is not present in the nodes or edges."""
graph_nx = _single_data_dict_to_networkx(self.graphs_dicts_in[-1])
first_node = list(graph_nx.nodes(data=True))[0]
del first_node[1]["features"]
with self.assertRaisesRegexp(
KeyError, "This could be due to the node having been silently added"):
utils_np.networkxs_to_graphs_tuple([graph_nx])
graph_nx = _single_data_dict_to_networkx(self.graphs_dicts_in[-1])
first_edge = list(graph_nx.edges(data=True))[0]
del first_edge[2]["features"]
with self.assertRaises(KeyError):
utils_np.networkxs_to_graphs_tuple([graph_nx])
def test_networkxs_to_graphs_tuple_raises_assertion_error(self):
"""Either all nodes (resp. edges) should have features, or none of them."""
graph_nx = _single_data_dict_to_networkx(self.graphs_dicts_in[-1])
first_node = list(graph_nx.nodes(data=True))[0]
first_node[1]["features"] = None
with self.assertRaisesRegexp(
ValueError, "Either all the nodes should have features"):
utils_np.networkxs_to_graphs_tuple([graph_nx])
graph_nx = _single_data_dict_to_networkx(self.graphs_dicts_in[-1])
first_edge = list(graph_nx.edges(data=True))[0]
first_edge[2]["features"] = None
with self.assertRaisesRegexp(
ValueError, "Either all the edges should have features"):
utils_np.networkxs_to_graphs_tuple([graph_nx])
@parameterized.named_parameters(
("all fields defined", []),
("stateless", ["nodes", "edges", "globals"]))
def test_graphs_tuple_to_networkxs(self, none_fields):
if "nodes" in none_fields:
for graph in self.graphs_dicts_in:
graph["n_node"] = graph["nodes"].shape[0]
graphs = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs = graphs.map(lambda _: None, none_fields)
graph_nxs = utils_np.graphs_tuple_to_networkxs(graphs)
for data_dict, graph_nx in zip(self.graphs_dicts_out, graph_nxs):
if "globals" in none_fields:
self.assertEqual(None, graph_nx.graph["features"])
else:
self.assertAllClose(data_dict["globals"], graph_nx.graph["features"])
nodes_data = graph_nx.nodes(data=True)
for i, (v, (j, n)) in enumerate(zip(data_dict["nodes"], nodes_data)):
self.assertEqual(i, j)
if "nodes" in none_fields:
self.assertEqual(None, n["features"])
else:
self.assertAllClose(v, n["features"])
edges_data = sorted(
graph_nx.edges(data=True), key=lambda x: x[2]["index"])
for v, (_, _, e) in zip(data_dict["edges"], edges_data):
if "edges" in none_fields:
self.assertEqual(None, e["features"])
else:
self.assertAllClose(v, e["features"])
for r, s, (i, j, _) in zip(
data_dict["receivers"], data_dict["senders"], edges_data):
self.assertEqual(s, i)
self.assertEqual(r, j)
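# Illustrative sketch (not a test): the networkx conversion convention used
# above, where node, edge and graph data live under a "features" key.
def _example_networkx_conversion():
  """Converts a small networkx graph to a data dict and a GraphsTuple."""
  graph_nx = nx.OrderedMultiDiGraph()
  graph_nx.add_node(0, features=np.array([1.0]))
  graph_nx.add_node(1, features=np.array([2.0]))
  graph_nx.add_edge(0, 1, features=np.array([0.5]))
  graph_nx.graph["features"] = np.array([0.0])
  data_dict = utils_np.networkx_to_data_dict(graph_nx)
  graphs_tuple = utils_np.networkxs_to_graphs_tuple([graph_nx])
  return data_dict, graphs_tuple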
class GetItemTest(test_utils.GraphsTest, parameterized.TestCase):
def test_get_single_item(self):
index = 2
expected = self.graphs_dicts_out[index]
graphs = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graph = utils_np.get_graph(graphs, index)
actual, = utils_np.graphs_tuple_to_data_dicts(graph)
for k, v in expected.items():
self.assertAllClose(v, actual[k])
def test_get_many_items(self):
index = slice(1, 3)
expected = self.graphs_dicts_out[index]
graphs = utils_np.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs2 = utils_np.get_graph(graphs, index)
actual = utils_np.graphs_tuple_to_data_dicts(graphs2)
for ex, ac in zip(expected, actual):
for k, v in ex.items():
self.assertAllClose(v, ac[k])
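# Illustrative sketch (not a test): indexing into a batched GraphsTuple with
# `get_graph`, which accepts an integer index or a slice.
def _example_get_graph():
  """Selects single graphs and ranges of graphs from a GraphsTuple."""
  data_dict = {
      "nodes": np.zeros([2, 3], dtype=np.float32),
      "edges": np.zeros([1, 4], dtype=np.float32),
      "senders": np.array([0]),
      "receivers": np.array([1]),
      "globals": np.zeros([5], dtype=np.float32),
  }
  graphs_tuple = utils_np.data_dicts_to_graphs_tuple([data_dict] * 3)
  first = utils_np.get_graph(graphs_tuple, 0)                 # One graph.
  middle_two = utils_np.get_graph(graphs_tuple, slice(1, 3))  # Two graphs.
  return first, middle_two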
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests/utils_np_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for blocks.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
import numpy as np
import sonnet as snt
import tensorflow as tf
SMALL_GRAPH_1 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 1],
"receivers": [1, 2],
}
SMALL_GRAPH_2 = {
"globals": [-1.1, -1.2, -1.3, -1.4],
"nodes": [[-10.1, -10.2], [-20.1, -20.2], [-30.1, -30.2]],
"edges": [[-101., -102., -103., -104.]],
"senders": [1,],
"receivers": [2,],
}
SMALL_GRAPH_3 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [1, 1],
"receivers": [0, 2],
}
SMALL_GRAPH_4 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 2],
"receivers": [1, 1],
}
class GraphModuleTest(tf.test.TestCase, parameterized.TestCase):
"""Base class for all the tests in this file."""
def setUp(self):
super(GraphModuleTest, self).setUp()
tf.set_random_seed(0)
def _get_input_graph(self, none_fields=None):
if none_fields is None:
none_fields = []
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4])
input_graph = input_graph.map(lambda _: None, none_fields)
return input_graph
def _get_shaped_input_graph(self):
return graphs.GraphsTuple(
nodes=tf.zeros([3, 4, 5, 11], dtype=tf.float32),
edges=tf.zeros([5, 4, 5, 12], dtype=tf.float32),
globals=tf.zeros([2, 4, 5, 13], dtype=tf.float32),
receivers=tf.range(5, dtype=tf.int32) // 3,
senders=tf.range(5, dtype=tf.int32) % 3,
n_node=tf.constant([2, 1], dtype=tf.int32),
n_edge=tf.constant([3, 2], dtype=tf.int32),
)
def _assert_build_and_run(self, network, input_graph):
# No error at construction time.
output = network(input_graph)
# No error at runtime.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(output)
BROADCAST_GLOBAL_TO_EDGES = [
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[-1.1, -1.2, -1.3, -1.4],
]
BROADCAST_GLOBAL_TO_NODES = [
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[-1.1, -1.2, -1.3, -1.4],
[-1.1, -1.2, -1.3, -1.4],
[-1.1, -1.2, -1.3, -1.4],
]
SENDER_NODES_TO_EDGES = [
[10.1, 10.2],
[20.1, 20.2],
[-20.1, -20.2],
]
RECEIVER_NODES_TO_EDGES = [
[20.1, 20.2],
[30.1, 30.2],
[-30.1, -30.2],
]
class BroadcastersTest(GraphModuleTest):
"""Tests for the broadcasters."""
@parameterized.named_parameters(
("globals_to_edges",
blocks.broadcast_globals_to_edges, BROADCAST_GLOBAL_TO_EDGES),
("globals_to_nodes",
blocks.broadcast_globals_to_nodes, BROADCAST_GLOBAL_TO_NODES),
("sender_nodes_to_edges",
blocks.broadcast_sender_nodes_to_edges, SENDER_NODES_TO_EDGES),
("receiver_nodes_to_edges",
blocks.broadcast_receiver_nodes_to_edges, RECEIVER_NODES_TO_EDGES),
)
def test_output_values(self, broadcaster, expected):
"""Test the broadcasted output value."""
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
broadcasted = broadcaster(input_graph)
with tf.Session() as sess:
broadcasted_out = sess.run(broadcasted)
self.assertNDArrayNear(
np.array(expected, dtype=np.float32), broadcasted_out, err=1e-4)
@parameterized.named_parameters(
("globals_to_edges",
blocks.broadcast_globals_to_edges, BROADCAST_GLOBAL_TO_EDGES),
("globals_to_nodes",
blocks.broadcast_globals_to_nodes, BROADCAST_GLOBAL_TO_NODES),
("sender_nodes_to_edges",
blocks.broadcast_sender_nodes_to_edges, SENDER_NODES_TO_EDGES),
("receiver_nodes_to_edges",
blocks.broadcast_receiver_nodes_to_edges, RECEIVER_NODES_TO_EDGES),
)
def test_output_values_larger_rank(self, broadcaster, expected):
"""Test the broadcasted output value."""
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
input_graph = input_graph.map(
lambda v: tf.reshape(v, [v.get_shape().as_list()[0]] + [2, -1]))
broadcasted = broadcaster(input_graph)
with tf.Session() as sess:
broadcasted_out = sess.run(broadcasted)
self.assertNDArrayNear(
np.reshape(np.array(expected, dtype=np.float32),
[len(expected)] + [2, -1]),
broadcasted_out,
err=1e-4)
@parameterized.named_parameters(
("globals_to_edges_no_globals",
blocks.broadcast_globals_to_edges, ("globals",)),
("globals_to_nodes_no_globals",
blocks.broadcast_globals_to_nodes, ("globals",)),
("sender_nodes_to_edges_none_nodes",
blocks.broadcast_sender_nodes_to_edges, ("nodes",)),
("sender_nodes_to_edges_none_senders",
blocks.broadcast_sender_nodes_to_edges,
("edges", "senders", "receivers")),
("receiver_nodes_to_edges_none_nodes",
blocks.broadcast_receiver_nodes_to_edges, ("nodes",)),
)
def test_missing_field_raises_exception(self, broadcaster, none_fields):
"""Test that an error is raised if a required field is `None`."""
input_graph = self._get_input_graph(none_fields)
with self.assertRaisesRegexp(
ValueError, "field cannot be None when broadcasting"):
broadcaster(input_graph)
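# Illustrative sketch (not a test): broadcasting replicates each graph's
# global vector once per edge, matching the expected constants above.
def _example_broadcast_globals_to_edges():
  """Runs `broadcast_globals_to_edges` on two small graphs."""
  input_graph = utils_tf.data_dicts_to_graphs_tuple(
      [SMALL_GRAPH_1, SMALL_GRAPH_2])
  per_edge_globals = blocks.broadcast_globals_to_edges(input_graph)
  with tf.Session() as sess:
    # Result shape: [total_num_edges, global_feature_size].
    return sess.run(per_edge_globals)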
class ReducersTest(GraphModuleTest):
"""Tests for the reducers."""
@parameterized.parameters(
(blocks.unsorted_segment_min_or_zero,
[[0., 0.],
[0.1, -0.1],
[0.2, -0.3],
[0.4, -0.6],
[0.7, -1.],
[0.9, -0.9],
[0., 0.]]),
(blocks.unsorted_segment_max_or_zero,
[[0., 0.],
[0.1, -0.1],
[0.3, -0.2],
[0.6, -0.4],
[1., -0.7],
[0.9, -0.9],
[0., 0.]]),
)
def test_output_values(self, reducer, expected_values):
input_values_np = np.array([[0.1, -0.1],
[0.2, -0.2],
[0.3, -0.3],
[0.4, -0.4],
[0.5, -0.5],
[0.6, -0.6],
[0.7, -0.7],
[0.8, -0.8],
[0.9, -0.9],
[1., -1.]], dtype=np.float32)
input_indices_np = np.array([1, 2, 2, 3, 3, 3, 4, 4, 5, 4], dtype=np.int32)
num_groups_np = np.array(7, dtype=np.int32)
input_indices = tf.constant(input_indices_np, dtype=tf.int32)
input_values = tf.constant(input_values_np, dtype=tf.float32)
num_groups = tf.constant(num_groups_np, dtype=tf.int32)
reduced = reducer(input_values, input_indices, num_groups)
with tf.Session() as sess:
reduced_out = sess.run(reduced)
self.assertNDArrayNear(
np.array(expected_values, dtype=np.float32), reduced_out, err=1e-4)
SEGMENT_SUM_EDGES_TO_GLOBALS = [
[302., 304., 306., 308.],
[-101., -102., -103., -104.],
[302., 304., 306., 308.],
[302., 304., 306., 308.],
]
SEGMENT_SUM_NODES_TO_GLOBALS = [
[60.3, 60.6],
[-60.3, -60.6],
[60.3, 60.6],
[60.3, 60.6],
]
SEGMENT_SUM_SENT_EDGES_TO_NODES = [
[101., 102., 103., 104.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[-101., -102., -103., -104.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[302., 304., 306., 308.],
[0., 0., 0., 0.,],
[101., 102., 103., 104.],
[0., 0., 0., 0.],
[201., 202., 203., 204.],
]
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES = [
[0., 0., 0., 0.],
[101., 102., 103., 104.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[-101., -102., -103., -104.],
[101., 102., 103., 104.],
[0., 0., 0., 0.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[302., 304., 306., 308,],
[0., 0., 0., 0.],
]
class FieldAggregatorsTest(GraphModuleTest):
@parameterized.named_parameters(
("edges_to_globals",
blocks.EdgesToGlobalsAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_EDGES_TO_GLOBALS,),
("nodes_to_globals",
blocks.NodesToGlobalsAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_NODES_TO_GLOBALS,),
("sent_edges_to_nodes",
blocks.SentEdgesToNodesAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_SENT_EDGES_TO_NODES,),
("received_edges_to_nodes",
blocks.ReceivedEdgesToNodesAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES),
)
def test_output_values(self, aggregator, expected):
input_graph = self._get_input_graph()
aggregated = aggregator(input_graph)
with tf.Session() as sess:
aggregated_out = sess.run(aggregated)
self.assertNDArrayNear(
np.array(expected, dtype=np.float32), aggregated_out, err=1e-4)
@parameterized.named_parameters(
("edges_to_globals",
blocks.EdgesToGlobalsAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_EDGES_TO_GLOBALS,),
("nodes_to_globals",
blocks.NodesToGlobalsAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_NODES_TO_GLOBALS,),
("sent_edges_to_nodes",
blocks.SentEdgesToNodesAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_SENT_EDGES_TO_NODES,),
("received_edges_to_nodes",
blocks.ReceivedEdgesToNodesAggregator(tf.unsorted_segment_sum),
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES),
)
def test_output_values_larger_rank(self, aggregator, expected):
input_graph = self._get_input_graph()
input_graph = input_graph.map(
lambda v: tf.reshape(v, [v.get_shape().as_list()[0]] + [2, -1]))
aggregated = aggregator(input_graph)
with tf.Session() as sess:
aggregated_out = sess.run(aggregated)
self.assertNDArrayNear(
np.reshape(np.array(expected, dtype=np.float32),
[len(expected)] + [2, -1]),
aggregated_out,
err=1e-4)
@parameterized.named_parameters(
("received edges to nodes missing edges",
blocks.ReceivedEdgesToNodesAggregator, "edges"),
("sent edges to nodes missing edges",
blocks.SentEdgesToNodesAggregator, "edges"),
("nodes to globals missing nodes",
blocks.NodesToGlobalsAggregator, "nodes"),
("edges to globals missing nodes",
blocks.EdgesToGlobalsAggregator, "edges"),)
def test_missing_field_raises_exception(self, constructor, none_field):
"""Tests that aggregator fail if a required field is missing."""
input_graph = self._get_input_graph([none_field])
with self.assertRaisesRegexp(ValueError, none_field):
constructor(tf.unsorted_segment_sum)(input_graph)
@parameterized.named_parameters(
("received edges to nodes missing nodes and globals",
blocks.ReceivedEdgesToNodesAggregator, ["nodes", "globals"]),
("sent edges to nodes missing nodes and globals",
blocks.SentEdgesToNodesAggregator, ["nodes", "globals"]),
("nodes to globals missing edges and globals",
blocks.NodesToGlobalsAggregator,
["edges", "receivers", "senders", "globals"]),
("edges to globals missing globals",
blocks.EdgesToGlobalsAggregator, ["globals"]),
)
def test_unused_field_can_be_none(self, constructor, none_fields):
"""Tests that aggregator fail if a required field is missing."""
input_graph = self._get_input_graph(none_fields)
constructor(tf.unsorted_segment_sum)(input_graph)
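# Illustrative sketch (not a test): an aggregator reduces all edge features
# of each graph into a single per-graph vector.
def _example_edges_to_globals_aggregation():
  """Sums edge features per graph with an EdgesToGlobalsAggregator."""
  input_graph = utils_tf.data_dicts_to_graphs_tuple(
      [SMALL_GRAPH_1, SMALL_GRAPH_2])
  aggregator = blocks.EdgesToGlobalsAggregator(tf.unsorted_segment_sum)
  summed_edges = aggregator(input_graph)
  with tf.Session() as sess:
    # Result shape: [num_graphs, edge_feature_size].
    return sess.run(summed_edges)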
class EdgeBlockTest(GraphModuleTest):
def setUp(self):
super(EdgeBlockTest, self).setUp()
self._scale = 10.
self._edge_model_fn = lambda: lambda features: features * self._scale
@parameterized.named_parameters(
("all inputs", True, True, True, True),
("edges nodes only", True, False, False, False),
("receiver nodes only", False, True, False, False),
("sender nodes only", False, False, True, False),
("globals only", False, False, False, True),
("edges and sender nodes", True, False, True, False),
("receiver nodes and globals", False, True, False, True),
)
def test_output_values(
self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals):
"""Compares the output of an EdgeBlock to an explicit computation."""
input_graph = self._get_input_graph()
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
output_graph = edge_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(input_graph.edges)
if use_receiver_nodes:
model_inputs.append(blocks.broadcast_receiver_nodes_to_edges(input_graph))
if use_sender_nodes:
model_inputs.append(blocks.broadcast_sender_nodes_to_edges(input_graph))
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_edges(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.nodes, output_graph.nodes)
self.assertEqual(input_graph.globals, output_graph.globals)
with tf.Session() as sess:
output_graph_out, model_inputs_out = sess.run(
(output_graph, model_inputs))
expected_output_edges = model_inputs_out * self._scale
self.assertNDArrayNear(
expected_output_edges, output_graph_out.edges, err=1e-4)
@parameterized.named_parameters(
("all inputs", True, True, True, True, 12),
("edges only", True, False, False, False, 4),
("receivers only", False, True, False, False, 2),
("senders only", False, False, True, False, 2),
("globals only", False, False, False, True, 4),
)
def test_created_variables(self,
use_edges, use_receiver_nodes, use_sender_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by an EdgeBlock."""
output_size = 10
expected_var_shapes_dict = {
"edge_block/mlp/linear_0/b:0": [output_size],
"edge_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
edge_block = blocks.EdgeBlock(
edge_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
edge_block(input_graph)
variables = edge_block.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing node (receivers only)", False, True, False, False, ("nodes",)),
("missing node (senders only)", False, False, True, False, ("nodes",)),
("missing edge data", True, False, False, False, ("edges",)),
("missing edges (but no edge consumption)", False, True, True, False,
("edges", "senders", "receivers")),
("missing globals", False, False, False, True, ("globals",)),
)
def test_missing_field_raises_exception(
self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals,
none_fields):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph(none_fields)
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
with self.assertRaisesRegexp(ValueError, "field cannot be None"):
edge_block(input_graph)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched edges and r. nodes", True, True, False, False, "nodes"),
("mismatched edges and s. nodes", True, False, True, False, "nodes"),
("mismatched edges and globals", True, False, False, True, "edges"),
("mismatched nodes and globals", False, True, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals
)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, False, False, True, "nodes"),
("mismatched edges", False, True, True, True, "edges"),
("mismatched globals", True, True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_nodes,
use_sender_nodes=use_nodes,
use_globals=use_globals)
output_graph = edge_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(input_graph.edges)
if use_nodes:
model_inputs.append(blocks.broadcast_receiver_nodes_to_edges(input_graph))
model_inputs.append(blocks.broadcast_sender_nodes_to_edges(input_graph))
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_edges(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.nodes, output_graph.nodes)
self.assertEqual(input_graph.globals, output_graph.globals)
with tf.Session() as sess:
actual_edges, model_inputs_out = sess.run(
(output_graph.edges, model_inputs))
expected_output_edges = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_edges, actual_edges, err=1e-4)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegexp(ValueError, "At least one of "):
blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=False,
use_globals=False)
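# Illustrative sketch (not a test): an EdgeBlock concatenates the enabled
# inputs per edge and feeds them to the edge model; the output size here is
# an arbitrary choice.
def _example_edge_block():
  """Updates edge features with an EdgeBlock wrapping a small MLP."""
  input_graph = utils_tf.data_dicts_to_graphs_tuple(
      [SMALL_GRAPH_1, SMALL_GRAPH_2])
  edge_block = blocks.EdgeBlock(
      edge_model_fn=functools.partial(snt.nets.MLP, output_sizes=[16]))
  output_graph = edge_block(input_graph)  # Only `edges` is updated.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    return sess.run(output_graph.edges)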
class NodeBlockTest(GraphModuleTest):
def setUp(self):
super(NodeBlockTest, self).setUp()
self._scale = 10.
self._node_model_fn = lambda: lambda features: features * self._scale
@parameterized.named_parameters(
("all inputs, custom reductions", True, True, True, True,
tf.unsorted_segment_sum, tf.unsorted_segment_mean),
("received edges only, blocks reducer",
True, False, False, False, blocks.unsorted_segment_max_or_zero, None),
("sent edges only, custom reduction",
False, True, False, False, None, tf.unsorted_segment_prod),
("nodes only",
False, False, True, False, None, None),
("globals only",
False, False, False, True, None, None),
("received edges and nodes, custom reductions",
True, False, True, False,
blocks.unsorted_segment_min_or_zero, tf.unsorted_segment_prod),
("sent edges and globals, custom reduction",
False, True, False, True, None, blocks.unsorted_segment_min_or_zero),
)
def test_output_values(
self, use_received_edges, use_sent_edges, use_nodes,
use_globals, received_edges_reducer, sent_edges_reducer):
"""Compares the output of a NodeBlock to an explicit computation."""
input_graph = self._get_input_graph()
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
output_graph = node_block(input_graph)
model_inputs = []
if use_received_edges:
model_inputs.append(
blocks.ReceivedEdgesToNodesAggregator(
received_edges_reducer)(input_graph))
if use_sent_edges:
model_inputs.append(
blocks.SentEdgesToNodesAggregator(sent_edges_reducer)(input_graph))
if use_nodes:
model_inputs.append(input_graph.nodes)
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.edges, output_graph.edges)
self.assertEqual(input_graph.globals, output_graph.globals)
with tf.Session() as sess:
output_graph_out, model_inputs_out = sess.run(
(output_graph, model_inputs))
expected_output_nodes = model_inputs_out * self._scale
self.assertNDArrayNear(
expected_output_nodes, output_graph_out.nodes, err=1e-4)
@parameterized.named_parameters(
("all inputs", True, True, True, True, 14),
("received edges only", True, False, False, False, 4),
("sent edges only", False, True, False, False, 4),
("nodes only", False, False, True, False, 2),
("globals only", False, False, False, True, 4),
)
def test_created_variables(self,
use_received_edges, use_sent_edges, use_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by a NodeBlock."""
output_size = 10
expected_var_shapes_dict = {
"node_block/mlp/linear_0/b:0": [output_size],
"node_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
node_block = blocks.NodeBlock(
node_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals)
node_block(input_graph)
variables = node_block.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing nodes", False, False, True, False, ("nodes",)),
("missing edge data (receivers only)",
True, False, False, False, ("edges",)),
("missing edge data (senders only)",
False, True, False, False, ("edges",)),
("missing globals", False, False, False, True, ("globals",)),
)
def test_missing_field_raises_exception(
self, use_received_edges, use_sent_edges, use_nodes, use_globals,
none_fields):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph(none_fields)
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals)
with self.assertRaisesRegexp(ValueError, "field cannot be None"):
node_block(input_graph)
@parameterized.named_parameters(
("no received edges reducer", True, False, None, tf.unsorted_segment_sum),
("no sent edges reducer", False, True, tf.unsorted_segment_sum, None),
)
def test_missing_aggregation_raises_exception(
self, use_received_edges, use_sent_edges,
received_edges_reducer, sent_edges_reducer):
"""Checks that missing a required aggregation argument raises an error."""
with self.assertRaisesRegexp(ValueError, "should not be None"):
blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=False,
use_globals=False,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched nodes and r. edges", True, False, True, False, "edges"),
("mismatched nodes and s. edges", True, False, True, False, "edges"),
("mismatched edges and globals", True, False, False, True, "globals"),
("mismatched nodes and globals", False, True, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, True, False, True, "nodes"),
("mismatched edges", False, False, True, True, "edges"),
("mismatched globals", True, True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_edges,
use_sent_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
output_graph = node_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.ReceivedEdgesToNodesAggregator(
tf.unsorted_segment_sum)(input_graph))
model_inputs.append(
blocks.SentEdgesToNodesAggregator(
tf.unsorted_segment_sum)(input_graph))
if use_nodes:
model_inputs.append(input_graph.nodes)
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.edges, output_graph.edges)
self.assertEqual(input_graph.globals, output_graph.globals)
with tf.Session() as sess:
actual_nodes, model_inputs_out = sess.run(
(output_graph.nodes, model_inputs))
expected_output_nodes = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_nodes, actual_nodes, err=1e-4)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegexp(ValueError, "At least one of "):
blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=False,
use_globals=False)
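# Illustrative sketch (not a test): a NodeBlock aggregates edge features per
# node (according to its constructor flags), concatenates them with node and
# broadcast global features, and applies the node model.
def _example_node_block():
  """Updates node features with a NodeBlock wrapping a small MLP."""
  input_graph = utils_tf.data_dicts_to_graphs_tuple(
      [SMALL_GRAPH_1, SMALL_GRAPH_2])
  node_block = blocks.NodeBlock(
      node_model_fn=functools.partial(snt.nets.MLP, output_sizes=[16]))
  output_graph = node_block(input_graph)  # Only `nodes` is updated.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    return sess.run(output_graph.nodes)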
class GlobalBlockTest(GraphModuleTest):
"""Tests for the GlobalBlock."""
def setUp(self):
super(GlobalBlockTest, self).setUp()
self._scale = 10.
self._global_model_fn = lambda: lambda features: features * self._scale
@parameterized.named_parameters(
("all_inputs, custom reductions",
True, True, True, tf.unsorted_segment_sum, tf.unsorted_segment_mean),
("edges only, blocks reducer",
True, False, False, blocks.unsorted_segment_max_or_zero, None),
("nodes only, custom reduction",
False, True, False, None, tf.unsorted_segment_prod),
("globals only",
False, False, True, None, None),
("edges and nodes, blocks reducer",
True, True, False, blocks.unsorted_segment_min_or_zero,
tf.unsorted_segment_prod),
("nodes and globals, blocks reducer",
False, True, True, None, blocks.unsorted_segment_min_or_zero),
)
def test_output_values(
self, use_edges, use_nodes, use_globals, edges_reducer, nodes_reducer):
"""Compares the output of a GlobalBlock to an explicit computation."""
input_graph = self._get_input_graph()
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
output_graph = global_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.EdgesToGlobalsAggregator(edges_reducer)(input_graph))
if use_nodes:
model_inputs.append(
blocks.NodesToGlobalsAggregator(nodes_reducer)(input_graph))
if use_globals:
model_inputs.append(input_graph.globals)
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.edges, output_graph.edges)
self.assertEqual(input_graph.nodes, output_graph.nodes)
with tf.Session() as sess:
output_graph_out, model_inputs_out = sess.run(
(output_graph, model_inputs))
expected_output_globals = model_inputs_out * self._scale
self.assertNDArrayNear(
expected_output_globals, output_graph_out.globals, err=1e-4)
@parameterized.named_parameters(
("default", True, True, True, 10),
("use edges only", True, False, False, 4),
("use nodes only", False, True, False, 2),
("use globals only", False, False, True, 4),
)
def test_created_variables(self, use_edges, use_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by a GlobalBlock."""
output_size = 10
expected_var_shapes_dict = {
"global_block/mlp/linear_0/b:0": [output_size],
"global_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
global_block = blocks.GlobalBlock(
global_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
global_block(input_graph)
variables = global_block.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing edges", True, False, False, "edges"),
("missing nodes", False, True, False, "nodes"),
("missing globals", False, False, True, "globals"),
)
def test_missing_field_raises_exception(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph([none_field])
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
with self.assertRaisesRegexp(ValueError, "field cannot be None"):
global_block(input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
output_graph = global_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.EdgesToGlobalsAggregator(tf.unsorted_segment_sum)(input_graph))
if use_nodes:
model_inputs.append(
blocks.NodesToGlobalsAggregator(tf.unsorted_segment_sum)(input_graph))
if use_globals:
model_inputs.append(input_graph.globals)
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertEqual(input_graph.edges, output_graph.edges)
self.assertEqual(input_graph.nodes, output_graph.nodes)
with tf.Session() as sess:
actual_globals, model_inputs_out = sess.run(
(output_graph.globals, model_inputs))
expected_output_globals = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_globals, actual_globals, err=1e-4)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched nodes and edges", True, True, False, "edges"),
("mismatched edges and globals", True, False, True, "globals"),
("mismatched nodes and globals", False, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_edges,
use_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, False, True, "nodes"),
("mismatched edges", False, True, True, "edges"),
("mismatched globals", True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_edges,
use_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegexp(ValueError, "At least one of "):
blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=False,
use_nodes=False,
use_globals=False)
@parameterized.named_parameters(
("missing edges reducer", True, False, None, tf.unsorted_segment_sum),
("missing nodes reducer", False, True, tf.unsorted_segment_sum, None),
)
def test_missing_aggregation_raises_exception(
self, use_edges, use_nodes, edges_reducer,
nodes_reducer):
"""Checks that missing a required aggregation argument raises an error."""
with self.assertRaisesRegexp(ValueError, "should not be None"):
blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=False,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
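# Illustrative sketch (not a test): a GlobalBlock aggregates edge and node
# features per graph, concatenates them with the existing globals (depending
# on its constructor flags), and applies the global model.
def _example_global_block():
  """Updates global features with a GlobalBlock wrapping a small MLP."""
  input_graph = utils_tf.data_dicts_to_graphs_tuple(
      [SMALL_GRAPH_1, SMALL_GRAPH_2])
  global_block = blocks.GlobalBlock(
      global_model_fn=functools.partial(snt.nets.MLP, output_sizes=[8]))
  output_graph = global_block(input_graph)  # Only `globals` is updated.
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    return sess.run(output_graph.globals)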
def _mask_leading_dimension(tensor):
return tf.placeholder_with_default(tensor,
[None] + tensor.get_shape().as_list()[1:])
class CommonBlockTests(GraphModuleTest):
"""Tests that are common to the EdgeBlock, NodeBlock and GlobalBlock."""
@parameterized.named_parameters(
("edge block", blocks.EdgeBlock),
("node block", blocks.NodeBlock),
("global block", blocks.GlobalBlock),
)
def test_dynamic_batch_sizes(self, block_constructor):
"""Checks that all batch sizes are as expected through a GraphNetwork."""
input_graph = self._get_input_graph()
placeholders = input_graph.map(_mask_leading_dimension, graphs.ALL_FIELDS)
model = block_constructor(
functools.partial(snt.nets.MLP, output_sizes=[10]))
output = model(placeholders)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
other_input_graph = utils_np.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
actual = sess.run(output, {placeholders: other_input_graph})
for k, v in other_input_graph._asdict().items():
self.assertEqual(v.shape[0], getattr(actual, k).shape[0])
@parameterized.named_parameters(
("float64 data, edge block", tf.float64, tf.int32, blocks.EdgeBlock),
("int64 indices, edge block", tf.float32, tf.int64, blocks.EdgeBlock),
("float64 data, node block", tf.float64, tf.int32, blocks.NodeBlock),
("int64 indices, node block", tf.float32, tf.int64, blocks.NodeBlock),
("float64 data, global block", tf.float64, tf.int32, blocks.GlobalBlock),
("int64 indices, global block", tf.float32, tf.int64, blocks.GlobalBlock),
)
def test_dtypes(self, data_dtype, indices_dtype, block_constructor):
"""Checks that all the output types are as expected for blocks."""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype),
["nodes", "edges", "globals"])
input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype),
["receivers", "senders"])
model = block_constructor(
functools.partial(snt.nets.MLP, output_sizes=[10]))
output = model(input_graph)
for field in ["nodes", "globals", "edges"]:
self.assertEqual(data_dtype, getattr(output, field).dtype)
for field in ["receivers", "senders"]:
self.assertEqual(indices_dtype, getattr(output, field).dtype)
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests/blocks_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for modules.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import modules
from graph_nets import utils_np
from graph_nets import utils_tf
import numpy as np
import sonnet as snt
import tensorflow as tf
SMALL_GRAPH_1 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 1],
"receivers": [1, 2],
}
SMALL_GRAPH_2 = {
"globals": [-1.1, -1.2, -1.3],
"nodes": [[-10.1, -10.2], [-20.1, -20.2], [-30.1, -30.2]],
"edges": [[-101., -102., -103., -104.]],
"senders": [1,],
"receivers": [2,],
}
SMALL_GRAPH_3 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [1, 1],
"receivers": [0, 2],
}
SMALL_GRAPH_4 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 2],
"receivers": [1, 1],
}
def _mask_leading_dimension(tensor):
return tf.placeholder_with_default(tensor,
[None] + tensor.get_shape().as_list()[1:])
class GraphModuleTest(tf.test.TestCase, parameterized.TestCase):
"""Base class for all the tests in this file."""
def setUp(self):
super(GraphModuleTest, self).setUp()
tf.set_random_seed(0)
def _assert_all_none_or_all_close(self, expected, actual, *args, **kwargs):
if expected is None:
return self.assertAllEqual(expected, actual)
return self.assertAllClose(expected, actual, *args, **kwargs)
def _get_input_graph(self, none_field=None):
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4])
if none_field:
input_graph = input_graph.replace(**{none_field: None})
return input_graph
def _get_shaped_input_graph(self):
return graphs.GraphsTuple(
nodes=tf.zeros([3, 4, 5, 11], dtype=tf.float32),
edges=tf.zeros([5, 4, 5, 12], dtype=tf.float32),
globals=tf.zeros([2, 4, 5, 13], dtype=tf.float32),
receivers=tf.range(5, dtype=tf.int32) // 3,
senders=tf.range(5, dtype=tf.int32) % 3,
n_node=tf.constant([2, 1], dtype=tf.int32),
n_edge=tf.constant([3, 2], dtype=tf.int32),
)
def _get_shaped_model_fns(self):
edge_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3])
node_model_fn = functools.partial(
snt.Conv2D, output_channels=8, kernel_shape=[3, 3])
global_model_fn = functools.partial(
snt.Conv2D, output_channels=7, kernel_shape=[3, 3])
return edge_model_fn, node_model_fn, global_model_fn
def _assert_build_and_run(self, network, input_graph):
# No error at construction time.
output = network(input_graph)
# No error at runtime.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(output)
class GraphIndependentTest(GraphModuleTest):
def _get_model(self, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.nets.MLP, output_sizes=[5]),
"node_model_fn": functools.partial(snt.nets.MLP, output_sizes=[10]),
"global_model_fn": functools.partial(snt.nets.MLP, output_sizes=[15]),
}
if name:
kwargs["name"] = name
return modules.GraphIndependent(**kwargs)
def test_same_as_subblocks(self):
"""Compares the output to explicit subblocks output."""
input_graph = self._get_input_graph()
model = self._get_model()
output_graph = model(input_graph)
expected_output_edges = model._edge_model(input_graph.edges)
expected_output_nodes = model._node_model(input_graph.nodes)
expected_output_globals = model._global_model(input_graph.globals)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_graph_out,
expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
(output_graph,
expected_output_edges,
expected_output_nodes,
expected_output_globals))
self._assert_all_none_or_all_close(expected_edges_out,
output_graph_out.edges)
self._assert_all_none_or_all_close(expected_nodes_out,
output_graph_out.nodes)
self._assert_all_none_or_all_close(expected_globals_out,
output_graph_out.globals)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a GraphIndependent."""
name = name if name is not None else "graph_independent"
expected_var_shapes_dict = {
name + "/edge_model/mlp/linear_0/b:0": [5],
name + "/edge_model/mlp/linear_0/w:0": [4, 5],
name + "/node_model/mlp/linear_0/b:0": [10],
name + "/node_model/mlp/linear_0/w:0": [2, 10],
name + "/global_model/mlp/linear_0/b:0": [15],
name + "/global_model/mlp/linear_0/w:0": [3, 15],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
def test_gradient_flow(self):
"""Verifies that gradient flow is as expected."""
input_graph = self._get_input_graph()
model = self._get_model()
output_graph = model(input_graph)
for input_field in ["nodes", "edges", "globals"]:
input_tensor = getattr(input_graph, input_field)
for output_field in ["nodes", "edges", "globals"]:
output_tensor = getattr(output_graph, output_field)
gradients = tf.gradients(output_tensor, input_tensor)
if input_field == output_field:
self.assertNotEqual(None, gradients[0])
else:
self.assertListEqual([None], gradients)
@parameterized.named_parameters(
("differently shaped edges", "edges"),
("differently shaped nodes", "nodes"),
("differently shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
"""A GraphIndependent does not make assumptions on its inputs shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
network = modules.GraphIndependent(
edge_model_fn, node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class GraphNetworkTest(GraphModuleTest):
def _get_model(self):
edge_model_fn = functools.partial(snt.Linear, output_size=5)
node_model_fn = functools.partial(snt.Linear, output_size=10)
global_model_fn = functools.partial(snt.Linear, output_size=15)
return modules.GraphNetwork(
edge_model_fn=edge_model_fn,
node_model_fn=node_model_fn,
global_model_fn=global_model_fn)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a GraphNetwork."""
name = name if name is not None else "graph_network"
expected_var_shapes_dict = {
name + "/edge_block/mlp/linear_0/b:0": [5],
name + "/edge_block/mlp/linear_0/w:0": [4 + 4 + 3, 5],
name + "/node_block/mlp/linear_0/b:0": [10],
name + "/node_block/mlp/linear_0/w:0": [5 + 2 + 3, 10],
name + "/global_block/mlp/linear_0/b:0": [15],
name + "/global_block/mlp/linear_0/w:0": [10 + 5 + 3, 15],
}
input_graph = self._get_input_graph()
extra_kwargs = {"name": name} if name else {}
model = modules.GraphNetwork(
edge_model_fn=functools.partial(snt.nets.MLP, output_sizes=[5]),
node_model_fn=functools.partial(snt.nets.MLP, output_sizes=[10]),
global_model_fn=functools.partial(snt.nets.MLP, output_sizes=[15]),
**extra_kwargs)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("reduce sum reduction", tf.unsorted_segment_sum,),
("reduce max or zero reduction", blocks.unsorted_segment_max_or_zero,),)
def test_same_as_subblocks(self, reducer):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock` and `GlobalBlock`.
"""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(snt.Linear, output_size=5)
node_model_fn = functools.partial(snt.Linear, output_size=10)
global_model_fn = functools.partial(snt.Linear, output_size=15)
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
node_model_fn=node_model_fn,
global_model_fn=global_model_fn,
reducer=reducer)
output_graph = graph_network(input_graph)
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: graph_network._edge_block._edge_model,
use_sender_nodes=True,
use_edges=True,
use_receiver_nodes=True,
use_globals=True)
node_block = blocks.NodeBlock(
node_model_fn=lambda: graph_network._node_block._node_model,
use_nodes=True,
use_sent_edges=False,
use_received_edges=True,
use_globals=True,
received_edges_reducer=reducer)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: graph_network._global_block._global_model,
use_nodes=True,
use_edges=True,
use_globals=True,
edges_reducer=reducer,
nodes_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = node_block(expected_output_edge_block)
expected_output_global_block = global_block(expected_output_node_block)
expected_edges = expected_output_edge_block.edges
expected_nodes = expected_output_node_block.nodes
expected_globals = expected_output_global_block.globals
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_graph_out,
expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
(output_graph, expected_edges, expected_nodes, expected_globals))
self._assert_all_none_or_all_close(expected_edges_out,
output_graph_out.edges)
self._assert_all_none_or_all_close(expected_nodes_out,
output_graph_out.nodes)
self._assert_all_none_or_all_close(expected_globals_out,
output_graph_out.globals)
def test_dynamic_batch_sizes(self):
"""Checks that all batch sizes are as expected through a GraphNetwork."""
input_graph = self._get_input_graph()
placeholders = input_graph.map(_mask_leading_dimension, graphs.ALL_FIELDS)
model = self._get_model()
output = model(placeholders)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
other_input_graph = utils_np.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
actual = sess.run(output, {placeholders: other_input_graph})
for k, v in other_input_graph._asdict().items():
self.assertEqual(v.shape[0], getattr(actual, k).shape[0])
@parameterized.named_parameters(
("float64 data", tf.float64, tf.int32),
("int64 indices", tf.float32, tf.int64),)
def test_dtypes(self, data_dtype, indices_dtype):
"""Checks that all the output types are as expected in a GraphNetwork."""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype),
["nodes", "globals", "edges"])
input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype),
["senders", "receivers"])
model = self._get_model()
output = model(input_graph)
for field in ["nodes", "globals", "edges"]:
self.assertEqual(data_dtype, getattr(output, field).dtype)
for field in ["receivers", "senders"]:
self.assertEqual(indices_dtype, getattr(output, field).dtype)
@parameterized.named_parameters(
("edges only", True, False, False, False),
("receivers only", False, True, False, False),
("senders only", False, False, True, False),
("globals only", False, False, False, True),)
def test_edge_block_options(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals):
"""Test for configuring the EdgeBlock options."""
reducer = tf.unsorted_segment_sum
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(snt.Linear, output_size=10)
edge_block_opt = {"use_edges": use_edges,
"use_receiver_nodes": use_receiver_nodes,
"use_sender_nodes": use_sender_nodes,
"use_globals": use_globals}
# Identity node model
node_model_fn = lambda: tf.identity
node_block_opt = {"use_received_edges": False,
"use_sent_edges": False,
"use_nodes": True,
"use_globals": False}
# Identity global model
global_model_fn = lambda: tf.identity
global_block_opt = {"use_globals": True,
"use_nodes": False,
"use_edges": False}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt,
reducer=reducer)
output_graph = graph_network(input_graph)
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: graph_network._edge_block._edge_model,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = expected_output_edge_block
expected_output_global_block = expected_output_node_block
expected_edges = expected_output_edge_block.edges
expected_nodes = expected_output_node_block.nodes
expected_globals = expected_output_global_block.globals
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_graph_out,
expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
(output_graph, expected_edges, expected_nodes, expected_globals))
self._assert_all_none_or_all_close(expected_edges_out,
output_graph_out.edges)
self._assert_all_none_or_all_close(expected_nodes_out,
output_graph_out.nodes)
self._assert_all_none_or_all_close(expected_globals_out,
output_graph_out.globals)
@parameterized.named_parameters(
("received edges only", True, False, False, False, None, None),
("received edges, max reduction",
True, False, False, False, tf.unsorted_segment_max, None),
("sent edges only", False, True, False, False, None, None),
("sent edges, max reduction",
False, True, False, False, None, tf.unsorted_segment_max),
("nodes only", False, False, True, False, None, None),
("globals only", False, False, False, True, None, None),
)
def test_node_block_options(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
received_edges_reducer,
sent_edges_reducer):
"""Test for configuring the NodeBlock options."""
input_graph = self._get_input_graph()
if use_received_edges:
received_edges_reducer = received_edges_reducer or tf.unsorted_segment_sum
if use_sent_edges:
sent_edges_reducer = sent_edges_reducer or tf.unsorted_segment_sum
# Identity edge model.
edge_model_fn = lambda: tf.identity
edge_block_opt = {"use_edges": True,
"use_receiver_nodes": False,
"use_sender_nodes": False,
"use_globals": False}
node_model_fn = functools.partial(snt.Linear, output_size=10)
node_block_opt = {"use_received_edges": use_received_edges,
"use_sent_edges": use_sent_edges,
"use_nodes": use_nodes,
"use_globals": use_globals,
"received_edges_reducer": received_edges_reducer,
"sent_edges_reducer": sent_edges_reducer}
# Identity global model
global_model_fn = lambda: tf.identity
global_block_opt = {"use_globals": True,
"use_nodes": False,
"use_edges": False}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt)
output_graph = graph_network(input_graph)
node_block = blocks.NodeBlock(
node_model_fn=lambda: graph_network._node_block._node_model,
use_nodes=use_nodes,
use_sent_edges=use_sent_edges,
use_received_edges=use_received_edges,
use_globals=use_globals,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
expected_output_edge_block = input_graph
expected_output_node_block = node_block(input_graph)
expected_output_global_block = expected_output_node_block
expected_edges = expected_output_edge_block.edges
expected_nodes = expected_output_node_block.nodes
expected_globals = expected_output_global_block.globals
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_graph_out,
expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
(output_graph, expected_edges, expected_nodes, expected_globals))
self._assert_all_none_or_all_close(expected_edges_out,
output_graph_out.edges)
self._assert_all_none_or_all_close(expected_nodes_out,
output_graph_out.nodes)
self._assert_all_none_or_all_close(expected_globals_out,
output_graph_out.globals)
@parameterized.named_parameters(
("edges only", True, False, False, None, None),
("edges only, max", True, False, False, tf.unsorted_segment_max, None),
("nodes only", False, True, False, None, None),
("nodes only, max", False, True, False, None, tf.unsorted_segment_max),
("globals only", False, False, True, None, None),
)
def test_global_block_options(self,
use_edges,
use_nodes,
use_globals,
edges_reducer,
nodes_reducer):
"""Test for configuring the NodeBlock options."""
input_graph = self._get_input_graph()
if use_edges:
edges_reducer = edges_reducer or tf.unsorted_segment_sum
if use_nodes:
nodes_reducer = nodes_reducer or tf.unsorted_segment_sum
# Identity edge model.
edge_model_fn = lambda: tf.identity
edge_block_opt = {"use_edges": True,
"use_receiver_nodes": False,
"use_sender_nodes": False,
"use_globals": False}
# Identity node model
node_model_fn = lambda: tf.identity
node_block_opt = {"use_received_edges": False,
"use_sent_edges": False,
"use_nodes": True,
"use_globals": False}
global_model_fn = functools.partial(snt.Linear, output_size=10)
global_block_opt = {"use_globals": use_globals,
"use_nodes": use_nodes,
"use_edges": use_edges,
"edges_reducer": edges_reducer,
"nodes_reducer": nodes_reducer}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt)
output_graph = graph_network(input_graph)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: graph_network._global_block._global_model,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
expected_output_edge_block = input_graph
expected_output_node_block = expected_output_edge_block
expected_output_global_block = global_block(expected_output_node_block)
expected_edges = expected_output_edge_block.edges
expected_nodes = expected_output_node_block.nodes
expected_globals = expected_output_global_block.globals
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_graph_out,
expected_edges_out, expected_nodes_out, expected_globals_out) = sess.run(
(output_graph, expected_edges, expected_nodes, expected_globals))
self._assert_all_none_or_all_close(expected_edges_out,
output_graph_out.edges)
self._assert_all_none_or_all_close(expected_nodes_out,
output_graph_out.nodes)
self._assert_all_none_or_all_close(expected_globals_out,
output_graph_out.globals)
def test_higher_rank_outputs(self):
"""Tests that a graph net can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
network = modules.GraphNetwork(*self._get_shaped_model_fns())
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("wrongly shaped edges", "edges"),
("wrongly shaped nodes", "nodes"),
("wrongly shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
graph_network = modules.GraphNetwork(
edge_model_fn, node_model_fn, global_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
def test_incompatible_higher_rank_partial_outputs_raises(self):
"""A error should be raised if partial outputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
edge_model_fn_2 = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
graph_network = modules.GraphNetwork(
edge_model_fn_2, node_model_fn, global_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
node_model_fn_2 = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
graph_network = modules.GraphNetwork(
edge_model_fn, node_model_fn_2, global_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
class InteractionNetworkTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=5),
"node_model_fn": functools.partial(snt.Linear, output_size=10)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.InteractionNetwork(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by an InteractionNetwork."""
name = name if name is not None else "interaction_network"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [5],
name + "/edge_block/linear/w:0": [2 + 2 + 4, 5],
name + "/node_block/linear/b:0": [10],
name + "/node_block/linear/w:0": [5 + 2, 10],
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.unsorted_segment_sum,),
("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
("no globals", tf.unsorted_segment_sum, "globals"),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock`s.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
interaction_network = self._get_model(reducer)
output_graph = interaction_network(input_graph)
edges_out = output_graph.edges
nodes_out = output_graph.nodes
self.assertAllEqual(input_graph.globals, output_graph.globals)
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: interaction_network._edge_block._edge_model,
use_sender_nodes=True,
use_edges=True,
use_receiver_nodes=True,
use_globals=False)
node_block = blocks.NodeBlock(
node_model_fn=lambda: interaction_network._node_block._node_model,
use_nodes=True,
use_sent_edges=False,
use_received_edges=True,
use_globals=False,
received_edges_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = node_block(expected_output_edge_block)
expected_edges = expected_output_edge_block.edges
expected_nodes = expected_output_node_block.nodes
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(actual_edges_out, actual_nodes_out,
expected_edges_out, expected_nodes_out) = sess.run(
[edges_out, nodes_out, expected_edges, expected_nodes])
self._assert_all_none_or_all_close(expected_edges_out, actual_edges_out)
self._assert_all_none_or_all_close(expected_nodes_out, actual_nodes_out)
@parameterized.named_parameters(
("no nodes", ["nodes"],),
("no edge data", ["edges"],),
("no edges", ["edges", "receivers", "senders"],),
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
interaction_network = self._get_model()
with self.assertRaises(ValueError):
interaction_network(input_graph)
def test_higher_rank_outputs(self):
"""Tests that an IN can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
self._assert_build_and_run(graph_network, input_graph)
@parameterized.named_parameters(
("wrongly shaped edges", "edges"),
("wrongly shaped nodes", "nodes"),)
def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
"""Am exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
def test_incompatible_higher_rank_inputs_no_raise(self):
"""The globals can have an arbitrary shape in the input."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
input_graph = input_graph.replace(
globals=tf.transpose(input_graph.globals, [0, 2, 1, 3]))
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
self._assert_build_and_run(graph_network, input_graph)
class RelationNetworkTest(GraphModuleTest):
def _get_model(self, reducer=tf.unsorted_segment_sum, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=5),
"global_model_fn": functools.partial(snt.Linear, output_size=15)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.RelationNetwork(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a RelationNetwork."""
name = name if name is not None else "relation_network"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [5],
name + "/edge_block/linear/w:0": [2 + 2, 5],
name + "/global_block/linear/b:0": [15],
name + "/global_block/linear/w:0": [5, 15],
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.unsorted_segment_sum, None),
("max or zero reduction", blocks.unsorted_segment_max_or_zero, None),
("no edges", tf.unsorted_segment_sum, "edges"),
("no globals", tf.unsorted_segment_sum, "globals"),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `GlobalBlock`.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
relation_network = self._get_model(reducer)
output_graph = relation_network(input_graph)
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: relation_network._edge_block._edge_model,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=False)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: relation_network._global_block._global_model,
use_edges=True,
use_nodes=False,
use_globals=False,
edges_reducer=reducer,
nodes_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_global_block = global_block(expected_output_edge_block)
self.assertEqual(input_graph.edges, output_graph.edges)
self.assertEqual(input_graph.nodes, output_graph.nodes)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(actual_globals_out, expected_globals_out) = sess.run(
(output_graph.globals, expected_output_global_block.globals))
self._assert_all_none_or_all_close(expected_globals_out, actual_globals_out)
@parameterized.named_parameters(
("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
relation_network = self._get_model()
with self.assertRaises(ValueError):
relation_network(input_graph)
@parameterized.named_parameters(
("differently shaped edges", "edges"),
("differently shaped nodes", "nodes"),
("differently shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
"""A RelationNetwork does not make assumptions on its inputs shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, _, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
network = modules.RelationNetwork(edge_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class DeepSetsTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"node_model_fn": functools.partial(snt.Linear, output_size=5),
"global_model_fn": functools.partial(snt.Linear, output_size=15)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.DeepSets(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a DeepSets network."""
name = name if name is not None else "deep_sets"
expected_var_shapes_dict = {
name + "/node_block/linear/b:0": [5],
name + "/node_block/linear/w:0": [2 + 3, 5],
name + "/global_block/linear/b:0": [15],
name + "/global_block/linear/w:0": [5, 15],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.unsorted_segment_sum, []),
("no edge data", tf.unsorted_segment_sum, ["edges"]),
("no edges", tf.unsorted_segment_sum, ["edges", "receivers", "senders"]),
("max or zero reduction", blocks.unsorted_segment_max_or_zero, []),
)
def test_same_as_subblocks(self, reducer, none_fields):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the NodeBlock.
none_fields: (list of strings) The corresponding fields are removed from
the input graph.
"""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda _: None, none_fields)
deep_sets = self._get_model(reducer)
output_graph = deep_sets(input_graph)
output_nodes = output_graph.nodes
output_globals = output_graph.globals
node_block = blocks.NodeBlock(
node_model_fn=lambda: deep_sets._node_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=True)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: deep_sets._global_block._global_model,
use_edges=False,
use_nodes=True,
use_globals=False,
nodes_reducer=reducer)
node_block_out = node_block(input_graph)
expected_nodes = node_block_out.nodes
expected_globals = global_block(node_block_out).globals
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers)
self.assertAllEqual(input_graph.senders, output_graph.senders)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
(output_nodes_, output_globals_, expected_nodes_,
expected_globals_) = sess.run(
[output_nodes, output_globals, expected_nodes, expected_globals])
self._assert_all_none_or_all_close(expected_nodes_, output_nodes_)
self._assert_all_none_or_all_close(expected_globals_, output_globals_)
@parameterized.parameters(
("nodes",), ("globals",),
)
def test_field_must_not_be_none(self, none_field):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.replace(**{none_field: None})
deep_sets = self._get_model()
with self.assertRaises(ValueError):
deep_sets(input_graph)
def test_incompatible_higher_rank_inputs_raises(self):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
nodes=tf.transpose(input_graph.nodes, [0, 2, 1, 3]))
graph_network = modules.DeepSets(node_model_fn, global_model_fn)
with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
graph_network(input_graph)
def test_incompatible_higher_rank_partial_outputs_no_raise(self):
"""There is no constraint on the size of the partial outputs."""
input_graph = self._get_shaped_input_graph()
node_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
global_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3])
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
def test_incompatible_higher_rank_inputs_no_raise(self):
"""A DeepSets does not make assumptions on the shape if its input edges."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
edges=tf.transpose(input_graph.edges, [0, 2, 1, 3]))
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class CommNetTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a DeepSets network."""
name = name if name is not None else "comm_net"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [15],
name + "/edge_block/linear/w:0": [2, 15],
name + "/node_encoder_block/linear/b:0": [8],
name + "/node_encoder_block/linear/w:0": [2, 8],
name + "/node_block/linear/b:0": [5],
name + "/node_block/linear/w:0": [15 + 8, 5],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.get_variables()
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.unsorted_segment_sum,),
("no edges", tf.unsorted_segment_sum, "edges"),
("no globals", tf.unsorted_segment_sum, "globals"),
("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock`s.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
comm_net = self._get_model(reducer)
output_graph = comm_net(input_graph)
output_nodes = output_graph.nodes
edge_subblock = blocks.EdgeBlock(
edge_model_fn=lambda: comm_net._edge_block._edge_model,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=True,
use_globals=False)
node_encoder_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_encoder_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
node_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_block._node_model,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
edge_block_out = edge_subblock(input_graph)
encoded_nodes = node_encoder_subblock(input_graph).nodes
node_input_graph = input_graph.replace(
edges=edge_block_out.edges, nodes=encoded_nodes)
node_block_out = node_subblock(node_input_graph)
expected_nodes = node_block_out.nodes
self.assertAllEqual(input_graph.globals, output_graph.globals)
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers,)
self.assertAllEqual(input_graph.senders, output_graph.senders)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
actual_nodes_output, expected_nodes_output = sess.run(
[output_nodes, expected_nodes])
self._assert_all_none_or_all_close(expected_nodes_output,
actual_nodes_output)
@parameterized.named_parameters(
("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
comm_net = self._get_model()
with self.assertRaises(ValueError):
comm_net(input_graph)
def test_higher_rank_outputs(self):
"""Tests that a CommNet can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
graph_network = modules.CommNet(*self._get_shaped_model_fns())
self._assert_build_and_run(graph_network, input_graph)
class SelfAttentionTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
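  # Expected softmax values below are computed per receiver segment of
  # RECEIVERS: receiver 1 gets edge 0 only (weight 1.); receiver 5 gets edges
  # {1, 5} with logits {log(2), 0.} -> weights {2/3, 1/3}; receiver 6 gets
  # edges {2, 3, 4} with logits {log(2), 0., 0.} -> weights {1/2, 1/4, 1/4}.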
LOGITS_1D = [np.log(2), np.log(2), np.log(2), 0., 0., 0.]
SOFTMAX_1D = [1., 2/3, 0.5, 0.25, 0.25, 1/3]
LOGITS_2D = [[np.log(2), 1.], [np.log(2), 1.], [np.log(2), 1.],
[0., 1.], [0., 1.], [0., 1.]]
SOFTMAX_2D = [[1., 1.], [2/3, 0.5], [1/2, 1/3],
[1/4, 1/3], [1/4, 1/3], [1/3, 0.5]]
SENDERS = [0, 2, 2, 3, 4, 3]
RECEIVERS = [1, 5, 6, 6, 6, 5]
N_NODE = [2, 5]
N_EDGE = [1, 5]
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D),
("two dimensional", LOGITS_2D, SOFTMAX_2D),)
def test_unsorted_segment_softmax(self, data, expected_softmax):
"""Verifies variable names and shapes created by a DeepSets network."""
data = tf.constant(data, dtype=tf.float32)
segment_ids = tf.constant(self.RECEIVERS, dtype=tf.int32)
num_segments = tf.constant(sum(self.N_NODE), dtype=tf.int32)
actual_softmax = modules._unsorted_segment_softmax(
data, segment_ids, num_segments)
with tf.Session() as sess:
actual_softmax_output = sess.run(actual_softmax)
self.assertAllClose(expected_softmax, actual_softmax_output)
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D,
modules._unsorted_segment_softmax),
("two dimensional", LOGITS_2D, SOFTMAX_2D,
modules._unsorted_segment_softmax),)
def test_received_edges_normalizer(self, logits,
expected_normalized, normalizer):
graph = graphs.GraphsTuple(
nodes=None,
edges=logits,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),
)
actual_normalized_edges = modules._received_edges_normalizer(
graph, normalizer)
with tf.Session() as sess:
actual_normalized_edges_output = sess.run(actual_normalized_edges)
self.assertAllClose(expected_normalized, actual_normalized_edges_output)
def test_self_attention(self):
# Just one feature per node.
values_np = np.arange(sum(self.N_NODE)) + 1.
    # Two heads: one with positive values, one with negative values.
values_np = np.stack([values_np, values_np*-1.], axis=-1)
# Multiple features per node, per head, at different scales.
values_np = np.stack([values_np, values_np*0.1], axis=-1)
values = tf.constant(values_np, dtype=tf.float32)
keys_np = [
[[0.3, 0.4]]*2, # Irrelevant (only sender to one node)
[[0.1, 0.5]]*2, # Not used (is not a sender)
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 1], [1, 1]],
[[0.4, 0.3]]*2, # Not used (is not a sender)
[[0.3, 0.2]]*2] # Not used (is not a sender)
keys = tf.constant(keys_np, dtype=tf.float32)
queries_np = [
[[0.2, 0.7]]*2, # Not used (is not a receiver)
[[0.3, 0.2]]*2, # Irrelevant (only receives from one node)
[[0.2, 0.8]]*2, # Not used (is not a receiver)
[[0.2, 0.4]]*2, # Not used (is not a receiver)
[[0.3, 0.9]]*2, # Not used (is not a receiver)
[[0, np.log(2)], [np.log(3), 0]],
[[np.log(2), 0], [0, np.log(3)]]]
queries = tf.constant(queries_np, dtype=tf.float32)
attention_graph = graphs.GraphsTuple(
nodes=None,
edges=None,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),)
self_attention = modules.SelfAttention()
output_graph = self_attention(values, keys, queries, attention_graph)
mixed_nodes = output_graph.nodes
with tf.Session() as sess:
mixed_nodes_output = sess.run(mixed_nodes)
expected_mixed_nodes = [
[[0., 0.], [0., 0.]], # Does not receive any edges
[[1., 0.1], [-1., -0.1]], # Only receives from n0.
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[11/3, 11/3*0.1], # Head one, receives from n2(1/3) n3(2/3)
[-15/4, -15/4*0.1]], # Head two, receives from n2(1/4) n3(3/4)
[[20/5, 20/5*0.1], # Head one, receives from n2(2/5) n3(1/5) n4(2/5)
[-28/7, -28/7*0.1]], # Head two, receives from n2(3/7) n3(1/7) n4(3/7)
]
self.assertAllClose(expected_mixed_nodes, mixed_nodes_output)
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests/modules_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for utils_tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
from graph_nets.tests import test_utils
import networkx as nx
import numpy as np
from six.moves import range
import tensorflow as tf
import tree
class RepeatTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for `repeat`."""
@parameterized.named_parameters(
("base", (3,), [2, 3, 4], 0),
("empty_group_first", (3,), [0, 3, 4], 0),
("empty_group_middle", (3,), [2, 0, 4], 0),
("double_empty_group_middle", (4,), [2, 0, 0, 4], 0),
("empty_group_last", (3,), [2, 3, 0], 0),
("just_one_group", (1,), [2], 0),
("zero_groups", (0,), [], 0),
("axis 0", (2, 3, 4), [2, 3], 0),
("axis 1", (3, 2, 4), [2, 3], 1),
("axis 2", (4, 3, 2), [2, 3], 2),
("zero_groups_with_shape", (2, 0, 4), [], 1),
)
def test_repeat(self, shape, repeats, axis):
num_elements = np.prod(shape)
t = np.arange(num_elements).reshape(*shape)
expected = np.repeat(t, repeats, axis=axis)
tensor = tf.constant(t)
repeats = tf.constant(repeats, dtype=tf.int32)
op = utils_tf.repeat(tensor, repeats, axis=axis)
with tf.Session() as sess:
actual = sess.run(op)
self.assertAllEqual(expected, actual)
@parameterized.named_parameters(("default", "custom_name", None),
("custom", None, "repeat"))
def test_name_scope(self, name, expected_name):
kwargs = {"name": name} if name else {}
expected_name = expected_name if expected_name else name
t = tf.zeros([3, 2, 4])
indices = tf.constant([2, 3])
with test_utils.assert_new_op_prefixes(self, expected_name + "/"):
utils_tf.repeat(t, indices, axis=1, **kwargs)
def _generate_graph(batch_index, n_nodes=4, add_edges=True):
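  """Returns a `networkx.DiGraph` with simple numeric features.

  Node features are `[node_index, batch_index]`, edge features are
  `[edge_index, edge_index + 1, batch_index]`, and the global feature is
  `[batch_index]`.
  """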
graph = nx.DiGraph()
for node in range(n_nodes):
node_data = {"features": np.array([node, batch_index], dtype=np.float32)}
graph.add_node(node, **node_data)
if add_edges:
for edge, (receiver, sender) in enumerate(zip([0, 0, 1], [1, 2, 3])):
if sender < n_nodes and receiver < n_nodes:
edge_data = np.array([edge, edge + 1, batch_index], dtype=np.float64)
graph.add_edge(sender, receiver, features=edge_data, index=edge)
graph.graph["features"] = np.array([batch_index], dtype=np.float32)
return graph
class ConcatTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for `concat`, along various axis."""
@parameterized.named_parameters(
("no nones", []), ("stateless graph", ["nodes", "edges", "globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_concat_first_axis(self, none_fields):
graph_0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
graphs_ = [
gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
for gr in [graph_0, graph_1, graph_2]
]
graphs_ = [gr.map(lambda _: None, none_fields) for gr in graphs_]
concat_graph = utils_tf.concat(graphs_, axis=0)
for none_field in none_fields:
self.assertEqual(None, getattr(concat_graph, none_field))
concat_graph = concat_graph.map(tf.no_op, none_fields)
with tf.Session() as sess:
concat_graph = sess.run(concat_graph)
if "nodes" not in none_fields:
self.assertAllEqual(
np.array([0, 1, 2, 0, 1, 0, 1, 0, 1, 2]),
[x[0] for x in concat_graph.nodes])
self.assertAllEqual(
np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3]),
[x[1] for x in concat_graph.nodes])
if "edges" not in none_fields:
self.assertAllEqual(
np.array([0, 1, 0, 0, 0, 1]), [x[0] for x in concat_graph.edges])
self.assertAllEqual(
np.array([0, 0, 1, 2, 3, 3]), [x[2] for x in concat_graph.edges])
self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
if "senders" not in none_fields:
# [1, 2], [1], [1], [1, 2] and 3, 2, 2, 3 nodes
# So we are summing [1, 2, 1, 1, 2] with [0, 0, 3, 5, 7, 7]
self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]), concat_graph.senders)
if "receivers" not in none_fields:
# [0, 0], [0], [0], [0, 0] and 3, 2, 2, 3 nodes
# So we are summing [0, 0, 0, 0, 0, 0] with [0, 0, 3, 5, 7, 7]
self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]), concat_graph.receivers)
if "globals" not in none_fields:
self.assertAllEqual(np.array([[0], [1], [2], [3]]), concat_graph.globals)
def test_concat_last_axis(self):
graph0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph1 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(2, 3), _generate_graph(3, 2)])
graph0 = graph0.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
graph1 = graph1.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
with tf.Session() as sess:
concat_graph = sess.run(utils_tf.concat([graph0, graph1], axis=-1))
self.assertAllEqual(
np.array([[0, 0, 0, 2], [1, 0, 1, 2], [2, 0, 2, 2], [0, 1, 0, 3],
[1, 1, 1, 3]]), concat_graph.nodes)
self.assertAllEqual(
np.array([[0, 1, 0, 0, 1, 2], [1, 2, 0, 1, 2, 2], [0, 1, 1, 0, 1, 3]]),
concat_graph.edges)
self.assertAllEqual(np.array([3, 2]), concat_graph.n_node)
self.assertAllEqual(np.array([2, 1]), concat_graph.n_edge)
self.assertAllEqual(np.array([1, 2, 4]), concat_graph.senders)
self.assertAllEqual(np.array([0, 0, 3]), concat_graph.receivers)
self.assertAllEqual(np.array([[0, 2], [1, 3]]), concat_graph.globals)
class BuildPlaceholdersTest(test_utils.GraphsTest, parameterized.TestCase):
def _assert_expected_shapes(self, placeholders, but_for=None,
num_graphs=None):
if but_for is None:
but_for = []
if "nodes" not in but_for:
self.assertAllEqual([None, 2], placeholders.nodes.shape.as_list())
if "edges" not in but_for:
self.assertAllEqual([None, 3], placeholders.edges.shape.as_list())
if "globals" not in but_for:
self.assertAllEqual([num_graphs, 1], placeholders.globals.shape.as_list())
for key in ["receivers", "senders"]:
if key not in but_for:
self.assertAllEqual([None], getattr(placeholders, key).shape.as_list())
for key in ["n_node", "n_edge"]:
if key not in but_for:
self.assertAllEqual([num_graphs],
getattr(placeholders, key).shape.as_list())
@parameterized.named_parameters(
("all_field_defined", [], False),
("no features", ["nodes", "edges", "globals"], False),
("no edges", ["edges", "receivers", "senders"], False),
("dynamic", [], True))
def test_build_placeholders_from_specs(self,
none_fields,
force_dynamic_num_graphs=False):
num_graphs = 3
shapes = graphs.GraphsTuple(
nodes=[3, 4],
edges=[2],
globals=[num_graphs, 4, 6],
receivers=[None],
senders=[18],
n_node=[num_graphs],
n_edge=[num_graphs],
)
dtypes = graphs.GraphsTuple(
nodes=tf.float64,
edges=tf.int32,
globals=tf.float32,
receivers=tf.int64,
senders=tf.int64,
n_node=tf.int32,
n_edge=tf.int64)
dtypes = dtypes.map(lambda _: None, none_fields)
shapes = shapes.map(lambda _: None, none_fields)
placeholders = utils_tf._build_placeholders_from_specs(
dtypes, shapes, force_dynamic_num_graphs=force_dynamic_num_graphs)
for k in graphs.ALL_FIELDS:
placeholder = getattr(placeholders, k)
if k in none_fields:
self.assertEqual(None, placeholder)
else:
self.assertEqual(getattr(dtypes, k), placeholder.dtype)
if k not in ["n_node", "n_edge", "globals"] or force_dynamic_num_graphs:
self.assertAllEqual([None] + getattr(shapes, k)[1:],
placeholder.shape.as_list())
else:
self.assertAllEqual([num_graphs] + getattr(shapes, k)[1:],
placeholder.shape.as_list())
@parameterized.named_parameters(("static_num_graphs", False),
("dynamic_num_graphs", True))
def test_placeholders_from_data_dicts(self, force_dynamic_num_graphs):
num_graphs = len(self.graphs_dicts_in)
placeholders = utils_tf.placeholders_from_data_dicts(
self.graphs_dicts_in, force_dynamic_num_graphs=force_dynamic_num_graphs)
self.assertAllEqual([None, 7, 11], placeholders.nodes.shape.as_list())
self.assertAllEqual([None, 13, 14], placeholders.edges.shape.as_list())
global_shape = placeholders.globals.shape.as_list()
if force_dynamic_num_graphs:
self.assertAllEqual([None, 5, 3], global_shape)
else:
self.assertAllEqual([num_graphs, 5, 3], global_shape)
for key in ["receivers", "senders"]:
self.assertAllEqual([None], getattr(placeholders, key).shape.as_list())
for key in ["n_node", "n_edge"]:
shape = getattr(placeholders, key).shape.as_list()
if force_dynamic_num_graphs:
self.assertAllEqual([None], shape)
else:
self.assertAllEqual([num_graphs], shape)
def test_placeholders_from_networkxs(self):
num_graphs = 16
networkxs = [
_generate_graph(batch_index) for batch_index in range(num_graphs)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkxs, force_dynamic_num_graphs=False)
self._assert_expected_shapes(placeholders, num_graphs=num_graphs)
self.assertEqual(tf.float32, placeholders.nodes.dtype)
self.assertEqual(tf.float64, placeholders.edges.dtype)
def test_placeholders_from_networkxs_missing_nodes(self):
num_graphs = 16
networkxs = [
_generate_graph(batch_index, n_nodes=0, add_edges=False)
for batch_index in range(num_graphs)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkxs, force_dynamic_num_graphs=False)
self.assertEqual(None, placeholders.nodes)
self.assertEqual(None, placeholders.edges)
self._assert_expected_shapes(
placeholders, but_for=["nodes", "edges"], num_graphs=num_graphs)
def test_placeholders_from_networkxs_hints(self):
num_graphs = 16
networkxs = [
_generate_graph(batch_index, n_nodes=0, add_edges=False)
for batch_index in range(num_graphs)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkxs,
node_shape_hint=[14],
edge_shape_hint=[17],
data_type_hint=tf.float64,
force_dynamic_num_graphs=False)
self.assertAllEqual([None, 14], placeholders.nodes.shape.as_list())
self.assertAllEqual([None, 17], placeholders.edges.shape.as_list())
self._assert_expected_shapes(
placeholders, but_for=["nodes", "edges"], num_graphs=num_graphs)
self.assertEqual(tf.float64, placeholders.nodes.dtype)
self.assertEqual(tf.float64, placeholders.edges.dtype)
def test_placeholders_from_networkxs_missing_edges(self):
num_graphs = 16
networkxs = [
_generate_graph(batch_index, add_edges=False)
for batch_index in range(num_graphs)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkxs, force_dynamic_num_graphs=False)
self.assertEqual(None, placeholders.edges)
self._assert_expected_shapes(
placeholders, but_for=["edges"], num_graphs=num_graphs)
def test_feed_data(self):
networkx = [_generate_graph(batch_index) for batch_index in range(16)]
placeholders = utils_tf.placeholders_from_networkxs(
networkx, force_dynamic_num_graphs=True)
# Does not need to be the same size
networkxs = [_generate_graph(batch_index) for batch_index in range(2)]
with tf.Session() as sess:
output = sess.run(
placeholders,
utils_tf.get_feed_dict(placeholders,
utils_np.networkxs_to_graphs_tuple(networkxs)))
self.assertAllEqual(
np.array([[0, 0], [1, 0], [2, 0], [3, 0], [0, 1], [1, 1], [2, 1],
[3, 1]]), output.nodes)
self.assertEqual(np.float32, output.nodes.dtype)
self.assertAllEqual(np.array([[0], [1]]), output.globals)
self.assertEqual(np.float32, output.globals.dtype)
sorted_edges_content = sorted(
[(x, y, z)
for x, y, z in zip(output.receivers, output.senders, output.edges)])
self.assertAllEqual([0, 0, 1, 4, 4, 5],
[x[0] for x in sorted_edges_content])
self.assertAllEqual([1, 2, 3, 5, 6, 7],
[x[1] for x in sorted_edges_content])
self.assertEqual(np.float64, output.edges.dtype)
self.assertAllEqual(
np.array([[0, 1, 0], [1, 2, 0], [2, 3, 0], [0, 1, 1], [1, 2, 1],
[2, 3, 1]]), [x[2] for x in sorted_edges_content])
  @parameterized.named_parameters(
      ("no features", ["nodes", "edges", "globals"]),
      ("no edges", ["edges", "receivers", "senders"]))
def test_get_feed_dict_raises(self, none_fields):
networkxs = [_generate_graph(batch_index) for batch_index in range(16)]
placeholders = utils_tf.placeholders_from_networkxs(networkxs)
feed_values = utils_np.networkxs_to_graphs_tuple(networkxs)
with self.assertRaisesRegexp(ValueError, ""):
utils_tf.get_feed_dict(
placeholders.map(lambda _: None, none_fields), feed_values)
with self.assertRaisesRegexp(ValueError, ""):
utils_tf.get_feed_dict(placeholders,
feed_values.map(lambda _: None, none_fields))
def test_feed_data_no_nodes(self):
networkx = [
_generate_graph(batch_index, n_nodes=0, add_edges=False)
for batch_index in range(16)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkx, force_dynamic_num_graphs=True)
# Does not need to be the same size
networkxs = [
_generate_graph(batch_index, n_nodes=0, add_edges=False)
for batch_index in range(2)
]
self.assertEqual(None, placeholders.nodes)
self.assertEqual(None, placeholders.edges)
with tf.Session() as sess:
output = sess.run(
placeholders.replace(nodes=tf.no_op(), edges=tf.no_op()),
utils_tf.get_feed_dict(placeholders,
utils_np.networkxs_to_graphs_tuple(networkxs)))
self.assertAllEqual(np.array([[0], [1]]), output.globals)
self.assertEqual(np.float32, output.globals.dtype)
def test_feed_data_no_edges(self):
networkx = [
_generate_graph(batch_index, add_edges=False)
for batch_index in range(16)
]
placeholders = utils_tf.placeholders_from_networkxs(
networkx, force_dynamic_num_graphs=True)
# Does not need to be the same size
networkxs = [
_generate_graph(batch_index, add_edges=False)
for batch_index in range(2)
]
self.assertEqual(None, placeholders.edges)
with tf.Session() as sess:
output = sess.run(
placeholders.replace(edges=tf.no_op()),
utils_tf.get_feed_dict(placeholders,
utils_np.networkxs_to_graphs_tuple(networkxs)))
self.assertAllEqual(
np.array([[0, 0], [1, 0], [2, 0], [3, 0], [0, 1], [1, 1], [2, 1],
[3, 1]]), output.nodes)
self.assertAllEqual(np.array([[0], [1]]), output.globals)
self.assertEqual(np.float32, output.globals.dtype)
class StopGradientsGraphTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(StopGradientsGraphTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.zeros([10], dtype=tf.int32),
"receivers": tf.zeros([10], dtype=tf.int32),
"nodes": tf.ones([5, 7]),
"edges": tf.zeros([10, 6]),
"globals": tf.zeros([1, 8])
}])
def _check_if_gradients_exist(self, stopped_gradients_graph):
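    """Returns, for globals/nodes/edges, whether a gradient w.r.t. the input exists."""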
gradients = []
for field in ["globals", "nodes", "edges"]:
xs = getattr(self._graph, field)
ys = getattr(stopped_gradients_graph, field)
gradient = tf.gradients(ys, xs)[0] if ys is not None else ys
gradients.append(gradient)
    return [grad is not None for grad in gradients]
@parameterized.named_parameters(
("stop_all_fields", True, True, True),
("stop_globals", True, False, False), ("stop_nodes", False, True, False),
("stop_edges", False, False, True), ("stop_none", False, False, False))
def test_stop_gradients_outputs(self, stop_globals, stop_nodes, stop_edges):
stopped_gradients_graph = utils_tf.stop_gradient(
self._graph,
stop_globals=stop_globals,
stop_nodes=stop_nodes,
stop_edges=stop_edges)
gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
expected_gradients_exist = [
not stop_globals, not stop_nodes, not stop_edges
]
self.assertAllEqual(expected_gradients_exist, gradients_exist)
@parameterized.named_parameters(("no_nodes", "nodes"), ("no_edges", "edges"),
("no_globals", "globals"))
def test_stop_gradients_with_missing_field_raises(self, none_field):
self._graph = self._graph.map(lambda _: None, [none_field])
with self.assertRaisesRegexp(ValueError, none_field):
utils_tf.stop_gradient(self._graph)
def test_stop_gradients_default_params(self):
"""Tests for the default params of `utils_tf.stop_gradient`."""
stopped_gradients_graph = utils_tf.stop_gradient(self._graph)
gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
expected_gradients_exist = [False, False, False]
self.assertAllEqual(expected_gradients_exist, gradients_exist)
class IdentityTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for the `identity` method."""
def setUp(self):
super(IdentityTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.random_uniform([10], maxval=10, dtype=tf.int32),
"receivers": tf.random_uniform([10], maxval=10, dtype=tf.int32),
"nodes": tf.random_uniform([5, 7]),
"edges": tf.random_uniform([10, 6]),
"globals": tf.random_uniform([1, 8])
}])
def test_name_scope(self):
"""Tests that the name scope are correctly pushed through this function."""
graph = self._graph
with tf.name_scope("test"):
graph_id = utils_tf.identity(graph)
for field in [
"nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
]:
self.assertEqual("test", getattr(graph_id, field).name.split("/")[0])
@parameterized.named_parameters(
("all fields defined", []), ("no node features", ["nodes"]),
("no edge features", ["edges"]), ("no global features", ["globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_output(self, none_fields):
"""Tests that this function produces the identity."""
graph = self._graph.map(lambda _: None, none_fields)
with tf.name_scope("test"):
graph_id = utils_tf.identity(graph)
graph = utils_tf.make_runnable_in_session(graph)
graph_id = utils_tf.make_runnable_in_session(graph_id)
with tf.Session() as sess:
expected_out, actual_out = sess.run([graph, graph_id])
for field in [
"nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
]:
if field in none_fields:
self.assertEqual(None, getattr(actual_out, field))
else:
self.assertNDArrayNear(
getattr(expected_out, field), getattr(actual_out, field), err=1e-4)
class RunGraphWithNoneInSessionTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(RunGraphWithNoneInSessionTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.random_uniform([10], maxval=10, dtype=tf.int32),
"receivers": tf.random_uniform([10], maxval=10, dtype=tf.int32),
"nodes": tf.random_uniform([5, 7]),
"edges": tf.random_uniform([10, 6]),
"globals": tf.random_uniform([1, 8])
}])
@parameterized.named_parameters(
("all fields defined", []), ("no node features", ["nodes"]),
("no edge features", ["edges"]), ("no global features", ["globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_output(self, none_fields):
"""Tests that this function produces the identity."""
graph = self._graph.map(lambda _: None, none_fields)
with tf.name_scope("test"):
graph_id = utils_tf.make_runnable_in_session(graph)
graph = graph.map(tf.no_op, none_fields)
with tf.Session() as sess:
expected_out, actual_out = sess.run([graph, graph_id])
for field in [
"nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
]:
if field in none_fields:
self.assertEqual(None, getattr(actual_out, field))
else:
self.assertNDArrayNear(
getattr(expected_out, field), getattr(actual_out, field), err=1e-4)
class ComputeOffsetTest(tf.test.TestCase):
"""Tests for the `compute_stacked_offsets` method."""
def setUp(self):
super(ComputeOffsetTest, self).setUp()
tf.reset_default_graph()
self.sizes = [5, 4, 3, 1, 2, 0, 3, 0, 4, 7]
self.repeats = [2, 2, 0, 2, 1, 3, 2, 0, 3, 2]
self.offset = [
0, 0, 5, 5, 12, 12, 13, 15, 15, 15, 15, 15, 18, 18, 18, 22, 22
]
def test_compute_stacked_offsets(self):
offset0 = utils_tf._compute_stacked_offsets(self.sizes, self.repeats)
offset1 = utils_tf._compute_stacked_offsets(
np.array(self.sizes), np.array(self.repeats))
offset2 = utils_tf._compute_stacked_offsets(
tf.constant(self.sizes, dtype=tf.int32),
tf.constant(self.repeats, dtype=tf.int32))
with tf.Session() as sess:
o0, o1, o2 = sess.run([offset0, offset1, offset2])
self.assertAllEqual(self.offset, o0.tolist())
self.assertAllEqual(self.offset, o1.tolist())
self.assertAllEqual(self.offset, o2.tolist())
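# A minimal numpy sketch (illustration only, not the `utils_tf` implementation)
# of the expected offsets in `ComputeOffsetTest` above: each graph's offset is
# the cumulative size of the preceding graphs, repeated once per element that
# indexes into that graph. It reuses this module's existing `np` import.
# For example, _stacked_offsets_sketch([5, 4, 3], [2, 2, 0]) -> [0, 0, 5, 5].
def _stacked_offsets_sketch(sizes, repeats):
  starts = np.cumsum([0] + list(sizes[:-1]))
  return np.repeat(starts, repeats)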
class DataDictsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the methods creating complete graphs from partial graphs."""
def _assert_indices_sizes(self, dict_, n_relation):
for key in ["receivers", "senders"]:
self.assertAllEqual((n_relation,), dict_[key].get_shape().as_list())
@parameterized.named_parameters(
("static", utils_tf._create_complete_edges_from_nodes_static),
("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
)
def test_create_complete_edges_from_nodes_include_self_edges(self, method):
for graph_dict in self.graphs_dicts_in:
n_node = graph_dict["nodes"].shape[0]
edges_dict = method(n_node, exclude_self_edges=False)
self._assert_indices_sizes(edges_dict, n_node**2)
@parameterized.named_parameters(
("static", utils_tf._create_complete_edges_from_nodes_static),
("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
)
def test_create_complete_edges_from_nodes_exclude_self_edges(self, method):
for graph_dict in self.graphs_dicts_in:
n_node = graph_dict["nodes"].shape[0]
edges_dict = method(n_node, exclude_self_edges=True)
self._assert_indices_sizes(edges_dict, n_node * (n_node - 1))
def test_create_complete_edges_from_nodes_dynamic_number_of_nodes(self):
for graph_dict in self.graphs_dicts_in:
n_node = tf.shape(tf.constant(graph_dict["nodes"]))[0]
edges_dict = utils_tf._create_complete_edges_from_nodes_dynamic(
n_node, exclude_self_edges=False)
n_relation_op = n_node**2
with tf.Session() as sess:
n_relation, receivers, senders, n_edge = sess.run([
n_relation_op, edges_dict["receivers"], edges_dict["senders"],
edges_dict["n_edge"]
])
self.assertAllEqual((n_relation,), receivers.shape)
self.assertAllEqual((n_relation,), senders.shape)
self.assertEqual(n_relation, n_edge)
class GraphsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for completing partial GraphsTuple."""
def _assert_indices_sizes(self, graph, n_relation):
for key in ["receivers", "senders"]:
self.assertAllEqual((n_relation,),
getattr(graph, key).get_shape().as_list())
@parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
def test_fill_edge_state(self, edge_size):
"""Tests for filling the edge state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_edges = np.sum(self.reference_graph.n_edge)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
self.assertAllEqual((n_edges, edge_size),
graphs_tuple.edges.get_shape().as_list())
@parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
def test_fill_edge_state_dynamic(self, edge_size):
"""Tests for filling the edge state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple._replace(
n_edge=tf.placeholder_with_default(
graphs_tuple.n_edge, shape=graphs_tuple.n_edge.get_shape()))
n_edges = np.sum(self.reference_graph.n_edge)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
with tf.Session() as sess:
actual_edges = sess.run(graphs_tuple.edges)
self.assertNDArrayNear(
np.zeros((n_edges, edge_size)), actual_edges, err=1e-4)
@parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
def test_fill_global_state(self, global_size):
"""Tests for filling the global state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("globals")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_graphs = self.reference_graph.n_edge.shape[0]
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
self.assertAllEqual((n_graphs, global_size),
graphs_tuple.globals.get_shape().as_list())
@parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
def test_fill_global_state_dynamic(self, global_size):
"""Tests for filling the global state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("globals")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
# Hide global shape information
graphs_tuple = graphs_tuple._replace(
n_node=tf.placeholder_with_default(graphs_tuple.n_node, shape=[None]))
n_graphs = self.reference_graph.n_edge.shape[0]
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
with tf.Session() as sess:
actual_globals = sess.run(graphs_tuple.globals)
self.assertNDArrayNear(
np.zeros((n_graphs, global_size)), actual_globals, err=1e-4)
@parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
def test_fill_node_state(self, node_size):
"""Tests for filling the node state with a constant content."""
for g in self.graphs_dicts_in:
g["n_node"] = g["nodes"].shape[0]
g.pop("nodes")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_nodes = np.sum(self.reference_graph.n_node)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
self.assertAllEqual((n_nodes, node_size),
graphs_tuple.nodes.get_shape().as_list())
@parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
def test_fill_node_state_dynamic(self, node_size):
"""Tests for filling the node state with a constant content."""
for g in self.graphs_dicts_in:
g["n_node"] = g["nodes"].shape[0]
g.pop("nodes")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple._replace(
n_node=tf.placeholder_with_default(
graphs_tuple.n_node, shape=graphs_tuple.n_node.get_shape()))
n_nodes = np.sum(self.reference_graph.n_node)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
with tf.Session() as sess:
actual_nodes = sess.run(graphs_tuple.nodes)
self.assertNDArrayNear(
np.zeros((n_nodes, node_size)), actual_nodes, err=1e-4)
def test_fill_edge_state_with_missing_fields_raises(self):
"""Edge field cannot be filled if receivers or senders are missing."""
for g in self.graphs_dicts_in:
g.pop("receivers")
g.pop("senders")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
with self.assertRaisesRegexp(ValueError, "receivers"):
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)
def test_fill_state_default_types(self):
"""Tests that the features are created with the correct default type."""
for g in self.graphs_dicts_in:
g.pop("nodes")
g.pop("globals")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size=1)
graphs_tuple = utils_tf.set_zero_global_features(
graphs_tuple, global_size=1)
self.assertEqual(tf.float32, graphs_tuple.edges.dtype)
self.assertEqual(tf.float32, graphs_tuple.nodes.dtype)
self.assertEqual(tf.float32, graphs_tuple.globals.dtype)
@parameterized.parameters(
(tf.float64,),
(tf.int32,),
)
def test_fill_state_user_specified_types(self, dtype):
"""Tests that the features are created with the correct default type."""
for g in self.graphs_dicts_in:
g.pop("nodes")
g.pop("globals")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, 1, dtype)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, 1, dtype)
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, 1, dtype)
self.assertEqual(dtype, graphs_tuple.edges.dtype)
self.assertEqual(dtype, graphs_tuple.nodes.dtype)
self.assertEqual(dtype, graphs_tuple.globals.dtype)
@parameterized.named_parameters(
("no self edges", False),
("self edges", True),
)
def test_fully_connect_graph_dynamic(self, exclude_self_edges):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
n_relation = 0
for g in self.graphs_dicts_in:
n_node = g["nodes"].shape[0]
if exclude_self_edges:
n_relation += n_node * (n_node - 1)
else:
n_relation += n_node * n_node
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
exclude_self_edges)
with tf.Session() as sess:
actual_receivers, actual_senders = sess.run(
[graphs_tuple.receivers, graphs_tuple.senders])
self.assertAllEqual((n_relation,), actual_receivers.shape)
self.assertAllEqual((n_relation,), actual_senders.shape)
self.assertAllEqual((len(self.graphs_dicts_in),),
graphs_tuple.n_edge.get_shape().as_list())
@parameterized.named_parameters(
("no self edges", False),
("self edges", True),
)
def test_fully_connect_graph_dynamic_with_dynamic_sizes(
self, exclude_self_edges):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
n_relation = 0
for g in self.graphs_dicts_in:
n_node = g["nodes"].shape[0]
if exclude_self_edges:
n_relation += n_node * (n_node - 1)
else:
n_relation += n_node * n_node
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple.map(test_utils.mask_leading_dimension,
["nodes", "globals", "n_node", "n_edge"])
graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
exclude_self_edges)
with tf.Session() as sess:
actual_receivers, actual_senders, actual_n_edge = sess.run(
[graphs_tuple.receivers, graphs_tuple.senders, graphs_tuple.n_edge])
self.assertAllEqual((n_relation,), actual_receivers.shape)
self.assertAllEqual((n_relation,), actual_senders.shape)
self.assertAllEqual((len(self.graphs_dicts_in),), actual_n_edge.shape)
expected_edges = []
offset = 0
for graph in self.graphs_dicts_in:
n_node = graph["nodes"].shape[0]
for e1 in range(n_node):
for e2 in range(n_node):
if not exclude_self_edges or e1 != e2:
expected_edges.append((e1 + offset, e2 + offset))
offset += n_node
actual_edges = zip(actual_receivers, actual_senders)
self.assertSetEqual(set(actual_edges), set(expected_edges))
@parameterized.named_parameters(
("no self edges", False),
("self edges", True),
)
def test_fully_connect_graph_static(self, exclude_self_edges):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
num_graphs = 2
num_nodes = 3
if exclude_self_edges:
n_edges = num_nodes * (num_nodes - 1)
else:
n_edges = num_nodes * num_nodes
n_relation = num_graphs * n_edges
graphs_dicts = [{
"nodes": tf.zeros([num_nodes, 1])
} for _ in range(num_graphs)]
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(graphs_dicts)
graphs_tuple = utils_tf.fully_connect_graph_static(graphs_tuple,
exclude_self_edges)
self.assertAllEqual((n_relation,),
graphs_tuple.receivers.get_shape().as_list())
self.assertAllEqual((n_relation,),
graphs_tuple.senders.get_shape().as_list())
self.assertAllEqual((num_graphs,),
graphs_tuple.n_edge.get_shape().as_list())
with tf.Session() as sess:
actual_receivers, actual_senders, actual_n_edge = sess.run(
[graphs_tuple.receivers, graphs_tuple.senders, graphs_tuple.n_edge])
expected_edges = []
offset = 0
for _ in range(num_graphs):
for v1 in range(num_nodes):
for v2 in range(num_nodes):
if not exclude_self_edges or v1 != v2:
expected_edges.append((v1 + offset, v2 + offset))
offset += num_nodes
actual_edges = zip(actual_receivers, actual_senders)
self.assertNDArrayNear(
np.array([n_edges] * num_graphs), actual_n_edge, 1e-4)
self.assertSetEqual(set(actual_edges), set(expected_edges))
def test_fully_connect_graph_static_with_dynamic_sizes_raises(self):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple_1 = graphs_tuple.map(test_utils.mask_leading_dimension,
["n_node"])
with self.assertRaisesRegexp(ValueError, "known at construction time"):
utils_tf.fully_connect_graph_static(graphs_tuple_1)
graphs_tuple_2 = graphs_tuple.map(test_utils.mask_leading_dimension,
["nodes"])
with self.assertRaisesRegexp(ValueError, "known at construction time"):
utils_tf.fully_connect_graph_static(graphs_tuple_2)
with self.assertRaisesRegexp(ValueError, "the same in all graphs"):
utils_tf.fully_connect_graph_static(graphs_tuple)
class GraphsTupleConversionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the method converting between data dicts and GraphsTuple."""
@parameterized.named_parameters(("all fields defined", []), (
"no edge features",
["edges"],
), (
"no node features",
["nodes"],
), (
"no globals",
["globals"],
), (
"no edges",
["edges", "receivers", "senders"],
))
def test_data_dicts_to_graphs_tuple(self, none_fields):
"""Fields in `none_fields` will be cleared out."""
for field in none_fields:
for graph_dict in self.graphs_dicts_in:
if field in graph_dict:
if field == "nodes":
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict[field] = None
self.reference_graph = self.reference_graph._replace(**{field: None})
if field == "senders":
self.reference_graph = self.reference_graph._replace(
n_edge=np.zeros_like(self.reference_graph.n_edge))
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for field in none_fields:
self.assertEqual(None, getattr(graphs_tuple, field))
graphs_tuple = graphs_tuple.map(tf.no_op, none_fields)
with tf.Session() as sess:
self._assert_graph_equals_np(self.reference_graph, sess.run(graphs_tuple))
@parameterized.parameters(("receivers",), ("senders",))
def test_data_dicts_to_graphs_tuple_raises(self, none_field):
"""Fields that cannot be missing."""
for graph_dict in self.graphs_dicts_in:
graph_dict[none_field] = None
with self.assertRaisesRegexp(ValueError, none_field):
utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
def test_data_dicts_to_graphs_tuple_no_raise(self):
"""Not having nodes is fine, if the number of nodes is provided."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict["nodes"] = None
utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
def test_data_dicts_to_graphs_tuple_cast_types(self):
"""Index and number fields should be cast to tensors of the right type."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = np.array(
graph_dict["nodes"].shape[0], dtype=np.int64)
graph_dict["receivers"] = graph_dict["receivers"].astype(np.int16)
graph_dict["senders"] = graph_dict["senders"].astype(np.float64)
graph_dict["nodes"] = graph_dict["nodes"].astype(np.float64)
graph_dict["edges"] = tf.constant(graph_dict["edges"], dtype=tf.float64)
out = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for key in ["n_node", "n_edge", "receivers", "senders"]:
self.assertEqual(tf.int32, getattr(out, key).dtype)
self.assertEqual(type(tf.int32), type(getattr(out, key).dtype))
for key in ["nodes", "edges"]:
self.assertEqual(type(tf.float64), type(getattr(out, key).dtype))
self.assertEqual(tf.float64, getattr(out, key).dtype)
class GraphsIndexingTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the `get_graph` method."""
@parameterized.named_parameters(("int_index", False),
("tensor_index", True))
def test_getitem_one(self, use_tensor_index):
index = 2
expected = self.graphs_dicts_out[index]
if use_tensor_index:
index = tf.constant(index)
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graph_op = utils_tf.get_graph(graphs_tuple, index)
graph_op = utils_tf.make_runnable_in_session(graph_op)
with tf.Session() as sess:
graph = sess.run(graph_op)
actual, = utils_np.graphs_tuple_to_data_dicts(graph)
for k, v in expected.items():
self.assertAllClose(v, actual[k])
self.assertEqual(expected["nodes"].shape[0], actual["n_node"])
self.assertEqual(expected["edges"].shape[0], actual["n_edge"])
@parameterized.named_parameters(("int_slice", False),
("tensor_slice", True))
def test_getitem(self, use_tensor_slice):
index = slice(1, 3)
expected = self.graphs_dicts_out[index]
if use_tensor_slice:
index = slice(tf.constant(index.start), tf.constant(index.stop))
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs2_op = utils_tf.get_graph(graphs_tuple, index)
graphs2_op = utils_tf.make_runnable_in_session(graphs2_op)
with tf.Session() as sess:
graphs2 = sess.run(graphs2_op)
actual = utils_np.graphs_tuple_to_data_dicts(graphs2)
for ex, ac in zip(expected, actual):
for k, v in ex.items():
self.assertAllClose(v, ac[k])
self.assertEqual(ex["nodes"].shape[0], ac["n_node"])
self.assertEqual(ex["edges"].shape[0], ac["n_edge"])
@parameterized.named_parameters(
("index_bad_type", 1.,
TypeError, "Index must be a valid scalar integer"),
("index_bad_shape", tf.constant([0, 1]),
TypeError, "Valid tensor indices must be scalars"),
("index_bad_dtype", tf.constant(1.),
TypeError, "Valid tensor indices must have types"),
("slice_bad_type_stop", slice(1.),
TypeError, "Valid tensor indices must be integers"),
("slice_bad_shape_stop", slice(tf.constant([0, 1])),
TypeError, "Valid tensor indices must be scalars"),
("slice_bad_dtype_stop", slice(tf.constant(1.)),
TypeError, "Valid tensor indices must have types"),
("slice_bad_type_start", slice(0., 1),
TypeError, "Valid tensor indices must be integers"),
("slice_bad_shape_start", slice(tf.constant([0, 1]), 1),
TypeError, "Valid tensor indices must be scalars"),
("slice_bad_dtype_start", slice(tf.constant(0.), 1),
TypeError, "Valid tensor indices must have types"),
("slice_with_step", slice(0, 1, 1),
ValueError, "slices with step/stride are not supported"),
)
def test_raises(self, index, error_type, message):
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
with self.assertRaisesRegexp(error_type, message):
utils_tf.get_graph(graphs_tuple, index)
class TestNumGraphs(test_utils.GraphsTest):
"""Tests for the `get_num_graphs` function."""
def setUp(self):
super(TestNumGraphs, self).setUp()
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
self.empty_graph = graphs_tuple.map(lambda _: None,
graphs.GRAPH_DATA_FIELDS)
def test_num_graphs_static(self):
graph = self.empty_graph.replace(n_node=tf.zeros([3], dtype=tf.int32))
self.assertEqual(3, utils_tf.get_num_graphs(graph))
def test_num_graphs_dynamic(self):
n_node_placeholder = tf.placeholder(tf.int32, [None])
graph = self.empty_graph.replace(n_node=n_node_placeholder)
num_graphs = utils_tf.get_num_graphs(graph)
with tf.Session() as sess:
actual_num_graphs = sess.run(
num_graphs, {n_node_placeholder: np.zeros([3], dtype=np.int32)})
self.assertEqual(3, actual_num_graphs)
class TestSpecsFromGraphsTuple(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the `spec_from_graphs_tuple` function."""
@parameterized.named_parameters(
("dynamic_nodes_edges_not_batched_without_constants",
True, True, False, False, False, None),
("dynamic_num_graphs_not_batched_without_constants",
False, False, True, False, False, None),
("static_num_graphs_not_batched_without_constants",
False, False, True, False, False, None),
("static_num_graphs_batched_without_constants",
False, False, True, True, False, None),
("dynamic_nodes_edges_not_batched_with_constants",
True, True, False, False, True, None),
("dynamic_num_graphs_not_batched_with_constants",
False, False, True, False, True, None),
("static_num_graphs_not_batched_with_constants",
False, False, True, False, True, None),
("static_num_graphs_batched_with_constants",
False, False, True, True, True, None),
("dynamic_graphs_batched_empty_nested_features",
False, False, True, False, False, "emtpy_nests"),
("dynamic_graphs_batched_deep_nested_features",
False, False, True, False, False, "deep_nests"),
)
def test_correct_signature(
self,
dynamic_num_nodes,
dynamic_num_edges,
dynamic_num_graphs,
batched,
replace_globals_with_constant,
nested_features_type):
"""Tests that the correct spec is created when using different options."""
if batched:
input_data_dicts = [self.graphs_dicts[1], self.graphs_dicts[2]]
else:
input_data_dicts = [self.graphs_dicts[1]]
graph = utils_np.data_dicts_to_graphs_tuple(input_data_dicts)
num_graphs = len(input_data_dicts)
num_edges = sum(graph.n_edge).item()
num_nodes = sum(graph.n_node).item()
# Manually setting edges and globals fields to give some variety in
# testing situations.
    # Making edges have rank 1 to cover features without a feature dimension.
graph = graph.replace(edges=np.zeros(num_edges))
# Make a constant field.
if replace_globals_with_constant:
graph = graph.replace(globals=np.array(0.0, dtype=np.float32))
if nested_features_type is not None:
graph = self._add_nested_features(graph, nested_features_type)
spec_signature = utils_tf.specs_from_graphs_tuple(
graph, dynamic_num_graphs, dynamic_num_nodes, dynamic_num_edges)
# Captures if nodes/edges will be dynamic either due to dynamic nodes/edges
# or dynamic graphs.
dynamic_nodes_or_graphs = dynamic_num_nodes or dynamic_num_graphs
dynamic_edges_or_graphs = dynamic_num_edges or dynamic_num_graphs
num_edges = None if dynamic_edges_or_graphs else num_edges
num_nodes = None if dynamic_nodes_or_graphs else num_nodes
num_graphs = None if dynamic_num_graphs else num_graphs
if replace_globals_with_constant:
expected_globals_shape = []
else:
expected_globals_shape = [num_graphs,] + test_utils.GLOBALS_DIMS
expected_answer = graphs.GraphsTuple(
nodes=tf.TensorSpec(
shape=[num_nodes,] + test_utils.NODES_DIMS,
dtype=tf.float32),
edges=tf.TensorSpec(
shape=[num_edges], # Edges were manually replaced to have dim 1.
dtype=tf.float64),
n_node=tf.TensorSpec(
shape=[num_graphs],
dtype=tf.int32),
n_edge=tf.TensorSpec(
shape=[num_graphs],
dtype=tf.int32),
globals=tf.TensorSpec(
shape=expected_globals_shape,
dtype=tf.float32),
receivers=tf.TensorSpec(
shape=[num_edges],
dtype=tf.int32),
senders=tf.TensorSpec(
shape=[num_edges],
dtype=tf.int32),
)
if nested_features_type is not None:
expected_answer = self._add_nested_features(
expected_answer, nested_features_type)
with self.subTest(name="Correct Type."):
self.assertIsInstance(spec_signature, graphs.GraphsTuple)
with self.subTest(name="Correct Structure."):
tree.assert_same_structure(spec_signature, expected_answer)
with self.subTest(name="Correct Signature."):
def assert_equal(actual, expected):
self.assertEqual(actual, expected)
return True
tree.map_structure(assert_equal, spec_signature, expected_answer)
def _add_nested_features(self, graphs_tuple, nested_feature_type):
if nested_feature_type == "emtpy_nests":
return graphs_tuple.replace(
nodes={},
edges=tuple([]),
globals=[])
else:
return graphs_tuple.replace(
nodes={"a": graphs_tuple.nodes, "b": [graphs_tuple.nodes]},
edges=(graphs_tuple.edges, {"c": graphs_tuple.edges}),
globals=[graphs_tuple.globals, {"d": graphs_tuple.globals}])
@parameterized.parameters(
(graphs.GLOBALS,), (graphs.EDGES,), (graphs.NODES,))
def test_none_throws_error(self, none_field):
"""Tests that an error is thrown if a GraphsTuple field is None."""
graphs_tuple = utils_np.data_dicts_to_graphs_tuple([self.graphs_dicts[1]])
graphs_tuple = graphs_tuple.replace(**{none_field: None})
with self.assertRaisesRegex(
ValueError, "`{}` was `None`. All fields of the `G".format(none_field)):
utils_tf.specs_from_graphs_tuple(graphs_tuple)
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests/utils_tf_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model architectures for the demos in TensorFlow 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from graph_nets import modules
from graph_nets import utils_tf
from six.moves import range
import sonnet as snt
NUM_LAYERS = 2 # Hard-code number of layers in the edge/node/global models.
LATENT_SIZE = 16 # Hard-code latent layer sizes for demos.
def make_mlp_model():
"""Instantiates a new MLP, followed by LayerNorm.
The parameters of each new MLP are not shared with others generated by
this function.
Returns:
A Sonnet module which contains the MLP and LayerNorm.
"""
return snt.Sequential([
snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),
snt.LayerNorm(axis=-1, create_offset=True, create_scale=True)
])
class MLPGraphIndependent(snt.Module):
"""GraphIndependent with MLP edge, node, and global models."""
def __init__(self, name="MLPGraphIndependent"):
super(MLPGraphIndependent, self).__init__(name=name)
self._network = modules.GraphIndependent(
edge_model_fn=make_mlp_model,
node_model_fn=make_mlp_model,
global_model_fn=make_mlp_model)
def __call__(self, inputs):
return self._network(inputs)
class MLPGraphNetwork(snt.Module):
"""GraphNetwork with MLP edge, node, and global models."""
def __init__(self, name="MLPGraphNetwork"):
super(MLPGraphNetwork, self).__init__(name=name)
self._network = modules.GraphNetwork(make_mlp_model, make_mlp_model,
make_mlp_model)
def __call__(self, inputs):
return self._network(inputs)
class EncodeProcessDecode(snt.Module):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
                      Hidden(t)   Hidden(t+1)
                         |            ^
            *---------*  |  *------*  |  *---------*
            |         |  |  |      |  |  |         |
  Input --->| Encoder |  *->| Core |--*->| Decoder |---> Output(t)
            |         |---->|      |     |         |
            *---------*     *------*     *---------*
"""
def __init__(self,
edge_output_size=None,
node_output_size=None,
global_output_size=None,
name="EncodeProcessDecode"):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = MLPGraphIndependent()
self._core = MLPGraphNetwork()
self._decoder = MLPGraphIndependent()
# Transforms the outputs into the appropriate shapes.
if edge_output_size is None:
edge_fn = None
else:
edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
if node_output_size is None:
node_fn = None
else:
node_fn = lambda: snt.Linear(node_output_size, name="node_output")
if global_output_size is None:
global_fn = None
else:
global_fn = lambda: snt.Linear(global_output_size, name="global_output")
self._output_transform = modules.GraphIndependent(
edge_fn, node_fn, global_fn)
def __call__(self, input_op, num_processing_steps):
latent = self._encoder(input_op)
latent0 = latent
output_ops = []
for _ in range(num_processing_steps):
core_input = utils_tf.concat([latent0, latent], axis=1)
latent = self._core(core_input)
decoded_op = self._decoder(latent)
output_ops.append(self._output_transform(decoded_op))
return output_ops
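# A minimal usage sketch for the model above (not part of the original demos).
def _encode_process_decode_usage_sketch(num_processing_steps=3):
  """Runs `EncodeProcessDecode` on a toy graph (illustration only).

  The toy feature values and output sizes below are assumptions made for this
  example; it simply shows how a batch of graphs built with
  `utils_tf.data_dicts_to_graphs_tuple` is passed through the
  encode-process-decode model, returning one output graph per processing step.
  """
  model = EncodeProcessDecode(
      edge_output_size=2, node_output_size=2, global_output_size=2)
  graphs_in = utils_tf.data_dicts_to_graphs_tuple([{
      "globals": [0.0, 1.0],
      "nodes": [[0.1, 0.2], [0.3, 0.4]],
      "edges": [[1.0, 2.0]],
      "senders": [0],
      "receivers": [1],
  }])
  return model(graphs_in, num_processing_steps)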
| graph_nets-master | graph_nets/demos_tf2/models.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph networks library demos in TensorFlow 2."""
| graph_nets-master | graph_nets/demos_tf2/__init__.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for `utils_tf_test` in Tensorflow 2.
This provides a base class for tests involving `graphs.GraphsTuple`
containing either numpy or tensorflow data. This base class is populated with
test data and also provides a convenience method for asserting graph equality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import itertools
from graph_nets import graphs
from graph_nets import utils_np
import numpy as np
import tensorflow as tf
@contextlib.contextmanager
def assert_new_op_prefixes(test, expected_prefix, assert_some_new_ops=True):
"""Asserts the namescope of tf ops created within the context manager."""
ops_before = [n.name for n in tf.get_default_graph().as_graph_def().node]
yield
ops_after = [n.name for n in tf.get_default_graph().as_graph_def().node]
new_ops = set(ops_after) - set(ops_before)
prefix_length = len(expected_prefix)
if assert_some_new_ops:
test.assertNotEqual(0, len(new_ops))
for op_name in new_ops:
test.assertEqual(expected_prefix, op_name[:prefix_length])
def mask_leading_dimension(tensor):
  # Placeholder masking does not work anymore; just use the tensor as is.
return tf.constant(tensor, shape=tensor.get_shape().as_list())
class GraphsTest(tf.test.TestCase):
"""A base class for tests that operate on GraphsNP or GraphsTF."""
def _populate_test_data(self, max_size):
"""Populates the class fields with data used for the tests.
    This creates a batch of graphs with 1 to `max_size` nodes and 0 to
    `max_size` edges, plus an empty graph with no nodes and no edges (so that
    the total number of graphs is 1 + max_size * (max_size + 1)).
The nodes states, edges states and global states of the graphs are
created to have different types and shapes.
Those graphs are stored both as dictionaries (in `self.graphs_dicts_in`,
without `n_node` and `n_edge` information, and in `self.graphs_dicts_out`
with these two fields filled), and a corresponding numpy
`graphs.GraphsTuple` is stored in `self.reference_graph`.
Args:
max_size: The maximum number of nodes and edges (inclusive).
"""
filt = lambda x: (x[0] > 0) or (x[1] == 0)
n_node, n_edge = zip(*list(
filter(filt, itertools.product(
range(max_size + 1), range(max_size + 1)))))
graphs_dicts = []
nodes = []
edges = []
receivers = []
senders = []
globals_ = []
def _make_default_state(shape, dtype):
return np.arange(np.prod(shape)).reshape(shape).astype(dtype)
for i, (n_node_, n_edge_) in enumerate(zip(n_node, n_edge)):
n = _make_default_state([n_node_, 7, 11], "f4") + i * 100.
e = _make_default_state([n_edge_, 13, 14], np.float64) + i * 100. + 1000.
r = _make_default_state([n_edge_], np.int32) % n_node[i]
s = (_make_default_state([n_edge_], np.int32) + 1) % n_node[i]
g = _make_default_state([5, 3], "f4") - i * 100. - 1000.
nodes.append(n)
edges.append(e)
receivers.append(r)
senders.append(s)
globals_.append(g)
graphs_dict = dict(nodes=n, edges=e, receivers=r, senders=s, globals=g)
graphs_dicts.append(graphs_dict)
# Graphs dicts without n_node / n_edge (to be used as inputs).
self.graphs_dicts_in = graphs_dicts
    # Graphs dicts with n_node / n_edge (to be checked against outputs).
self.graphs_dicts_out = []
for dict_ in self.graphs_dicts_in:
completed_dict = dict_.copy()
completed_dict["n_node"] = completed_dict["nodes"].shape[0]
completed_dict["n_edge"] = completed_dict["edges"].shape[0]
self.graphs_dicts_out.append(completed_dict)
# pylint: disable=protected-access
offset = utils_np._compute_stacked_offsets(n_node, n_edge)
# pylint: enable=protected-access
self.reference_graph = graphs.GraphsTuple(**dict(
nodes=np.concatenate(nodes, axis=0),
edges=np.concatenate(edges, axis=0),
receivers=np.concatenate(receivers, axis=0) + offset,
senders=np.concatenate(senders, axis=0) + offset,
globals=np.stack(globals_),
n_node=np.array(n_node),
n_edge=np.array(n_edge)))
def _assert_graph_equals_np(self, graph0, graph, force_edges_ordering=False):
"""Asserts that all the graph fields of graph0 and graph match."""
def silent_convert_to_numpy(tensor):
if isinstance(tensor, tf.Tensor):
return tensor.numpy()
else:
return tensor
graph = graph.map(silent_convert_to_numpy, graphs.ALL_FIELDS)
graph0 = graph0.map(silent_convert_to_numpy, graphs.ALL_FIELDS)
if graph0.nodes is None:
self.assertEqual(None, graph.nodes)
else:
self.assertAllClose(graph0.nodes, graph.nodes)
if graph0.globals is None:
self.assertEqual(None, graph.globals)
else:
self.assertAllClose(graph0.globals, graph.globals)
self.assertAllClose(graph0.n_node, graph.n_node.tolist())
if graph0.receivers is None:
self.assertEqual(None, graph.receivers)
self.assertEqual(None, graph.senders)
self.assertEqual(None, graph.edges)
self.assertAllEqual(graph0.n_edge, graph.n_edge)
return
self.assertAllClose(graph0.n_edge, graph.n_edge.tolist())
if not force_edges_ordering:
self.assertAllClose(graph0.receivers, graph.receivers)
self.assertAllClose(graph0.senders, graph.senders)
if graph0.edges is not None:
self.assertAllClose(graph0.edges, graph.edges)
else:
self.assertEqual(None, graph.edges)
return
# To compare edges content, we need to make sure they appear in the same
# order
if graph0.edges is not None:
sorted_receivers0, sorted_senders0, sorted_content0 = zip(
*sorted(zip(graph0.receivers, graph0.senders, graph0.edges.tolist())))
sorted_receivers, sorted_senders, sorted_content = zip(
*sorted(zip(graph.receivers, graph.senders, graph.edges.tolist())))
self.assertAllClose(sorted_content0, sorted_content)
elif graph.receivers is not None:
sorted_receivers0, sorted_senders0 = zip(
*sorted(zip(graph0.receivers, graph0.senders)))
sorted_receivers, sorted_senders = zip(
*sorted(zip(graph.receivers, graph.senders)))
else:
return
self.assertAllClose(sorted_receivers0, sorted_receivers)
self.assertAllClose(sorted_senders0, sorted_senders)
def setUp(self):
self._populate_test_data(max_size=2)
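# A minimal sketch (illustration only) of the enumeration performed by
# `GraphsTest._populate_test_data` above: every (n_node, n_edge) pair with
# n_node > 0 is kept, plus the single empty (0, 0) graph, giving
# 1 + max_size * (max_size + 1) graphs in total (7 for max_size=2).
def _enumerate_graph_sizes_sketch(max_size=2):
  pairs = [pair for pair in itertools.product(range(max_size + 1), repeat=2)
           if pair[0] > 0 or pair[1] == 0]
  assert len(pairs) == 1 + max_size * (max_size + 1)
  return pairs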
| graph_nets-master | graph_nets/tests_tf2/test_utils.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph networks library tests for TF2."""
| graph_nets-master | graph_nets/tests_tf2/__init__.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for blocks.py in Tensorflow 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
import numpy as np
import sonnet as snt
import tensorflow as tf
SMALL_GRAPH_1 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 1],
"receivers": [1, 2],
}
SMALL_GRAPH_2 = {
"globals": [-1.1, -1.2, -1.3, -1.4],
"nodes": [[-10.1, -10.2], [-20.1, -20.2], [-30.1, -30.2]],
"edges": [[-101., -102., -103., -104.]],
"senders": [1,],
"receivers": [2,],
}
SMALL_GRAPH_3 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [1, 1],
"receivers": [0, 2],
}
SMALL_GRAPH_4 = {
"globals": [1.1, 1.2, 1.3, 1.4],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 2],
"receivers": [1, 1],
}
class GraphModuleTest(tf.test.TestCase, parameterized.TestCase):
"""Base class for all the tests in this file."""
def setUp(self):
super(GraphModuleTest, self).setUp()
tf.random.set_seed(0)
def _get_input_graph(self, none_fields=None):
if none_fields is None:
none_fields = []
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4])
input_graph = input_graph.map(lambda _: None, none_fields)
return input_graph
def _get_shaped_input_graph(self):
return graphs.GraphsTuple(
nodes=tf.zeros([3, 4, 5, 11], dtype=tf.float32),
edges=tf.zeros([5, 4, 5, 12], dtype=tf.float32),
globals=tf.zeros([2, 4, 5, 13], dtype=tf.float32),
receivers=tf.range(5, dtype=tf.int32) // 3,
senders=tf.range(5, dtype=tf.int32) % 3,
n_node=tf.constant([2, 1], dtype=tf.int32),
n_edge=tf.constant([3, 2], dtype=tf.int32),
)
def _assert_build_and_run(self, network, input_graph):
    # No error when connecting and running the network.
_ = network(input_graph)
BROADCAST_GLOBAL_TO_EDGES = [
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[-1.1, -1.2, -1.3, -1.4],
]
BROADCAST_GLOBAL_TO_NODES = [
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[1.1, 1.2, 1.3, 1.4],
[-1.1, -1.2, -1.3, -1.4],
[-1.1, -1.2, -1.3, -1.4],
[-1.1, -1.2, -1.3, -1.4],
]
SENDER_NODES_TO_EDGES = [
[10.1, 10.2],
[20.1, 20.2],
[-20.1, -20.2],
]
RECEIVER_NODES_TO_EDGES = [
[20.1, 20.2],
[30.1, 30.2],
[-30.1, -30.2],
]
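# A minimal numpy sketch (illustration only, not how `blocks` implements
# broadcasting): a graph-level feature is broadcast by repeating each graph's
# row once per edge (or node) of that graph. The value below reproduces
# BROADCAST_GLOBAL_TO_EDGES for the two small graphs defined above.
_BROADCAST_GLOBALS_TO_EDGES_SKETCH = np.repeat(
    np.array([SMALL_GRAPH_1["globals"], SMALL_GRAPH_2["globals"]]),
    repeats=[len(SMALL_GRAPH_1["edges"]), len(SMALL_GRAPH_2["edges"])],
    axis=0)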
class BroadcastersTest(GraphModuleTest):
"""Tests for the broadcasters."""
@parameterized.named_parameters(
("globals_to_edges",
blocks.broadcast_globals_to_edges, BROADCAST_GLOBAL_TO_EDGES),
("globals_to_nodes",
blocks.broadcast_globals_to_nodes, BROADCAST_GLOBAL_TO_NODES),
("sender_nodes_to_edges",
blocks.broadcast_sender_nodes_to_edges, SENDER_NODES_TO_EDGES),
("receiver_nodes_to_edges",
blocks.broadcast_receiver_nodes_to_edges, RECEIVER_NODES_TO_EDGES),
)
def test_output_values(self, broadcaster, expected):
"""Test the broadcasted output value."""
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
broadcasted_out = broadcaster(input_graph)
self.assertNDArrayNear(
np.array(expected, dtype=np.float32), broadcasted_out, err=1e-4)
@parameterized.named_parameters(
("globals_to_edges",
blocks.broadcast_globals_to_edges, BROADCAST_GLOBAL_TO_EDGES),
("globals_to_nodes",
blocks.broadcast_globals_to_nodes, BROADCAST_GLOBAL_TO_NODES),
("sender_nodes_to_edges",
blocks.broadcast_sender_nodes_to_edges, SENDER_NODES_TO_EDGES),
("receiver_nodes_to_edges",
blocks.broadcast_receiver_nodes_to_edges, RECEIVER_NODES_TO_EDGES),
)
def test_output_values_larger_rank(self, broadcaster, expected):
"""Test the broadcasted output value."""
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
input_graph = input_graph.map(
lambda v: tf.reshape(v, [v.get_shape().as_list()[0]] + [2, -1]))
broadcasted_out = broadcaster(input_graph)
self.assertNDArrayNear(
np.reshape(np.array(expected, dtype=np.float32),
[len(expected)] + [2, -1]),
broadcasted_out,
err=1e-4)
@parameterized.named_parameters(
("globals_to_edges_no_globals",
blocks.broadcast_globals_to_edges, ("globals",)),
("globals_to_nodes_no_globals",
blocks.broadcast_globals_to_nodes, ("globals",)),
("sender_nodes_to_edges_none_nodes",
blocks.broadcast_sender_nodes_to_edges, ("nodes",)),
("sender_nodes_to_edges_none_senders",
blocks.broadcast_sender_nodes_to_edges,
("edges", "senders", "receivers")),
("receiver_nodes_to_edges_none_nodes",
blocks.broadcast_receiver_nodes_to_edges, ("nodes",)),
)
def test_missing_field_raises_exception(self, broadcaster, none_fields):
"""Test that an error is raised if a required field is `None`."""
input_graph = self._get_input_graph(none_fields)
with self.assertRaisesRegex(
ValueError, "field cannot be None when broadcasting"):
broadcaster(input_graph)
class ReducersTest(GraphModuleTest):
"""Tests for the reducers."""
@parameterized.parameters(
(blocks.unsorted_segment_min_or_zero,
[[0., 0.],
[0.1, -0.1],
[0.2, -0.3],
[0.4, -0.6],
[0.7, -1.],
[0.9, -0.9],
[0., 0.]]),
(blocks.unsorted_segment_max_or_zero,
[[0., 0.],
[0.1, -0.1],
[0.3, -0.2],
[0.6, -0.4],
[1., -0.7],
[0.9, -0.9],
[0., 0.]]),
)
def test_output_values(self, reducer, expected_values):
input_values_np = np.array([[0.1, -0.1],
[0.2, -0.2],
[0.3, -0.3],
[0.4, -0.4],
[0.5, -0.5],
[0.6, -0.6],
[0.7, -0.7],
[0.8, -0.8],
[0.9, -0.9],
[1., -1.]], dtype=np.float32)
input_indices_np = np.array([1, 2, 2, 3, 3, 3, 4, 4, 5, 4], dtype=np.int32)
num_groups_np = np.array(7, dtype=np.int32)
input_indices = tf.constant(input_indices_np, dtype=tf.int32)
input_values = tf.constant(input_values_np, dtype=tf.float32)
num_groups = tf.constant(num_groups_np, dtype=tf.int32)
reduced_out = reducer(input_values, input_indices, num_groups)
self.assertNDArrayNear(
np.array(expected_values, dtype=np.float32), reduced_out, err=1e-4)
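# A minimal numpy sketch (illustration only, not the `blocks` implementation)
# of the "or zero" semantics exercised above: segments that receive no inputs
# (ids 0 and 6 in ReducersTest) yield zeros instead of the dtype limits that a
# plain unsorted segment min would produce.
def _segment_min_or_zero_sketch(values, indices, num_groups):
  out = np.zeros((num_groups,) + values.shape[1:], dtype=values.dtype)
  for group in range(num_groups):
    rows = values[indices == group]
    if rows.size:
      out[group] = rows.min(axis=0)
  return out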
SEGMENT_SUM_EDGES_TO_GLOBALS = [
[302., 304., 306., 308.],
[-101., -102., -103., -104.],
[302., 304., 306., 308.],
[302., 304., 306., 308.],
]
SEGMENT_SUM_NODES_TO_GLOBALS = [
[60.3, 60.6],
[-60.3, -60.6],
[60.3, 60.6],
[60.3, 60.6],
]
SEGMENT_SUM_SENT_EDGES_TO_NODES = [
[101., 102., 103., 104.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[-101., -102., -103., -104.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[302., 304., 306., 308.],
[0., 0., 0., 0.,],
[101., 102., 103., 104.],
[0., 0., 0., 0.],
[201., 202., 203., 204.],
]
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES = [
[0., 0., 0., 0.],
[101., 102., 103., 104.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[-101., -102., -103., -104.],
[101., 102., 103., 104.],
[0., 0., 0., 0.],
[201., 202., 203., 204.],
[0., 0., 0., 0.],
[302., 304., 306., 308,],
[0., 0., 0., 0.],
]
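# A minimal numpy sketch (illustration only, not the `blocks` implementation)
# of the aggregations above: edges-to-globals aggregation sums each graph's
# edge features into a single row per graph. The value below reproduces
# SEGMENT_SUM_EDGES_TO_GLOBALS for the four small graphs used in these tests.
_EDGES_TO_GLOBALS_SUM_SKETCH = np.stack([
    np.sum(graph["edges"], axis=0)
    for graph in (SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4)])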
class FieldAggregatorsTest(GraphModuleTest):
@parameterized.named_parameters(
("edges_to_globals",
blocks.EdgesToGlobalsAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_EDGES_TO_GLOBALS,),
("nodes_to_globals",
blocks.NodesToGlobalsAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_NODES_TO_GLOBALS,),
("sent_edges_to_nodes",
blocks.SentEdgesToNodesAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_SENT_EDGES_TO_NODES,),
("received_edges_to_nodes",
blocks.ReceivedEdgesToNodesAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES),
)
def test_output_values(self, aggregator, expected):
input_graph = self._get_input_graph()
aggregated_out = aggregator(input_graph)
self.assertNDArrayNear(
np.array(expected, dtype=np.float32), aggregated_out, err=1e-4)
@parameterized.named_parameters(
("edges_to_globals",
blocks.EdgesToGlobalsAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_EDGES_TO_GLOBALS,),
("nodes_to_globals",
blocks.NodesToGlobalsAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_NODES_TO_GLOBALS,),
("sent_edges_to_nodes",
blocks.SentEdgesToNodesAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_SENT_EDGES_TO_NODES,),
("received_edges_to_nodes",
blocks.ReceivedEdgesToNodesAggregator(tf.math.unsorted_segment_sum),
SEGMENT_SUM_RECEIVED_EDGES_TO_NODES),
)
def test_output_values_larger_rank(self, aggregator, expected):
input_graph = self._get_input_graph()
input_graph = input_graph.map(
lambda v: tf.reshape(v, [v.get_shape().as_list()[0]] + [2, -1]))
aggregated_out = aggregator(input_graph)
self.assertNDArrayNear(
np.reshape(np.array(expected, dtype=np.float32),
[len(expected)] + [2, -1]),
aggregated_out,
err=1e-4)
@parameterized.named_parameters(
("received edges to nodes missing edges",
blocks.ReceivedEdgesToNodesAggregator, "edges"),
("sent edges to nodes missing edges",
blocks.SentEdgesToNodesAggregator, "edges"),
("nodes to globals missing nodes",
blocks.NodesToGlobalsAggregator, "nodes"),
("edges to globals missing nodes",
blocks.EdgesToGlobalsAggregator, "edges"),)
def test_missing_field_raises_exception(self, constructor, none_field):
"""Tests that aggregator fail if a required field is missing."""
input_graph = self._get_input_graph([none_field])
with self.assertRaisesRegex(ValueError, none_field):
constructor(tf.math.unsorted_segment_sum)(input_graph)
@parameterized.named_parameters(
("received edges to nodes missing nodes and globals",
blocks.ReceivedEdgesToNodesAggregator, ["nodes", "globals"]),
("sent edges to nodes missing nodes and globals",
blocks.SentEdgesToNodesAggregator, ["nodes", "globals"]),
("nodes to globals missing edges and globals",
blocks.NodesToGlobalsAggregator,
["edges", "receivers", "senders", "globals"]),
("edges to globals missing globals",
blocks.EdgesToGlobalsAggregator, ["globals"]),
)
def test_unused_field_can_be_none(self, constructor, none_fields):
"""Tests that aggregator fail if a required field is missing."""
input_graph = self._get_input_graph(none_fields)
constructor(tf.math.unsorted_segment_sum)(input_graph)
class EdgeBlockTest(GraphModuleTest):
def setUp(self):
super(EdgeBlockTest, self).setUp()
self._scale = 10.
self._edge_model_fn = lambda: lambda features: features * self._scale
model_args = lambda features, scale, offset: features * scale + offset
self._edge_model_args_fn = lambda: model_args
@parameterized.named_parameters(
("all inputs", True, True, True, True),
("edges nodes only", True, False, False, False),
("receiver nodes only", False, True, False, False),
("sender nodes only", False, False, True, False),
("globals only", False, False, False, True),
("edges and sender nodes", True, False, True, False),
("receiver nodes and globals", False, True, False, True),
)
def test_output_values(
self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals):
"""Compares the output of an EdgeBlock to an explicit computation."""
input_graph = self._get_input_graph()
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
output_graph_out = edge_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(input_graph.edges)
if use_receiver_nodes:
model_inputs.append(blocks.broadcast_receiver_nodes_to_edges(input_graph))
if use_sender_nodes:
model_inputs.append(blocks.broadcast_sender_nodes_to_edges(input_graph))
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_edges(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.nodes, output_graph_out.nodes)
self.assertIs(input_graph.globals, output_graph_out.globals)
expected_output_edges = model_inputs * self._scale
self.assertNDArrayNear(
expected_output_edges.numpy(), output_graph_out.edges.numpy(), err=1e-4)
@parameterized.named_parameters(
("only scaling", 2, 0),
("only offsetting", 0, 2),
("scaling and offsetting", 2, 2),
("without scaling and offsetting", 1, 0),
)
def test_optional_arguments(self, scale, offset):
"""Assesses the correctness of the EdgeBlock using arguments."""
input_graph = self._get_input_graph()
edge_block = blocks.EdgeBlock(edge_model_fn=self._edge_model_args_fn)
output_graph_out = edge_block(
input_graph, edge_model_kwargs=dict(scale=scale, offset=offset))
fixed_scale = scale
fixed_offset = offset
model_fn = lambda: lambda features: features * fixed_scale + fixed_offset
hardcoded_edge_block = blocks.EdgeBlock(edge_model_fn=model_fn)
expected_graph_out = hardcoded_edge_block(input_graph)
self.assertIs(expected_graph_out.nodes, output_graph_out.nodes)
self.assertIs(expected_graph_out.globals, output_graph_out.globals)
self.assertNDArrayNear(
expected_graph_out.edges.numpy(),
output_graph_out.edges.numpy(),
err=1e-4)
@parameterized.named_parameters(
("all inputs", True, True, True, True, 12),
("edges only", True, False, False, False, 4),
("receivers only", False, True, False, False, 2),
("senders only", False, False, True, False, 2),
("globals only", False, False, False, True, 4),
)
def test_created_variables(self,
use_edges, use_receiver_nodes, use_sender_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by an EdgeBlock."""
output_size = 10
expected_var_shapes_dict = {
"edge_block/mlp/linear_0/b:0": [output_size],
"edge_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
edge_block = blocks.EdgeBlock(
edge_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
edge_block(input_graph)
variables = edge_block.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing node (receivers only)", False, True, False, False, ("nodes",)),
("missing node (senders only)", False, False, True, False, ("nodes",)),
("missing edge data", True, False, False, False, ("edges",)),
("missing edges (but no edge consumption)", False, True, True, False,
("edges", "senders", "receivers")),
("missing globals", False, False, False, True, ("globals",)),
)
def test_missing_field_raises_exception(
self, use_edges, use_receiver_nodes, use_sender_nodes, use_globals,
none_fields):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph(none_fields)
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
with self.assertRaisesRegex(ValueError, "field cannot be None"):
edge_block(input_graph)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched edges and r. nodes", True, True, False, False, "nodes"),
("mismatched edges and s. nodes", True, False, True, False, "nodes"),
("mismatched edges and globals", True, False, False, True, "edges"),
("mismatched nodes and globals", False, True, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals
)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, False, False, True, "nodes"),
("mismatched edges", False, True, True, True, "edges"),
("mismatched globals", True, True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.EdgeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
edge_block = blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=use_edges,
use_receiver_nodes=use_nodes,
use_sender_nodes=use_nodes,
use_globals=use_globals)
output_graph = edge_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(input_graph.edges)
if use_nodes:
model_inputs.append(blocks.broadcast_receiver_nodes_to_edges(input_graph))
model_inputs.append(blocks.broadcast_sender_nodes_to_edges(input_graph))
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_edges(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.nodes, output_graph.nodes)
self.assertIs(input_graph.globals, output_graph.globals)
actual_edges = output_graph.edges.numpy()
model_inputs_out = model_inputs.numpy()
expected_output_edges = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_edges, actual_edges, err=1e-4)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegex(ValueError, "At least one of "):
blocks.EdgeBlock(
edge_model_fn=self._edge_model_fn,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=False,
use_globals=False)
class NodeBlockTest(GraphModuleTest):
def setUp(self):
super(NodeBlockTest, self).setUp()
self._scale = 10.
self._node_model_fn = lambda: lambda features: features * self._scale
model_args = lambda features, scale, offset: features * scale + offset
self._node_model_args_fn = lambda: model_args
@parameterized.named_parameters(
("all inputs, custom reductions", True, True, True, True,
tf.math.unsorted_segment_sum, tf.math.unsorted_segment_mean),
("received edges only, blocks reducer",
True, False, False, False, blocks.unsorted_segment_max_or_zero, None),
("sent edges only, custom reduction",
False, True, False, False, None, tf.math.unsorted_segment_prod),
("nodes only",
False, False, True, False, None, None),
("globals only",
False, False, False, True, None, None),
("received edges and nodes, custom reductions",
True, False, True, False,
blocks.unsorted_segment_min_or_zero, tf.math.unsorted_segment_prod),
("sent edges and globals, custom reduction",
False, True, False, True, None, blocks.unsorted_segment_min_or_zero),
)
def test_output_values(
self, use_received_edges, use_sent_edges, use_nodes,
use_globals, received_edges_reducer, sent_edges_reducer):
"""Compares the output of a NodeBlock to an explicit computation."""
input_graph = self._get_input_graph()
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
output_graph = node_block(input_graph)
model_inputs = []
if use_received_edges:
model_inputs.append(
blocks.ReceivedEdgesToNodesAggregator(
received_edges_reducer)(input_graph))
if use_sent_edges:
model_inputs.append(
blocks.SentEdgesToNodesAggregator(sent_edges_reducer)(input_graph))
if use_nodes:
model_inputs.append(input_graph.nodes)
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.edges, output_graph.edges)
self.assertIs(input_graph.globals, output_graph.globals)
output_graph_out = utils_tf.nest_to_numpy(output_graph)
model_inputs_out = model_inputs
expected_output_nodes = model_inputs_out * self._scale
self.assertNDArrayNear(
expected_output_nodes, output_graph_out.nodes, err=1e-4)
@parameterized.named_parameters(
("only scaling", 2, 0),
("only offsetting", 0, 2),
("scaling and offsetting", 2, 2),
("without scaling and offsetting", 1, 0),
)
def test_optional_arguments(self, scale, offset):
"""Assesses the correctness of the NodeBlock using arguments."""
input_graph = self._get_input_graph()
node_block = blocks.NodeBlock(node_model_fn=self._node_model_args_fn)
output_graph_out = node_block(
input_graph, node_model_kwargs=dict(scale=scale, offset=offset))
fixed_scale = scale
fixed_offset = offset
model_fn = lambda: lambda features: features * fixed_scale + fixed_offset
hardcoded_node_block = blocks.NodeBlock(node_model_fn=model_fn)
expected_graph_out = hardcoded_node_block(input_graph)
self.assertIs(expected_graph_out.edges, output_graph_out.edges)
self.assertIs(expected_graph_out.globals, output_graph_out.globals)
self.assertNDArrayNear(
expected_graph_out.nodes.numpy(),
output_graph_out.nodes.numpy(),
err=1e-4)
@parameterized.named_parameters(
("all inputs", True, True, True, True, 14),
("received edges only", True, False, False, False, 4),
("sent edges only", False, True, False, False, 4),
("nodes only", False, False, True, False, 2),
("globals only", False, False, False, True, 4),
)
def test_created_variables(self,
use_received_edges, use_sent_edges, use_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by a NodeBlock."""
output_size = 10
expected_var_shapes_dict = {
"node_block/mlp/linear_0/b:0": [output_size],
"node_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
node_block = blocks.NodeBlock(
node_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals)
node_block(input_graph)
variables = node_block.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing nodes", False, False, True, False, ("nodes",)),
("missing edge data (receivers only)",
True, False, False, False, ("edges",)),
("missing edge data (senders only)",
False, True, False, False, ("edges",)),
("missing globals", False, False, False, True, ("globals",)),
)
def test_missing_field_raises_exception(
self, use_received_edges, use_sent_edges, use_nodes, use_globals,
none_fields):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph(none_fields)
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals)
with self.assertRaisesRegex(ValueError, "field cannot be None"):
node_block(input_graph)
@parameterized.named_parameters(
("no received edges reducer", True, False, None,
tf.math.unsorted_segment_sum),
("no sent edges reducer", False, True, tf.math.unsorted_segment_sum,
None),
)
def test_missing_aggregation_raises_exception(
self, use_received_edges, use_sent_edges,
received_edges_reducer, sent_edges_reducer):
"""Checks that missing a required aggregation argument raises an error."""
with self.assertRaisesRegex(ValueError, "should not be None"):
blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=False,
use_globals=False,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched nodes and r. edges", True, False, True, False, "edges"),
("mismatched nodes and s. edges", True, False, True, False, "edges"),
("mismatched edges and globals", True, False, False, True, "globals"),
("mismatched nodes and globals", False, True, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"Dimensions of inputs should match"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, True, False, True, "nodes"),
("mismatched edges", False, False, True, True, "edges"),
("mismatched globals", True, True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.NodeBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_received_edges=use_received_edges,
use_sent_edges=use_sent_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
node_block = blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=use_edges,
use_sent_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
output_graph = node_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.ReceivedEdgesToNodesAggregator(
tf.math.unsorted_segment_sum)(input_graph))
model_inputs.append(
blocks.SentEdgesToNodesAggregator(
tf.math.unsorted_segment_sum)(input_graph))
if use_nodes:
model_inputs.append(input_graph.nodes)
if use_globals:
model_inputs.append(blocks.broadcast_globals_to_nodes(input_graph))
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.edges, output_graph.edges)
self.assertIs(input_graph.globals, output_graph.globals)
actual_nodes = output_graph.nodes.numpy()
model_inputs_out = model_inputs.numpy()
expected_output_nodes = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_nodes, actual_nodes, err=1e-4)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegex(ValueError, "At least one of "):
blocks.NodeBlock(
node_model_fn=self._node_model_fn,
use_received_edges=False,
use_sent_edges=False,
use_nodes=False,
use_globals=False)
class GlobalBlockTest(GraphModuleTest):
"""Tests for the GlobalBlock."""
def setUp(self):
super(GlobalBlockTest, self).setUp()
self._scale = 10.
self._global_model_fn = lambda: lambda features: features * self._scale
model_args = lambda features, scale, offset: features * scale + offset
self._global_model_args_fn = lambda: model_args
@parameterized.named_parameters(
("all_inputs, custom reductions",
True, True, True, tf.math.unsorted_segment_sum,
tf.math.unsorted_segment_mean),
("edges only, blocks reducer",
True, False, False, blocks.unsorted_segment_max_or_zero, None),
("nodes only, custom reduction",
False, True, False, None, tf.math.unsorted_segment_prod),
("globals only",
False, False, True, None, None),
("edges and nodes, blocks reducer",
True, True, False, blocks.unsorted_segment_min_or_zero,
tf.math.unsorted_segment_prod),
("nodes and globals, blocks reducer",
False, True, True, None, blocks.unsorted_segment_min_or_zero),
)
def test_output_values(
self, use_edges, use_nodes, use_globals, edges_reducer, nodes_reducer):
"""Compares the output of a GlobalBlock to an explicit computation."""
input_graph = self._get_input_graph()
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
output_graph = global_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.EdgesToGlobalsAggregator(edges_reducer)(input_graph))
if use_nodes:
model_inputs.append(
blocks.NodesToGlobalsAggregator(nodes_reducer)(input_graph))
if use_globals:
model_inputs.append(input_graph.globals)
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.edges, output_graph.edges)
self.assertIs(input_graph.nodes, output_graph.nodes)
output_graph_out = utils_tf.nest_to_numpy(output_graph)
model_inputs_out = model_inputs
expected_output_globals = model_inputs_out * self._scale
self.assertNDArrayNear(
expected_output_globals, output_graph_out.globals, err=1e-4)
@parameterized.named_parameters(
("only scaling", 2, 0),
("only offsetting", 0, 2),
("scaling and offsetting", 2, 2),
("without scaling and offsetting", 1, 0),
)
def test_optional_arguments(self, scale, offset):
"""Assesses the correctness of the GlobalBlock using arguments."""
input_graph = self._get_input_graph()
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_args_fn)
output_graph_out = global_block(
input_graph, global_model_kwargs=dict(scale=scale, offset=offset))
fixed_scale = scale
fixed_offset = offset
model_fn = lambda: lambda features: features * fixed_scale + fixed_offset
hardcoded_global_block = blocks.GlobalBlock(global_model_fn=model_fn)
expected_graph_out = hardcoded_global_block(input_graph)
self.assertIs(expected_graph_out.edges, output_graph_out.edges)
self.assertIs(expected_graph_out.nodes, output_graph_out.nodes)
self.assertNDArrayNear(
expected_graph_out.globals.numpy(),
output_graph_out.globals.numpy(),
err=1e-4)
@parameterized.named_parameters(
("default", True, True, True, 10),
("use edges only", True, False, False, 4),
("use nodes only", False, True, False, 2),
("use globals only", False, False, True, 4),
)
def test_created_variables(self, use_edges, use_nodes,
use_globals, expected_first_dim_w):
"""Verifies the variable names and shapes created by a GlobalBlock."""
output_size = 10
expected_var_shapes_dict = {
"global_block/mlp/linear_0/b:0": [output_size],
"global_block/mlp/linear_0/w:0": [expected_first_dim_w, output_size]}
input_graph = self._get_input_graph()
global_block = blocks.GlobalBlock(
global_model_fn=functools.partial(snt.nets.MLP,
output_sizes=[output_size]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
global_block(input_graph)
variables = global_block.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("missing edges", True, False, False, "edges"),
("missing nodes", False, True, False, "nodes"),
("missing globals", False, False, True, "globals"),
)
def test_missing_field_raises_exception(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that missing a required field raises an exception."""
input_graph = self._get_input_graph([none_field])
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
with self.assertRaisesRegex(ValueError, "field cannot be None"):
global_block(input_graph)
@parameterized.named_parameters(
("no edges", False, True, True, "edges"),
("no nodes", True, False, True, "nodes"),
("no globals", True, True, False, "globals"),
)
def test_unused_field_can_be_none(
self, use_edges, use_nodes, use_globals, none_field):
"""Checks that computation can handle non-necessary fields left None."""
input_graph = self._get_input_graph([none_field])
global_block = blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals)
output_graph = global_block(input_graph)
model_inputs = []
if use_edges:
model_inputs.append(
blocks.EdgesToGlobalsAggregator(
tf.math.unsorted_segment_sum)(input_graph))
if use_nodes:
model_inputs.append(
blocks.NodesToGlobalsAggregator(
tf.math.unsorted_segment_sum)(input_graph))
if use_globals:
model_inputs.append(input_graph.globals)
model_inputs = tf.concat(model_inputs, axis=-1)
self.assertIs(input_graph.edges, output_graph.edges)
self.assertIs(input_graph.nodes, output_graph.nodes)
actual_globals = output_graph.globals.numpy()
model_inputs_out = model_inputs
expected_output_globals = model_inputs_out * self._scale
self.assertNDArrayNear(expected_output_globals, actual_globals, err=1e-4)
def test_compatible_higher_rank_no_raise(self):
"""No exception should occur with higher ranks tensors."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.map(lambda v: tf.transpose(v, [0, 2, 1, 3]))
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]))
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("mismatched nodes and edges", True, True, False, "edges"),
("mismatched edges and globals", True, False, True, "globals"),
("mismatched nodes and globals", False, True, True, "globals"),
)
def test_incompatible_higher_rank_inputs_raises(self,
use_edges,
use_nodes,
use_globals,
field):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"Dimensions of inputs should match"):
network(input_graph)
@parameterized.named_parameters(
("mismatched nodes", True, False, True, "nodes"),
("mismatched edges", False, True, True, "edges"),
("mismatched globals", True, True, False, "globals"),
)
def test_incompatible_higher_rank_inputs_no_raise(self,
use_edges,
use_nodes,
use_globals,
field):
"""No exception should occur if a differently shapped field is not used."""
input_graph = self._get_shaped_input_graph()
input_graph = input_graph.replace(
**{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
network = blocks.GlobalBlock(
functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals
)
self._assert_build_and_run(network, input_graph)
def test_no_input_raises_exception(self):
"""Checks that receiving no input raises an exception."""
with self.assertRaisesRegex(ValueError, "At least one of "):
blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=False,
use_nodes=False,
use_globals=False)
@parameterized.named_parameters(
("missing edges reducer", True, False, None,
tf.math.unsorted_segment_sum),
("missing nodes reducer", False, True, tf.math.unsorted_segment_sum,
None),
)
def test_missing_aggregation_raises_exception(
self, use_edges, use_nodes, edges_reducer,
nodes_reducer):
"""Checks that missing a required aggregation argument raises an error."""
with self.assertRaisesRegex(ValueError, "should not be None"):
blocks.GlobalBlock(
global_model_fn=self._global_model_fn,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=False,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
class CommonBlockTests(GraphModuleTest):
"""Tests that are common to the EdgeBlock, NodeBlock and GlobalBlock."""
@parameterized.named_parameters(
("edge block", blocks.EdgeBlock),
("node block", blocks.NodeBlock),
("global block", blocks.GlobalBlock),
)
def test_dynamic_batch_sizes(self, block_constructor):
"""Checks that all batch sizes are as expected through a GraphNetwork."""
    # Placeholders are unnecessary in TF2; the graphs are built from constants.
input_graph = utils_np.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
input_graph = input_graph.map(tf.constant, fields=graphs.ALL_FIELDS)
model = block_constructor(
functools.partial(snt.nets.MLP, output_sizes=[10]))
output = model(input_graph)
actual = utils_tf.nest_to_numpy(output)
for k, v in input_graph._asdict().items():
self.assertEqual(v.shape[0], getattr(actual, k).shape[0])
@parameterized.named_parameters(
("float64 data, edge block", tf.float64, tf.int32, blocks.EdgeBlock),
("int64 indices, edge block", tf.float32, tf.int64, blocks.EdgeBlock),
("float64 data, node block", tf.float64, tf.int32, blocks.NodeBlock),
("int64 indices, node block", tf.float32, tf.int64, blocks.NodeBlock),
("float64 data, global block", tf.float64, tf.int32, blocks.GlobalBlock),
("int64 indices, global block", tf.float32, tf.int64, blocks.GlobalBlock),
)
def test_dtypes(self, data_dtype, indices_dtype, block_constructor):
"""Checks that all the output types are as expected for blocks."""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype),
["nodes", "edges", "globals"])
input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype),
["receivers", "senders"])
model = block_constructor(
functools.partial(snt.nets.MLP, output_sizes=[10]))
output = model(input_graph)
for field in ["nodes", "globals", "edges"]:
self.assertEqual(data_dtype, getattr(output, field).dtype)
for field in ["receivers", "senders"]:
self.assertEqual(indices_dtype, getattr(output, field).dtype)
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests_tf2/blocks_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for modules.py in Tensorflow 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import blocks
from graph_nets import graphs
from graph_nets import modules
from graph_nets import utils_np
from graph_nets import utils_tf
import numpy as np
import sonnet as snt
import tensorflow as tf
SMALL_GRAPH_1 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 1],
"receivers": [1, 2],
}
SMALL_GRAPH_2 = {
"globals": [-1.1, -1.2, -1.3],
"nodes": [[-10.1, -10.2], [-20.1, -20.2], [-30.1, -30.2]],
"edges": [[-101., -102., -103., -104.]],
"senders": [1,],
"receivers": [2,],
}
SMALL_GRAPH_3 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [1, 1],
"receivers": [0, 2],
}
SMALL_GRAPH_4 = {
"globals": [1.1, 1.2, 1.3],
"nodes": [[10.1, 10.2], [20.1, 20.2], [30.1, 30.2]],
"edges": [[101., 102., 103., 104.], [201., 202., 203., 204.]],
"senders": [0, 2],
"receivers": [1, 1],
}
class GraphModuleTest(tf.test.TestCase, parameterized.TestCase):
"""Base class for all the tests in this file."""
def setUp(self):
super(GraphModuleTest, self).setUp()
tf.random.set_seed(0)
def _assert_all_none_or_all_close(self, expected, actual, *args, **kwargs):
if expected is None:
return self.assertAllEqual(expected, actual)
return self.assertAllClose(expected, actual, *args, **kwargs)
def _get_input_graph(self, none_field=None):
input_graph = utils_tf.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2, SMALL_GRAPH_3, SMALL_GRAPH_4])
if none_field:
input_graph = input_graph.replace(**{none_field: None})
return input_graph
def _get_shaped_input_graph(self):
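    # Rank-4 (image-like) features for every field, used to exercise the
    # higher-rank input/output tests below.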
return graphs.GraphsTuple(
nodes=tf.zeros([3, 4, 5, 11], dtype=tf.float32),
edges=tf.zeros([5, 4, 5, 12], dtype=tf.float32),
globals=tf.zeros([2, 4, 5, 13], dtype=tf.float32),
receivers=tf.range(5, dtype=tf.int32) // 3,
senders=tf.range(5, dtype=tf.int32) % 3,
n_node=tf.constant([2, 1], dtype=tf.int32),
n_edge=tf.constant([3, 2], dtype=tf.int32),
)
def _get_shaped_model_fns(self):
edge_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3])
node_model_fn = functools.partial(
snt.Conv2D, output_channels=8, kernel_shape=[3, 3])
global_model_fn = functools.partial(
snt.Conv2D, output_channels=7, kernel_shape=[3, 3])
return edge_model_fn, node_model_fn, global_model_fn
def _assert_build_and_run(self, network, input_graph):
# No error at construction time.
_ = network(input_graph)
class GraphIndependentTest(GraphModuleTest):
def _get_model(self, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.nets.MLP, output_sizes=[5]),
"node_model_fn": functools.partial(snt.nets.MLP, output_sizes=[10]),
"global_model_fn": functools.partial(snt.nets.MLP, output_sizes=[15]),
}
if name:
kwargs["name"] = name
return modules.GraphIndependent(**kwargs)
def test_same_as_subblocks(self):
"""Compares the output to explicit subblocks output."""
input_graph = self._get_input_graph()
model = self._get_model()
output_graph = utils_tf.nest_to_numpy(model(input_graph))
expected_output_edges = model._edge_model(input_graph.edges).numpy()
expected_output_nodes = model._node_model(input_graph.nodes).numpy()
expected_output_globals = model._global_model(input_graph.globals).numpy()
self._assert_all_none_or_all_close(expected_output_edges,
output_graph.edges)
self._assert_all_none_or_all_close(expected_output_nodes,
output_graph.nodes)
self._assert_all_none_or_all_close(expected_output_globals,
output_graph.globals)
@parameterized.named_parameters(
("with scale and offset", {"scale": 2, "offset": 1},
{"scale": .5, "offset": .25}, {"scale": 3, "offset": 1.5})
)
def test_kwargs(self,
edge_model_kwargs,
node_model_kwargs,
global_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
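    # Build one model that receives scale/offset as call-time kwargs and a
    # second model with those values baked in; their outputs should match.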
edge_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
edge_model_fn_with_params = functools.partial(
edge_model_fn(),
scale=edge_model_kwargs["scale"],
offset=edge_model_kwargs["offset"])
node_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_model_fn_with_params = functools.partial(
node_model_fn(),
scale=node_model_kwargs["scale"],
offset=node_model_kwargs["offset"])
global_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
global_model_fn_with_params = functools.partial(
global_model_fn(),
scale=global_model_kwargs["scale"],
offset=global_model_kwargs["offset"])
model = modules.GraphIndependent(
edge_model_fn, node_model_fn, global_model_fn)
model_with_params = modules.GraphIndependent(
lambda: edge_model_fn_with_params,
lambda: node_model_fn_with_params,
lambda: global_model_fn_with_params)
output_model = model(
input_graph, edge_model_kwargs, node_model_kwargs, global_model_kwargs)
output_graph = utils_tf.nest_to_numpy(output_model)
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.receivers, output_graph.receivers,)
self.assertAllEqual(expected_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(expected_graph.edges, output_graph.edges)
self._assert_all_none_or_all_close(expected_graph.nodes, output_graph.nodes)
self._assert_all_none_or_all_close(
expected_graph.globals, output_graph.globals)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a GraphIndependent."""
name = name if name is not None else "graph_independent"
expected_var_shapes_dict = {
name + "/edge_model/mlp/linear_0/b:0": [5],
name + "/edge_model/mlp/linear_0/w:0": [4, 5],
name + "/node_model/mlp/linear_0/b:0": [10],
name + "/node_model/mlp/linear_0/w:0": [2, 10],
name + "/global_model/mlp/linear_0/b:0": [15],
name + "/global_model/mlp/linear_0/w:0": [3, 15],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("with tffunction", True),
("without tffunction", False),)
def test_gradient_flow(self, use_autograd):
"""Verifies that gradient flow is as expected."""
input_graph = self._get_input_graph()
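    # GraphIndependent applies a separate model per field, so a gradient should
    # only flow from an output field back to the matching input field.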
for input_field in ["nodes", "edges", "globals"]:
input_tensor = getattr(input_graph, input_field)
for output_field in ["nodes", "edges", "globals"]:
model = self._get_model()
if use_autograd:
model.__call__ = tf.function(model.__call__)
with tf.GradientTape() as tape:
tape.watch(input_tensor)
output_graph = model(input_graph)
output_tensor = getattr(output_graph, output_field)
gradient = tape.gradient(output_tensor, input_tensor)
if input_field == output_field:
self.assertNotEqual(
None, gradient,
msg="gradient should flow from {} to {}".format(
output_field, input_field))
else:
self.assertIsNone(
gradient, msg="gradient should not flow from {} to {}".format(
output_field, input_field))
@parameterized.named_parameters(
("differently shaped edges", "edges"),
("differently shaped nodes", "nodes"),
("differently shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
"""A GraphIndependent does not make assumptions on its inputs shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
network = modules.GraphIndependent(
edge_model_fn, node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class GraphNetworkTest(GraphModuleTest):
def _get_model(self):
edge_model_fn = functools.partial(snt.Linear, output_size=5)
node_model_fn = functools.partial(snt.Linear, output_size=10)
global_model_fn = functools.partial(snt.Linear, output_size=15)
return modules.GraphNetwork(
edge_model_fn=edge_model_fn,
node_model_fn=node_model_fn,
global_model_fn=global_model_fn)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a GraphNetwork."""
name = name if name is not None else "graph_network"
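    # The first weight dimension of each block is the sum of the feature sizes
    # it concatenates: input edges/nodes/globals plus upstream block outputs.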
expected_var_shapes_dict = {
name + "/edge_block/mlp/linear_0/b:0": [5],
name + "/edge_block/mlp/linear_0/w:0": [4 + 4 + 3, 5],
name + "/node_block/mlp/linear_0/b:0": [10],
name + "/node_block/mlp/linear_0/w:0": [5 + 2 + 3, 10],
name + "/global_block/mlp/linear_0/b:0": [15],
name + "/global_block/mlp/linear_0/w:0": [10 + 5 + 3, 15],
}
input_graph = self._get_input_graph()
extra_kwargs = {"name": name} if name else {}
model = modules.GraphNetwork(
edge_model_fn=functools.partial(snt.nets.MLP, output_sizes=[5]),
node_model_fn=functools.partial(snt.nets.MLP, output_sizes=[10]),
global_model_fn=functools.partial(snt.nets.MLP, output_sizes=[15]),
**extra_kwargs)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("reduce sum reduction", tf.math.unsorted_segment_sum, False),
("reduce sum reduction with tf.function", tf.math.unsorted_segment_sum,
True),
("reduce max or zero reduction", blocks.unsorted_segment_max_or_zero,
True),)
def test_same_as_subblocks(self, reducer, use_tf_function):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock` and `GlobalBlock`.
use_tf_function: Whether to compile the model with `tf.function`.
"""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(snt.Linear, output_size=5)
node_model_fn = functools.partial(snt.Linear, output_size=10)
global_model_fn = functools.partial(snt.Linear, output_size=15)
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
node_model_fn=node_model_fn,
global_model_fn=global_model_fn,
reducer=reducer)
if use_tf_function:
input_signature = [utils_tf.specs_from_graphs_tuple(input_graph)]
graph_network_fn = tf.function(graph_network, input_signature)
else:
graph_network_fn = graph_network
output_graph = graph_network_fn(input_graph)
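    # Rebuild the same pipeline from standalone blocks that reuse the models
    # created inside the GraphNetwork, so both paths share identical weights.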
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: graph_network._edge_block._edge_model,
use_sender_nodes=True,
use_edges=True,
use_receiver_nodes=True,
use_globals=True)
node_block = blocks.NodeBlock(
node_model_fn=lambda: graph_network._node_block._node_model,
use_nodes=True,
use_sent_edges=False,
use_received_edges=True,
use_globals=True,
received_edges_reducer=reducer)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: graph_network._global_block._global_model,
use_nodes=True,
use_edges=True,
use_globals=True,
edges_reducer=reducer,
nodes_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = node_block(expected_output_edge_block)
expected_output_global_block = global_block(expected_output_node_block)
expected_edges = expected_output_edge_block.edges.numpy()
expected_nodes = expected_output_node_block.nodes.numpy()
expected_globals = expected_output_global_block.globals.numpy()
self._assert_all_none_or_all_close(expected_edges,
output_graph.edges.numpy())
self._assert_all_none_or_all_close(expected_nodes,
output_graph.nodes.numpy())
self._assert_all_none_or_all_close(expected_globals,
output_graph.globals.numpy())
@parameterized.named_parameters(
("with scale and offset", {"scale": 2, "offset": 1},
{"scale": .5, "offset": .25}, {"scale": 3, "offset": 1.5})
)
def test_kwargs(self,
edge_model_kwargs,
node_model_kwargs,
global_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
edge_model_fn_with_params = functools.partial(
edge_model_fn(),
scale=edge_model_kwargs["scale"],
offset=edge_model_kwargs["offset"])
node_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_model_fn_with_params = functools.partial(
node_model_fn(),
scale=node_model_kwargs["scale"],
offset=node_model_kwargs["offset"])
global_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
global_model_fn_with_params = functools.partial(
global_model_fn(),
scale=global_model_kwargs["scale"],
offset=global_model_kwargs["offset"])
model = modules.GraphNetwork(edge_model_fn, node_model_fn, global_model_fn)
model_with_params = modules.GraphNetwork(
lambda: edge_model_fn_with_params,
lambda: node_model_fn_with_params,
lambda: global_model_fn_with_params)
output_model = model(
input_graph, edge_model_kwargs, node_model_kwargs, global_model_kwargs)
output_graph = utils_tf.nest_to_numpy(output_model)
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.receivers, output_graph.receivers,)
self.assertAllEqual(expected_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(expected_graph.edges, output_graph.edges)
self._assert_all_none_or_all_close(expected_graph.nodes, output_graph.nodes)
self._assert_all_none_or_all_close(
expected_graph.globals, output_graph.globals)
def test_dynamic_batch_sizes(self):
"""Checks that all batch sizes are as expected through a GraphNetwork."""
input_graph = utils_np.data_dicts_to_graphs_tuple(
[SMALL_GRAPH_1, SMALL_GRAPH_2])
input_graph = input_graph.map(tf.constant, fields=graphs.ALL_FIELDS)
model = self._get_model()
output = model(input_graph)
for k, v in input_graph._asdict().items():
self.assertEqual(v.shape[0], getattr(output, k).shape[0])
@parameterized.named_parameters(
("float64 data", tf.float64, tf.int32),
("int64 indices", tf.float32, tf.int64),)
def test_dtypes(self, data_dtype, indices_dtype):
"""Checks that all the output types are as expected in a GraphNetwork."""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda v: tf.cast(v, data_dtype),
["nodes", "globals", "edges"])
input_graph = input_graph.map(lambda v: tf.cast(v, indices_dtype),
["senders", "receivers"])
model = self._get_model()
output = model(input_graph)
for field in ["nodes", "globals", "edges"]:
self.assertEqual(data_dtype, getattr(output, field).dtype)
for field in ["receivers", "senders"]:
self.assertEqual(indices_dtype, getattr(output, field).dtype)
@parameterized.named_parameters(
("edges only", True, False, False, False),
("receivers only", False, True, False, False),
("senders only", False, False, True, False),
("globals only", False, False, False, True),)
def test_edge_block_options(self,
use_edges,
use_receiver_nodes,
use_sender_nodes,
use_globals):
"""Test for configuring the EdgeBlock options."""
reducer = tf.math.unsorted_segment_sum
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(snt.Linear, output_size=10)
edge_block_opt = {"use_edges": use_edges,
"use_receiver_nodes": use_receiver_nodes,
"use_sender_nodes": use_sender_nodes,
"use_globals": use_globals}
# Identity node model
node_model_fn = lambda: tf.identity
node_block_opt = {"use_received_edges": False,
"use_sent_edges": False,
"use_nodes": True,
"use_globals": False}
# Identity global model
global_model_fn = lambda: tf.identity
global_block_opt = {"use_globals": True,
"use_nodes": False,
"use_edges": False}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt,
reducer=reducer)
output_graph = utils_tf.nest_to_numpy(graph_network(input_graph))
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: graph_network._edge_block._edge_model,
use_edges=use_edges,
use_receiver_nodes=use_receiver_nodes,
use_sender_nodes=use_sender_nodes,
use_globals=use_globals)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = expected_output_edge_block
expected_output_global_block = expected_output_node_block
expected_edges = expected_output_edge_block.edges.numpy()
expected_nodes = expected_output_node_block.nodes.numpy()
expected_globals = expected_output_global_block.globals.numpy()
self._assert_all_none_or_all_close(expected_edges,
output_graph.edges)
self._assert_all_none_or_all_close(expected_nodes,
output_graph.nodes)
self._assert_all_none_or_all_close(expected_globals,
output_graph.globals)
@parameterized.named_parameters(
("received edges only", True, False, False, False, None, None),
("received edges, max reduction",
True, False, False, False, tf.math.unsorted_segment_max, None),
("sent edges only", False, True, False, False, None, None),
("sent edges, max reduction",
False, True, False, False, None, tf.math.unsorted_segment_max),
("nodes only", False, False, True, False, None, None),
("globals only", False, False, False, True, None, None),
)
def test_node_block_options(self,
use_received_edges,
use_sent_edges,
use_nodes,
use_globals,
received_edges_reducer,
sent_edges_reducer):
"""Test for configuring the NodeBlock options."""
input_graph = self._get_input_graph()
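    # Reducers are only required for edge aggregations that are actually used;
    # default them to sum so the standalone NodeBlock below matches.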
if use_received_edges:
received_edges_reducer = (
received_edges_reducer or tf.math.unsorted_segment_sum)
if use_sent_edges:
sent_edges_reducer = (
sent_edges_reducer or tf.math.unsorted_segment_sum)
# Identity edge model.
edge_model_fn = lambda: tf.identity
edge_block_opt = {"use_edges": True,
"use_receiver_nodes": False,
"use_sender_nodes": False,
"use_globals": False}
node_model_fn = functools.partial(snt.Linear, output_size=10)
node_block_opt = {"use_received_edges": use_received_edges,
"use_sent_edges": use_sent_edges,
"use_nodes": use_nodes,
"use_globals": use_globals,
"received_edges_reducer": received_edges_reducer,
"sent_edges_reducer": sent_edges_reducer}
# Identity global model
global_model_fn = lambda: tf.identity
global_block_opt = {"use_globals": True,
"use_nodes": False,
"use_edges": False}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt)
output_graph = utils_tf.nest_to_numpy(graph_network(input_graph))
node_block = blocks.NodeBlock(
node_model_fn=lambda: graph_network._node_block._node_model,
use_nodes=use_nodes,
use_sent_edges=use_sent_edges,
use_received_edges=use_received_edges,
use_globals=use_globals,
received_edges_reducer=received_edges_reducer,
sent_edges_reducer=sent_edges_reducer)
expected_output_edge_block = input_graph
expected_output_node_block = node_block(input_graph)
expected_output_global_block = expected_output_node_block
expected_edges = expected_output_edge_block.edges.numpy()
expected_nodes = expected_output_node_block.nodes.numpy()
expected_globals = expected_output_global_block.globals.numpy()
self._assert_all_none_or_all_close(expected_edges,
output_graph.edges)
self._assert_all_none_or_all_close(expected_nodes,
output_graph.nodes)
self._assert_all_none_or_all_close(expected_globals,
output_graph.globals)
@parameterized.named_parameters(
("edges only", True, False, False, None, None),
("edges only, max", True, False, False, tf.math.unsorted_segment_max,
None),
("nodes only", False, True, False, None, None),
("nodes only, max", False, True, False, None,
tf.math.unsorted_segment_max),
("globals only", False, False, True, None, None),
)
def test_global_block_options(self,
use_edges,
use_nodes,
use_globals,
edges_reducer,
nodes_reducer):
"""Test for configuring the NodeBlock options."""
input_graph = self._get_input_graph()
if use_edges:
edges_reducer = edges_reducer or tf.math.unsorted_segment_sum
if use_nodes:
nodes_reducer = nodes_reducer or tf.math.unsorted_segment_sum
# Identity edge model.
edge_model_fn = lambda: tf.identity
edge_block_opt = {"use_edges": True,
"use_receiver_nodes": False,
"use_sender_nodes": False,
"use_globals": False}
# Identity node model
node_model_fn = lambda: tf.identity
node_block_opt = {"use_received_edges": False,
"use_sent_edges": False,
"use_nodes": True,
"use_globals": False}
global_model_fn = functools.partial(snt.Linear, output_size=10)
global_block_opt = {"use_globals": use_globals,
"use_nodes": use_nodes,
"use_edges": use_edges,
"edges_reducer": edges_reducer,
"nodes_reducer": nodes_reducer}
graph_network = modules.GraphNetwork(
edge_model_fn=edge_model_fn,
edge_block_opt=edge_block_opt,
node_model_fn=node_model_fn,
node_block_opt=node_block_opt,
global_model_fn=global_model_fn,
global_block_opt=global_block_opt)
output_graph = utils_tf.nest_to_numpy(graph_network(input_graph))
global_block = blocks.GlobalBlock(
global_model_fn=lambda: graph_network._global_block._global_model,
use_edges=use_edges,
use_nodes=use_nodes,
use_globals=use_globals,
edges_reducer=edges_reducer,
nodes_reducer=nodes_reducer)
expected_output_edge_block = input_graph
expected_output_node_block = expected_output_edge_block
expected_output_global_block = global_block(expected_output_node_block)
expected_edges = expected_output_edge_block.edges.numpy()
expected_nodes = expected_output_node_block.nodes.numpy()
expected_globals = expected_output_global_block.globals.numpy()
self._assert_all_none_or_all_close(expected_edges,
output_graph.edges)
self._assert_all_none_or_all_close(expected_nodes,
output_graph.nodes)
self._assert_all_none_or_all_close(expected_globals,
output_graph.globals)
def test_higher_rank_outputs(self):
"""Tests that a graph net can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
network = modules.GraphNetwork(*self._get_shaped_model_fns())
self._assert_build_and_run(network, input_graph)
@parameterized.named_parameters(
("wrongly shaped edges", "edges"),
("wrongly shaped nodes", "nodes"),
("wrongly shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
graph_network = modules.GraphNetwork(
edge_model_fn, node_model_fn, global_model_fn)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"Dimensions of inputs should match"):
graph_network(input_graph)
def test_incompatible_higher_rank_partial_outputs_raises(self):
"""A error should be raised if partial outputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
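    # A strided conv changes the spatial shape of one field's output, so the
    # downstream block can no longer concatenate it with the other fields.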
edge_model_fn_2 = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
graph_network = modules.GraphNetwork(
edge_model_fn_2, node_model_fn, global_model_fn)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
graph_network(input_graph)
node_model_fn_2 = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
graph_network = modules.GraphNetwork(
edge_model_fn, node_model_fn_2, global_model_fn)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
graph_network(input_graph)
class InteractionNetworkTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=5),
"node_model_fn": functools.partial(snt.Linear, output_size=10)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.InteractionNetwork(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by an InteractionNetwork."""
name = name if name is not None else "interaction_network"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [5],
name + "/edge_block/linear/w:0": [2 + 2 + 4, 5],
name + "/node_block/linear/b:0": [10],
name + "/node_block/linear/w:0": [5 + 2, 10],
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
model = self._get_model(name=name)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum,),
("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
("no globals", tf.math.unsorted_segment_sum, "globals"),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock`s.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
interaction_network = self._get_model(reducer)
output_graph = interaction_network(input_graph)
edges_out = output_graph.edges.numpy()
nodes_out = output_graph.nodes.numpy()
self.assertAllEqual(input_graph.globals, output_graph.globals)
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: interaction_network._edge_block._edge_model,
use_sender_nodes=True,
use_edges=True,
use_receiver_nodes=True,
use_globals=False)
node_block = blocks.NodeBlock(
node_model_fn=lambda: interaction_network._node_block._node_model,
use_nodes=True,
use_sent_edges=False,
use_received_edges=True,
use_globals=False,
received_edges_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_node_block = node_block(expected_output_edge_block)
expected_edges = expected_output_edge_block.edges.numpy()
expected_nodes = expected_output_node_block.nodes.numpy()
self._assert_all_none_or_all_close(expected_edges, edges_out)
self._assert_all_none_or_all_close(expected_nodes, nodes_out)
@parameterized.named_parameters(
("with scale and offset",
{"scale": 2, "offset": 1}, {"scale": .5, "offset": .25})
)
def test_kwargs(self, edge_model_kwargs, node_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
edge_model_fn_with_params = functools.partial(
edge_model_fn(),
scale=edge_model_kwargs["scale"],
offset=edge_model_kwargs["offset"])
node_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_model_fn_with_params = functools.partial(
node_model_fn(),
scale=node_model_kwargs["scale"],
offset=node_model_kwargs["offset"])
model = modules.InteractionNetwork(edge_model_fn, node_model_fn)
model_with_params = modules.InteractionNetwork(
lambda: edge_model_fn_with_params, lambda: node_model_fn_with_params)
output_graph = utils_tf.nest_to_numpy(
model(input_graph, edge_model_kwargs, node_model_kwargs))
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.globals, output_graph.globals)
self.assertAllEqual(expected_graph.receivers, output_graph.receivers,)
self.assertAllEqual(expected_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(expected_graph.edges, output_graph.edges)
self._assert_all_none_or_all_close(expected_graph.nodes, output_graph.nodes)
@parameterized.named_parameters(
("no nodes", ["nodes"],),
("no edge data", ["edges"],),
("no edges", ["edges", "receivers", "senders"],),
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
interaction_network = self._get_model()
with self.assertRaises(ValueError):
interaction_network(input_graph)
def test_higher_rank_outputs(self):
"""Tests that an IN can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
self._assert_build_and_run(graph_network, input_graph)
@parameterized.named_parameters(
("wrongly shaped edges", "edges"),
("wrongly shaped nodes", "nodes"),)
def test_incompatible_higher_rank_inputs_raises(self, field_to_reshape):
"""Am exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
graph_network(input_graph)
def test_incompatible_higher_rank_inputs_no_raise(self):
"""The globals can have an arbitrary shape in the input."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, node_model_fn, _ = self._get_shaped_model_fns()
input_graph = input_graph.replace(
globals=tf.transpose(input_graph.globals, [0, 2, 1, 3]))
graph_network = modules.InteractionNetwork(edge_model_fn, node_model_fn)
self._assert_build_and_run(graph_network, input_graph)
class RelationNetworkTest(GraphModuleTest):
def _get_model(self, reducer=tf.math.unsorted_segment_sum, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=5),
"global_model_fn": functools.partial(snt.Linear, output_size=15)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.RelationNetwork(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a RelationNetwork."""
name = name if name is not None else "relation_network"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [5],
name + "/edge_block/linear/w:0": [2 + 2, 5],
name + "/global_block/linear/b:0": [15],
name + "/global_block/linear/w:0": [5, 15],
}
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
model = self._get_model(name=name)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum, None),
("max or zero reduction", blocks.unsorted_segment_max_or_zero, None),
("no edges", tf.math.unsorted_segment_sum, "edges"),
("no globals", tf.math.unsorted_segment_sum, "globals"),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `GlobalBlock`.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
relation_network = self._get_model(reducer)
output_graph = relation_network(input_graph)
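    # A RelationNetwork is an EdgeBlock over sender/receiver nodes followed by
    # a GlobalBlock over the resulting edges; nodes and edges pass through.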
edge_block = blocks.EdgeBlock(
edge_model_fn=lambda: relation_network._edge_block._edge_model,
use_edges=False,
use_receiver_nodes=True,
use_sender_nodes=True,
use_globals=False)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: relation_network._global_block._global_model,
use_edges=True,
use_nodes=False,
use_globals=False,
edges_reducer=reducer,
nodes_reducer=reducer)
expected_output_edge_block = edge_block(input_graph)
expected_output_global_block = global_block(expected_output_edge_block)
self.assertIs(input_graph.edges, output_graph.edges)
self.assertIs(input_graph.nodes, output_graph.nodes)
self._assert_all_none_or_all_close(
output_graph.globals.numpy(),
expected_output_global_block.globals.numpy())
@parameterized.named_parameters(
("with scale and offset",
{"scale": 2, "offset": 1}, {"scale": .5, "offset": .25})
)
def test_kwargs(self, edge_model_kwargs, global_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
edge_model_fn_with_params = functools.partial(
edge_model_fn(),
scale=edge_model_kwargs["scale"],
offset=edge_model_kwargs["offset"])
global_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
global_model_fn_with_params = functools.partial(
global_model_fn(),
scale=global_model_kwargs["scale"],
offset=global_model_kwargs["offset"])
model = modules.RelationNetwork(edge_model_fn, global_model_fn)
model_with_params = modules.RelationNetwork(
lambda: edge_model_fn_with_params, lambda: global_model_fn_with_params)
output_graph = utils_tf.nest_to_numpy(
model(input_graph, edge_model_kwargs, global_model_kwargs))
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.edges, output_graph.edges)
self.assertAllEqual(expected_graph.nodes, output_graph.nodes)
    self.assertAllEqual(expected_graph.receivers, output_graph.receivers)
    self.assertAllEqual(expected_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(
expected_graph.globals, output_graph.globals)
@parameterized.named_parameters(
("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
relation_network = self._get_model()
with self.assertRaises(ValueError):
relation_network(input_graph)
@parameterized.named_parameters(
("differently shaped edges", "edges"),
("differently shaped nodes", "nodes"),
("differently shaped globals", "globals"),)
def test_incompatible_higher_rank_inputs_no_raise(self, field_to_reshape):
"""A RelationNetwork does not make assumptions on its inputs shapes."""
input_graph = self._get_shaped_input_graph()
edge_model_fn, _, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.map(
lambda v: tf.transpose(v, [0, 2, 1, 3]), [field_to_reshape])
network = modules.RelationNetwork(edge_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class DeepSetsTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"node_model_fn": functools.partial(snt.Linear, output_size=5),
"global_model_fn": functools.partial(snt.Linear, output_size=15)
}
if reducer:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.DeepSets(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a DeepSets network."""
name = name if name is not None else "deep_sets"
expected_var_shapes_dict = {
name + "/node_block/linear/b:0": [5],
name + "/node_block/linear/w:0": [2 + 3, 5],
name + "/global_block/linear/b:0": [15],
name + "/global_block/linear/w:0": [5, 15],
}
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum, []),
("no edge data", tf.math.unsorted_segment_sum, ["edges"]),
("no edges", tf.math.unsorted_segment_sum,
["edges", "receivers", "senders"]),
("max or zero reduction", blocks.unsorted_segment_max_or_zero, []),
)
def test_same_as_subblocks(self, reducer, none_fields):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the NodeBlock.
none_fields: (list of strings) The corresponding fields are removed from
the input graph.
"""
input_graph = self._get_input_graph()
input_graph = input_graph.map(lambda _: None, none_fields)
deep_sets = self._get_model(reducer)
output_graph = deep_sets(input_graph)
output_nodes = output_graph.nodes.numpy()
output_globals = output_graph.globals.numpy()
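    # DeepSets ignores edges entirely: a NodeBlock over nodes and globals,
    # followed by a GlobalBlock that aggregates the updated nodes.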
node_block = blocks.NodeBlock(
node_model_fn=lambda: deep_sets._node_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=True)
global_block = blocks.GlobalBlock(
global_model_fn=lambda: deep_sets._global_block._global_model,
use_edges=False,
use_nodes=True,
use_globals=False,
nodes_reducer=reducer)
node_block_out = node_block(input_graph)
expected_nodes = node_block_out.nodes.numpy()
expected_globals = global_block(node_block_out).globals.numpy()
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers)
self.assertAllEqual(input_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(expected_nodes, output_nodes)
self._assert_all_none_or_all_close(expected_globals, output_globals)
@parameterized.named_parameters(
("with scale and offset",
{"scale": .5, "offset": .25}, {"scale": 3, "offset": 1.5})
)
def test_kwargs(self, node_model_kwargs, global_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
node_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_model_fn_with_params = functools.partial(
node_model_fn(),
scale=node_model_kwargs["scale"],
offset=node_model_kwargs["offset"])
global_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
global_model_fn_with_params = functools.partial(
global_model_fn(),
scale=global_model_kwargs["scale"],
offset=global_model_kwargs["offset"])
model = modules.DeepSets(node_model_fn, global_model_fn)
model_with_params = modules.DeepSets(
lambda: node_model_fn_with_params, lambda: global_model_fn_with_params)
output_graph = utils_tf.nest_to_numpy(
model(input_graph, node_model_kwargs, global_model_kwargs))
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.receivers, output_graph.receivers)
    self.assertAllEqual(expected_graph.senders, output_graph.senders)
self.assertAllEqual(expected_graph.edges, output_graph.edges)
self._assert_all_none_or_all_close(
expected_graph.nodes, output_graph.nodes)
self._assert_all_none_or_all_close(
expected_graph.globals, output_graph.globals)
@parameterized.parameters(
("nodes",), ("globals",),
)
def test_field_must_not_be_none(self, none_field):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.replace(**{none_field: None})
deep_sets = self._get_model()
with self.assertRaises(ValueError):
deep_sets(input_graph)
def test_incompatible_higher_rank_inputs_raises(self):
"""A exception should be raised if the inputs have incompatible shapes."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
nodes=tf.transpose(input_graph.nodes, [0, 2, 1, 3]))
graph_network = modules.DeepSets(node_model_fn, global_model_fn)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"Dimensions of inputs should match"):
graph_network(input_graph)
def test_incompatible_higher_rank_partial_outputs_no_raise(self):
"""There is no constraint on the size of the partial outputs."""
input_graph = self._get_shaped_input_graph()
node_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
global_model_fn = functools.partial(
snt.Conv2D, output_channels=10, kernel_shape=[3, 3])
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
def test_incompatible_higher_rank_inputs_no_raise(self):
"""A DeepSets does not make assumptions on the shape if its input edges."""
input_graph = self._get_shaped_input_graph()
_, node_model_fn, global_model_fn = self._get_shaped_model_fns()
input_graph = input_graph.replace(
edges=tf.transpose(input_graph.edges, [0, 2, 1, 3]))
network = modules.DeepSets(node_model_fn, global_model_fn)
self._assert_build_and_run(network, input_graph)
class CommNetTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
@parameterized.named_parameters(
("default name", None), ("custom name", "custom_name"))
def test_created_variables(self, name=None):
"""Verifies variable names and shapes created by a DeepSets network."""
name = name if name is not None else "comm_net"
expected_var_shapes_dict = {
name + "/edge_block/linear/b:0": [15],
name + "/edge_block/linear/w:0": [2, 15],
name + "/node_encoder_block/linear/b:0": [8],
name + "/node_encoder_block/linear/w:0": [2, 8],
name + "/node_block/linear/b:0": [5],
name + "/node_block/linear/w:0": [15 + 8, 5],
}
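    # These shapes mirror the sub-block structure checked in
    # test_same_as_subblocks below: the edge block reads only sender node
    # features (2 -> 15), the node encoder reads only node features (2 -> 8),
    # and the node block concatenates aggregated received edges (15) with the
    # encoded nodes (8), hence [15 + 8, 5].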
input_graph = self._get_input_graph()
model = self._get_model(name=name)
model(input_graph)
variables = model.variables
var_shapes_dict = {var.name: var.get_shape().as_list() for var in variables}
self.assertDictEqual(expected_var_shapes_dict, var_shapes_dict)
@parameterized.named_parameters(
("default", tf.math.unsorted_segment_sum,),
("no edges", tf.math.unsorted_segment_sum, "edges"),
("no globals", tf.math.unsorted_segment_sum, "globals"),
("max or zero reduction", blocks.unsorted_segment_max_or_zero,),
)
def test_same_as_subblocks(self, reducer, none_field=None):
"""Compares the output to explicit subblocks output.
Args:
reducer: The reducer used in the `NodeBlock`s.
none_field: (string, default=None) If not None, the corresponding field
is removed from the input graph.
"""
input_graph = self._get_input_graph(none_field)
comm_net = self._get_model(reducer)
output_graph = comm_net(input_graph)
output_nodes = output_graph.nodes
edge_subblock = blocks.EdgeBlock(
edge_model_fn=lambda: comm_net._edge_block._edge_model,
use_edges=False,
use_receiver_nodes=False,
use_sender_nodes=True,
use_globals=False)
node_encoder_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_encoder_block._node_model,
use_received_edges=False,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
node_subblock = blocks.NodeBlock(
node_model_fn=lambda: comm_net._node_block._node_model,
use_received_edges=True,
use_sent_edges=False,
use_nodes=True,
use_globals=False,
received_edges_reducer=reducer)
edge_block_out = edge_subblock(input_graph)
encoded_nodes = node_encoder_subblock(input_graph).nodes
node_input_graph = input_graph.replace(
edges=edge_block_out.edges, nodes=encoded_nodes)
node_block_out = node_subblock(node_input_graph)
expected_nodes = node_block_out.nodes
self.assertAllEqual(input_graph.globals, output_graph.globals)
self.assertAllEqual(input_graph.edges, output_graph.edges)
self.assertAllEqual(input_graph.receivers, output_graph.receivers,)
self.assertAllEqual(input_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(
expected_nodes.numpy(), output_nodes.numpy())
@parameterized.named_parameters(
("with scale and offset", {"scale": 2, "offset": 1},
{"scale": .5, "offset": .25}, {"scale": 3, "offset": 1.5})
)
def test_kwargs(self,
edge_model_kwargs,
node_encoder_model_kwargs,
node_model_kwargs):
"""Compares the output to expected output graph using kwargs."""
input_graph = self._get_input_graph()
edge_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
edge_model_fn_with_params = functools.partial(
edge_model_fn(),
scale=edge_model_kwargs["scale"],
offset=edge_model_kwargs["offset"])
node_encoder_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_encoder_model_fn_with_params = functools.partial(
node_encoder_model_fn(),
scale=node_encoder_model_kwargs["scale"],
offset=node_encoder_model_kwargs["offset"])
node_model_fn = functools.partial(
snt.LayerNorm, axis=1, create_scale=False, create_offset=False)
node_model_fn_with_params = functools.partial(
node_model_fn(),
scale=node_model_kwargs["scale"],
offset=node_model_kwargs["offset"])
model = modules.CommNet(edge_model_fn, node_encoder_model_fn, node_model_fn)
model_with_params = modules.CommNet(
lambda: edge_model_fn_with_params,
lambda: node_encoder_model_fn_with_params,
lambda: node_model_fn_with_params)
output_model = model(
input_graph,
edge_model_kwargs,
node_encoder_model_kwargs,
node_model_kwargs)
output_graph = utils_tf.nest_to_numpy(output_model)
expected_graph = utils_tf.nest_to_numpy(model_with_params(input_graph))
self.assertAllEqual(expected_graph.globals, output_graph.globals)
self.assertAllEqual(expected_graph.edges, output_graph.edges)
self.assertAllEqual(expected_graph.receivers, output_graph.receivers,)
self.assertAllEqual(expected_graph.senders, output_graph.senders)
self._assert_all_none_or_all_close(expected_graph.nodes, output_graph.nodes)
@parameterized.named_parameters(
("no nodes", ["nodes"],), ("no edges", ["edges", "receivers", "senders"],)
)
def test_field_must_not_be_none(self, none_fields):
"""Tests that the model cannot be built if required fields are missing."""
input_graph = utils_tf.data_dicts_to_graphs_tuple([SMALL_GRAPH_1])
input_graph = input_graph.map(lambda _: None, none_fields)
comm_net = self._get_model()
with self.assertRaises(ValueError):
comm_net(input_graph)
def test_higher_rank_outputs(self):
"""Tests that a CommNet can be build with higher rank inputs/outputs."""
input_graph = self._get_shaped_input_graph()
graph_network = modules.CommNet(*self._get_shaped_model_fns())
self._assert_build_and_run(graph_network, input_graph)
class SelfAttentionTest(GraphModuleTest):
def _get_model(self, reducer=None, name=None):
kwargs = {
"edge_model_fn": functools.partial(snt.Linear, output_size=15),
"node_encoder_model_fn": functools.partial(snt.Linear, output_size=8),
"node_model_fn": functools.partial(snt.Linear, output_size=5),
}
if reducer is not None:
kwargs["reducer"] = reducer
if name:
kwargs["name"] = name
return modules.CommNet(**kwargs)
LOGITS_1D = [np.log(2), np.log(2), np.log(2), 0., 0., 0.]
SOFTMAX_1D = [1., 2/3, 0.5, 0.25, 0.25, 1/3]
LOGITS_2D = [[np.log(2), 1.], [np.log(2), 1.], [np.log(2), 1.],
[0., 1.], [0., 1.], [0., 1.]]
SOFTMAX_2D = [[1., 1.], [2/3, 0.5], [1/2, 1/3],
[1/4, 1/3], [1/4, 1/3], [1/3, 0.5]]
SENDERS = [0, 2, 2, 3, 4, 3]
RECEIVERS = [1, 5, 6, 6, 6, 5]
N_NODE = [2, 5]
N_EDGE = [1, 5]
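  # Editorial note on the expected values above: RECEIVERS groups the logits
  # per receiving node, so LOGITS_1D splits into segment 1: [log(2)],
  # segment 5: [log(2), 0.] and segment 6: [log(2), 0., 0.], whose per-segment
  # softmaxes are [1.], [2/3, 1/3] and [1/2, 1/4, 1/4]; scattering these back
  # to edge order gives SOFTMAX_1D (and, column-wise, SOFTMAX_2D).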
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D),
("two dimensional", LOGITS_2D, SOFTMAX_2D),)
def test_unsorted_segment_softmax(self, data, expected_softmax):
"""Verifies variable names and shapes created by a DeepSets network."""
data = tf.constant(data, dtype=tf.float32)
segment_ids = tf.constant(self.RECEIVERS, dtype=tf.int32)
num_segments = tf.constant(sum(self.N_NODE), dtype=tf.int32)
actual_softmax = modules._unsorted_segment_softmax(
data, segment_ids, num_segments)
self.assertAllClose(expected_softmax, actual_softmax.numpy())
@parameterized.named_parameters(
("one dimensional", LOGITS_1D, SOFTMAX_1D,
modules._unsorted_segment_softmax),
("two dimensional", LOGITS_2D, SOFTMAX_2D,
modules._unsorted_segment_softmax),)
def test_received_edges_normalizer(self, logits,
expected_normalized, normalizer):
graph = graphs.GraphsTuple(
nodes=None,
edges=logits,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),
)
actual_normalized_edges = modules._received_edges_normalizer(
graph, normalizer)
self.assertAllClose(expected_normalized, actual_normalized_edges.numpy())
def test_self_attention(self):
# Just one feature per node.
values_np = np.arange(sum(self.N_NODE)) + 1.
    # Two heads: one with positive values, one with negative values.
values_np = np.stack([values_np, values_np*-1.], axis=-1)
# Multiple features per node, per head, at different scales.
values_np = np.stack([values_np, values_np*0.1], axis=-1)
values = tf.constant(values_np, dtype=tf.float32)
keys_np = [
[[0.3, 0.4]]*2, # Irrelevant (only sender to one node)
[[0.1, 0.5]]*2, # Not used (is not a sender)
[[1, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 1], [1, 1]],
[[0.4, 0.3]]*2, # Not used (is not a sender)
[[0.3, 0.2]]*2] # Not used (is not a sender)
keys = tf.constant(keys_np, dtype=tf.float32)
queries_np = [
[[0.2, 0.7]]*2, # Not used (is not a receiver)
[[0.3, 0.2]]*2, # Irrelevant (only receives from one node)
[[0.2, 0.8]]*2, # Not used (is not a receiver)
[[0.2, 0.4]]*2, # Not used (is not a receiver)
[[0.3, 0.9]]*2, # Not used (is not a receiver)
[[0, np.log(2)], [np.log(3), 0]],
[[np.log(2), 0], [0, np.log(3)]]]
queries = tf.constant(queries_np, dtype=tf.float32)
attention_graph = graphs.GraphsTuple(
nodes=None,
edges=None,
globals=None,
receivers=tf.constant(self.RECEIVERS, dtype=tf.int32),
senders=tf.constant(self.SENDERS, dtype=tf.int32),
n_node=tf.constant(self.N_NODE, dtype=tf.int32),
n_edge=tf.constant(self.N_EDGE, dtype=tf.int32),)
self_attention = modules.SelfAttention()
output_graph = self_attention(values, keys, queries, attention_graph)
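    # Worked example for the expected values below: node 5 receives from
    # senders 2 and 3. For head one its query is [0, log(2)], so the logits
    # are key_2 . q = 0 and key_3 . q = log(2), i.e. attention weights
    # [1/3, 2/3] over values [3, 4], which mix to 11/3 (times 0.1 for the
    # scaled feature); head two analogously gives weights [1/4, 3/4] over
    # values [-3, -4], i.e. -15/4.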
expected_mixed_nodes = [
[[0., 0.], [0., 0.]], # Does not receive any edges
[[1., 0.1], [-1., -0.1]], # Only receives from n0.
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[0., 0.], [0., 0.]], # Does not receive any edges
[[11/3, 11/3*0.1], # Head one, receives from n2(1/3) n3(2/3)
[-15/4, -15/4*0.1]], # Head two, receives from n2(1/4) n3(3/4)
[[20/5, 20/5*0.1], # Head one, receives from n2(2/5) n3(1/5) n4(2/5)
[-28/7, -28/7*0.1]], # Head two, receives from n2(3/7) n3(1/7) n4(3/7)
]
self.assertAllClose(expected_mixed_nodes, output_graph.nodes.numpy())
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests_tf2/modules_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for utils_tf.py in Tensorflow 2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import parameterized
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
from graph_nets.tests import test_utils as test_utils_tf1
from graph_nets.tests_tf2 import test_utils
import networkx as nx
import numpy as np
from six.moves import range
import tensorflow as tf
import tree
class RepeatTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for `repeat`."""
@parameterized.named_parameters(
("base", (3,), [2, 3, 4], 0),
("empty_group_first", (3,), [0, 3, 4], 0),
("empty_group_middle", (3,), [2, 0, 4], 0),
("double_empty_group_middle", (4,), [2, 0, 0, 4], 0),
("empty_group_last", (3,), [2, 3, 0], 0),
("just_one_group", (1,), [2], 0),
("zero_groups", (0,), [], 0),
("axis 0", (2, 3, 4), [2, 3], 0),
("axis 1", (3, 2, 4), [2, 3], 1),
("axis 2", (4, 3, 2), [2, 3], 2),
("zero_groups_with_shape", (2, 0, 4), [], 1),
)
def test_repeat(self, shape, repeats, axis):
num_elements = np.prod(shape)
t = np.arange(num_elements).reshape(*shape)
expected = np.repeat(t, repeats, axis=axis)
tensor = tf.constant(t)
repeats = tf.constant(repeats, dtype=tf.int32)
actual = utils_tf.repeat(tensor, repeats, axis=axis)
self.assertAllEqual(expected, actual)
@parameterized.named_parameters(("default", "custom_name", None),
("custom", None, "repeat"))
def test_name_scope(self, name, expected_name):
self.skipTest("Uses get_default_graph.")
kwargs = {"name": name} if name else {}
expected_name = expected_name if expected_name else name
t = tf.zeros([3, 2, 4])
indices = tf.constant([2, 3])
with test_utils.assert_new_op_prefixes(self, expected_name + "/"):
utils_tf.repeat(t, indices, axis=1, **kwargs)
def _generate_graph(batch_index, n_nodes=4, add_edges=True):
graph = nx.DiGraph()
for node in range(n_nodes):
node_data = {"features": np.array([node, batch_index], dtype=np.float32)}
graph.add_node(node, **node_data)
if add_edges:
for edge, (receiver, sender) in enumerate(zip([0, 0, 1], [1, 2, 3])):
if sender < n_nodes and receiver < n_nodes:
edge_data = np.array([edge, edge + 1, batch_index], dtype=np.float64)
graph.add_edge(sender, receiver, features=edge_data, index=edge)
graph.graph["features"] = np.array([batch_index], dtype=np.float32)
return graph
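# `_generate_graph` above builds a small DiGraph whose node features are
# [node_index, batch_index], whose edges (sender -> receiver) are 1 -> 0,
# 2 -> 0 and 3 -> 1 (kept only when both endpoints exist), and whose global
# feature is [batch_index]; the concat tests below rely on this layout.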
class ConcatTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for `concat`, along various axis."""
@parameterized.named_parameters(
("no nones", []), ("stateless graph", ["nodes", "edges", "globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_concat_first_axis(self, none_fields):
graph_0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
graphs_ = [
gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
for gr in [graph_0, graph_1, graph_2]
]
graphs_ = [gr.map(lambda _: None, none_fields) for gr in graphs_]
concat_graph = utils_tf.concat(graphs_, axis=0)
for none_field in none_fields:
self.assertIsNone(getattr(concat_graph, none_field))
concat_graph = concat_graph.map(tf.no_op, none_fields)
if "nodes" not in none_fields:
self.assertAllEqual(
np.array([0, 1, 2, 0, 1, 0, 1, 0, 1, 2]),
[x[0] for x in concat_graph.nodes])
self.assertAllEqual(
np.array([0, 0, 0, 1, 1, 2, 2, 3, 3, 3]),
[x[1] for x in concat_graph.nodes])
if "edges" not in none_fields:
self.assertAllEqual(
np.array([0, 1, 0, 0, 0, 1]), [x[0] for x in concat_graph.edges])
self.assertAllEqual(
np.array([0, 0, 1, 2, 3, 3]), [x[2] for x in concat_graph.edges])
self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
if "senders" not in none_fields:
# [1, 2], [1], [1], [1, 2] and 3, 2, 2, 3 nodes
      # So we are summing [1, 2, 1, 1, 1, 2] with [0, 0, 3, 5, 7, 7]
self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]), concat_graph.senders)
if "receivers" not in none_fields:
# [0, 0], [0], [0], [0, 0] and 3, 2, 2, 3 nodes
# So we are summing [0, 0, 0, 0, 0, 0] with [0, 0, 3, 5, 7, 7]
self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]), concat_graph.receivers)
if "globals" not in none_fields:
self.assertAllEqual(np.array([[0], [1], [2], [3]]), concat_graph.globals)
def test_nested_features(self):
graph_0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
graphs_ = [
gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
for gr in [graph_0, graph_1, graph_2]
]
def _create_nested_fields(graphs_tuple):
new_nodes = ({"a": graphs_tuple.nodes,
"b": [graphs_tuple.nodes + 1,
graphs_tuple.nodes + 2]
},)
new_edges = [{"c": graphs_tuple.edges + 5,
"d": (graphs_tuple.edges + 1,
graphs_tuple.edges + 3),
}]
new_globals = []
return graphs_tuple.replace(nodes=new_nodes,
edges=new_edges,
globals=new_globals)
graphs_ = [_create_nested_fields(gr) for gr in graphs_]
concat_graph = utils_tf.concat(graphs_, axis=0)
actual_nodes = concat_graph.nodes
actual_edges = concat_graph.edges
actual_globals = concat_graph.globals
expected_nodes = tree.map_structure(
lambda *x: tf.concat(x, axis=0), *[gr.nodes for gr in graphs_])
expected_edges = tree.map_structure(
lambda *x: tf.concat(x, axis=0), *[gr.edges for gr in graphs_])
expected_globals = tree.map_structure(
lambda *x: tf.concat(x, axis=0), *[gr.globals for gr in graphs_])
tree.assert_same_structure(expected_nodes, actual_nodes)
tree.assert_same_structure(expected_edges, actual_edges)
tree.assert_same_structure(expected_globals, actual_globals)
tree.map_structure(self.assertAllEqual, expected_nodes, actual_nodes)
tree.map_structure(self.assertAllEqual, expected_edges, actual_edges)
tree.map_structure(self.assertAllEqual, expected_globals, actual_globals)
# Borrowed from `test_concat_first_axis`:
self.assertAllEqual(np.array([3, 2, 2, 3]), concat_graph.n_node)
self.assertAllEqual(np.array([2, 1, 1, 2]), concat_graph.n_edge)
self.assertAllEqual(np.array([1, 2, 4, 6, 8, 9]), concat_graph.senders)
self.assertAllEqual(np.array([0, 0, 3, 5, 7, 7]), concat_graph.receivers)
def test_concat_last_axis(self):
graph0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph1 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(2, 3), _generate_graph(3, 2)])
graph0 = graph0.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
graph1 = graph1.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
concat_graph = utils_tf.concat([graph0, graph1], axis=-1)
self.assertAllEqual(
np.array([[0, 0, 0, 2], [1, 0, 1, 2], [2, 0, 2, 2], [0, 1, 0, 3],
[1, 1, 1, 3]]), concat_graph.nodes)
self.assertAllEqual(
np.array([[0, 1, 0, 0, 1, 2], [1, 2, 0, 1, 2, 2], [0, 1, 1, 0, 1, 3]]),
concat_graph.edges)
self.assertAllEqual(np.array([3, 2]), concat_graph.n_node)
self.assertAllEqual(np.array([2, 1]), concat_graph.n_edge)
self.assertAllEqual(np.array([1, 2, 4]), concat_graph.senders)
self.assertAllEqual(np.array([0, 0, 3]), concat_graph.receivers)
self.assertAllEqual(np.array([[0, 2], [1, 3]]), concat_graph.globals)
@parameterized.parameters(
("nodes"),
("edges"),
("globals"),
)
def test_raise_all_or_no_nones(self, none_field):
graph_0 = utils_np.networkxs_to_graphs_tuple(
[_generate_graph(0, 3), _generate_graph(1, 2)])
graph_1 = utils_np.networkxs_to_graphs_tuple([_generate_graph(2, 2)])
graph_2 = utils_np.networkxs_to_graphs_tuple([_generate_graph(3, 3)])
graphs_ = [
gr.map(tf.convert_to_tensor, graphs.ALL_FIELDS)
for gr in [graph_0, graph_1, graph_2]
]
graphs_[1] = graphs_[1].replace(**{none_field: None})
with self.assertRaisesRegex(
ValueError,
"Different set of keys found when iterating over data dictionaries."):
utils_tf.concat(graphs_, axis=0)
class StopGradientsGraphTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(StopGradientsGraphTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.zeros([10], dtype=tf.int32),
"receivers": tf.zeros([10], dtype=tf.int32),
"nodes": tf.ones([5, 7]),
"edges": tf.zeros([10, 6]),
"globals": tf.zeros([1, 8])
}])
def _check_if_gradients_exist(self, stopped_gradients_graph):
gradients = []
for field in ["globals", "nodes", "edges"]:
with tf.GradientTape() as tape:
xs = getattr(self._graph, field)
ys = getattr(stopped_gradients_graph, field)
gradient = tape.gradient(ys, xs) if ys is not None else ys
gradients.append(gradient)
    return [grad is not None for grad in gradients]
@parameterized.named_parameters(
("stop_all_fields", True, True, True),
("stop_globals", True, False, False), ("stop_nodes", False, True, False),
("stop_edges", False, False, True), ("stop_none", False, False, False))
def test_stop_gradients_outputs(self, stop_globals, stop_nodes, stop_edges):
stopped_gradients_graph = utils_tf.stop_gradient(
self._graph,
stop_globals=stop_globals,
stop_nodes=stop_nodes,
stop_edges=stop_edges)
gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
expected_gradients_exist = [
not stop_globals, not stop_nodes, not stop_edges
]
self.assertAllEqual(expected_gradients_exist, gradients_exist)
@parameterized.named_parameters(("no_nodes", "nodes"), ("no_edges", "edges"),
("no_globals", "globals"))
def test_stop_gradients_with_missing_field_raises(self, none_field):
self._graph = self._graph.map(lambda _: None, [none_field])
with self.assertRaisesRegex(ValueError, none_field):
utils_tf.stop_gradient(self._graph)
def test_stop_gradients_default_params(self):
"""Tests for the default params of `utils_tf.stop_gradient`."""
stopped_gradients_graph = utils_tf.stop_gradient(self._graph)
gradients_exist = self._check_if_gradients_exist(stopped_gradients_graph)
expected_gradients_exist = [False, False, False]
self.assertAllEqual(expected_gradients_exist, gradients_exist)
class IdentityTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for the `identity` method."""
def setUp(self):
super(IdentityTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"nodes": tf.random.uniform([5, 7]),
"edges": tf.random.uniform([10, 6]),
"globals": tf.random.uniform([1, 8])
}])
def test_name_scope(self):
"""Tests that the name scope are correctly pushed through this function."""
self.skipTest("Tensor.name is meaningless when eager execution is enabled")
@parameterized.named_parameters(
("all fields defined", []), ("no node features", ["nodes"]),
("no edge features", ["edges"]), ("no global features", ["globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_output(self, none_fields):
"""Tests that this function produces the identity."""
graph = self._graph.map(lambda _: None, none_fields)
with tf.name_scope("test"):
graph_id = utils_tf.identity(graph)
expected_out = utils_tf.nest_to_numpy(graph)
actual_out = utils_tf.nest_to_numpy(graph_id)
for field in [
"nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
]:
if field in none_fields:
self.assertIsNone(getattr(actual_out, field))
else:
self.assertNDArrayNear(
getattr(expected_out, field), getattr(actual_out, field), err=1e-4)
class RunGraphWithNoneTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(RunGraphWithNoneTest, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"nodes": tf.random.uniform([5, 7]),
"edges": tf.random.uniform([10, 6]),
"globals": tf.random.uniform([1, 8])
}])
@parameterized.named_parameters(
("all fields defined", []), ("no node features", ["nodes"]),
("no edge features", ["edges"]), ("no global features", ["globals"]),
("no edges", ["edges", "receivers", "senders"]))
def test_output(self, none_fields):
"""Tests that this function produces the identity."""
graph_id = self._graph.map(lambda _: None, none_fields)
graph = graph_id.map(tf.no_op, none_fields)
expected_out = graph
actual_out = graph_id
for field in [
"nodes", "edges", "globals", "receivers", "senders", "n_node", "n_edge"
]:
if field in none_fields:
self.assertIsNone(getattr(actual_out, field))
else:
self.assertNDArrayNear(
getattr(expected_out, field), getattr(actual_out, field), err=1e-4)
class ComputeOffsetTest(tf.test.TestCase):
"""Tests for the `compute_stacked_offsets` method."""
def setUp(self):
super(ComputeOffsetTest, self).setUp()
self.sizes = [5, 4, 3, 1, 2, 0, 3, 0, 4, 7]
self.repeats = [2, 2, 0, 2, 1, 3, 2, 0, 3, 2]
self.offset = [
0, 0, 5, 5, 12, 12, 13, 15, 15, 15, 15, 15, 18, 18, 18, 22, 22
]
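    # How `offset` follows from `sizes` and `repeats`: the exclusive
    # cumulative sum of `sizes` is [0, 5, 9, 12, 13, 15, 15, 18, 18, 22];
    # repeating its i-th entry `repeats[i]` times yields the stacked offsets
    # expected above.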
def test_compute_stacked_offsets(self):
offset0 = utils_tf._compute_stacked_offsets(
self.sizes, self.repeats)
offset1 = utils_tf._compute_stacked_offsets(
np.array(self.sizes), np.array(self.repeats))
offset2 = utils_tf._compute_stacked_offsets(
tf.constant(self.sizes, dtype=tf.int32),
tf.constant(self.repeats, dtype=tf.int32))
self.assertAllEqual(self.offset, offset0.numpy().tolist())
self.assertAllEqual(self.offset, offset1.numpy().tolist())
self.assertAllEqual(self.offset, offset2.numpy().tolist())
class DataDictsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the methods creating complete graphs from partial graphs."""
def _assert_indices_sizes(self, dict_, n_relation):
for key in ["receivers", "senders"]:
self.assertAllEqual((n_relation,), dict_[key].get_shape().as_list())
@parameterized.named_parameters(
("static", utils_tf._create_complete_edges_from_nodes_static),
("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
)
def test_create_complete_edges_from_nodes_include_self_edges(self, method):
for graph_dict in self.graphs_dicts_in:
n_node = graph_dict["nodes"].shape[0]
edges_dict = method(n_node, exclude_self_edges=False)
self._assert_indices_sizes(edges_dict, n_node**2)
@parameterized.named_parameters(
("static", utils_tf._create_complete_edges_from_nodes_static),
("dynamic", utils_tf._create_complete_edges_from_nodes_dynamic),
)
def test_create_complete_edges_from_nodes_exclude_self_edges(self, method):
for graph_dict in self.graphs_dicts_in:
n_node = graph_dict["nodes"].shape[0]
edges_dict = method(n_node, exclude_self_edges=True)
self._assert_indices_sizes(edges_dict, n_node * (n_node - 1))
def test_create_complete_edges_from_nodes_dynamic_number_of_nodes(self):
for graph_dict in self.graphs_dicts_in:
n_node = tf.shape(tf.constant(graph_dict["nodes"]))[0]
edges_dict = utils_tf._create_complete_edges_from_nodes_dynamic(
n_node, exclude_self_edges=False)
n_relation = n_node**2
receivers = edges_dict["receivers"].numpy()
senders = edges_dict["senders"].numpy()
n_edge = edges_dict["n_edge"].numpy()
self.assertAllEqual((n_relation,), receivers.shape)
self.assertAllEqual((n_relation,), senders.shape)
self.assertEqual(n_relation, n_edge)
class GraphsCompletionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for completing partial GraphsTuple."""
def _assert_indices_sizes(self, graph, n_relation):
for key in ["receivers", "senders"]:
self.assertAllEqual((n_relation,),
getattr(graph, key).get_shape().as_list())
@parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
def test_fill_edge_state(self, edge_size):
"""Tests for filling the edge state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_edges = np.sum(self.reference_graph.n_edge)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
self.assertAllEqual((n_edges, edge_size),
graphs_tuple.edges.get_shape().as_list())
@parameterized.named_parameters(("edge size 0", 0), ("edge size 1", 1))
def test_fill_edge_state_dynamic(self, edge_size):
"""Tests for filling the edge state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple._replace(
n_edge=tf.constant(
graphs_tuple.n_edge, shape=graphs_tuple.n_edge.get_shape()))
n_edges = np.sum(self.reference_graph.n_edge)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size)
actual_edges = graphs_tuple.edges
self.assertNDArrayNear(
np.zeros((n_edges, edge_size)), actual_edges, err=1e-4)
@parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
def test_fill_global_state(self, global_size):
"""Tests for filling the global state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("globals")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_graphs = self.reference_graph.n_edge.shape[0]
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
self.assertAllEqual((n_graphs, global_size),
graphs_tuple.globals.get_shape().as_list())
@parameterized.named_parameters(("global size 0", 0), ("global size 1", 1))
def test_fill_global_state_dynamic(self, global_size):
"""Tests for filling the global state with a constant content."""
for g in self.graphs_dicts_in:
g.pop("globals")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
# Hide global shape information
graphs_tuple = graphs_tuple._replace(
n_node=tf.constant(
graphs_tuple.n_node, shape=graphs_tuple.n_edge.get_shape()))
n_graphs = self.reference_graph.n_edge.shape[0]
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, global_size)
actual_globals = graphs_tuple.globals.numpy()
self.assertNDArrayNear(
np.zeros((n_graphs, global_size)), actual_globals, err=1e-4)
@parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
def test_fill_node_state(self, node_size):
"""Tests for filling the node state with a constant content."""
for g in self.graphs_dicts_in:
g["n_node"] = g["nodes"].shape[0]
g.pop("nodes")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
n_nodes = np.sum(self.reference_graph.n_node)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
self.assertAllEqual((n_nodes, node_size),
graphs_tuple.nodes.get_shape().as_list())
@parameterized.named_parameters(("node size 0", 0), ("node size 1", 1))
def test_fill_node_state_dynamic(self, node_size):
"""Tests for filling the node state with a constant content."""
for g in self.graphs_dicts_in:
g["n_node"] = g["nodes"].shape[0]
g.pop("nodes")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple._replace(
n_node=tf.constant(
graphs_tuple.n_node, shape=graphs_tuple.n_node.get_shape()))
n_nodes = np.sum(self.reference_graph.n_node)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size)
actual_nodes = graphs_tuple.nodes.numpy()
self.assertNDArrayNear(
np.zeros((n_nodes, node_size)), actual_nodes, err=1e-4)
def test_fill_edge_state_with_missing_fields_raises(self):
"""Edge field cannot be filled if receivers or senders are missing."""
for g in self.graphs_dicts_in:
g.pop("receivers")
g.pop("senders")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
with self.assertRaisesRegex(ValueError, "receivers"):
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)
def test_fill_state_default_types(self):
"""Tests that the features are created with the correct default type."""
for g in self.graphs_dicts_in:
g.pop("nodes")
g.pop("globals")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, edge_size=1)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, node_size=1)
graphs_tuple = utils_tf.set_zero_global_features(
graphs_tuple, global_size=1)
self.assertEqual(tf.float32, graphs_tuple.edges.dtype)
self.assertEqual(tf.float32, graphs_tuple.nodes.dtype)
self.assertEqual(tf.float32, graphs_tuple.globals.dtype)
@parameterized.parameters(
(tf.float64,),
(tf.int32,),
)
def test_fill_state_user_specified_types(self, dtype):
"""Tests that the features are created with the correct default type."""
for g in self.graphs_dicts_in:
g.pop("nodes")
g.pop("globals")
g.pop("edges")
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.set_zero_edge_features(graphs_tuple, 1, dtype)
graphs_tuple = utils_tf.set_zero_node_features(graphs_tuple, 1, dtype)
graphs_tuple = utils_tf.set_zero_global_features(graphs_tuple, 1, dtype)
self.assertEqual(dtype, graphs_tuple.edges.dtype)
self.assertEqual(dtype, graphs_tuple.nodes.dtype)
self.assertEqual(dtype, graphs_tuple.globals.dtype)
@parameterized.named_parameters(
("no self edges", False),
("self edges", True),
)
def test_fully_connect_graph_dynamic(self, exclude_self_edges):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
n_relation = 0
for g in self.graphs_dicts_in:
n_node = g["nodes"].shape[0]
if exclude_self_edges:
n_relation += n_node * (n_node - 1)
else:
n_relation += n_node * n_node
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
exclude_self_edges)
actual_receivers = graphs_tuple.receivers.numpy()
actual_senders = graphs_tuple.senders.numpy()
self.assertAllEqual((n_relation,), actual_receivers.shape)
self.assertAllEqual((n_relation,), actual_senders.shape)
self.assertAllEqual((len(self.graphs_dicts_in),),
graphs_tuple.n_edge.get_shape().as_list())
@parameterized.named_parameters(
("no self edges", False),
("self edges", True),
)
def test_fully_connect_graph_dynamic_with_dynamic_sizes(
self, exclude_self_edges):
for g in self.graphs_dicts_in:
g.pop("edges")
g.pop("receivers")
g.pop("senders")
n_relation = 0
for g in self.graphs_dicts_in:
n_node = g["nodes"].shape[0]
if exclude_self_edges:
n_relation += n_node * (n_node - 1)
else:
n_relation += n_node * n_node
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs_tuple = graphs_tuple.map(test_utils.mask_leading_dimension,
["nodes", "globals", "n_node", "n_edge"])
graphs_tuple = utils_tf.fully_connect_graph_dynamic(graphs_tuple,
exclude_self_edges)
actual_receivers = graphs_tuple.receivers.numpy()
actual_senders = graphs_tuple.senders.numpy()
actual_n_edge = graphs_tuple.n_edge.numpy()
self.assertAllEqual((n_relation,), actual_receivers.shape)
self.assertAllEqual((n_relation,), actual_senders.shape)
self.assertAllEqual((len(self.graphs_dicts_in),), actual_n_edge.shape)
expected_edges = []
offset = 0
for graph in self.graphs_dicts_in:
n_node = graph["nodes"].shape[0]
for e1 in range(n_node):
for e2 in range(n_node):
if not exclude_self_edges or e1 != e2:
expected_edges.append((e1 + offset, e2 + offset))
offset += n_node
actual_edges = zip(actual_receivers, actual_senders)
self.assertSetEqual(set(actual_edges), set(expected_edges))
class GraphsTupleConversionTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the method converting between data dicts and GraphsTuple."""
@parameterized.named_parameters(("all fields defined", []), (
"no edge features",
["edges"],
), (
"no node features",
["nodes"],
), (
"no globals",
["globals"],
), (
"no edges",
["edges", "receivers", "senders"],
))
def test_data_dicts_to_graphs_tuple(self, none_fields):
"""Fields in `none_fields` will be cleared out."""
for field in none_fields:
for graph_dict in self.graphs_dicts_in:
if field in graph_dict:
if field == "nodes":
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict[field] = None
self.reference_graph = self.reference_graph._replace(**{field: None})
if field == "senders":
self.reference_graph = self.reference_graph._replace(
n_edge=np.zeros_like(self.reference_graph.n_edge))
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for field in none_fields:
self.assertIsNone(getattr(graphs_tuple, field))
graphs_tuple = graphs_tuple.map(tf.no_op, none_fields)
self._assert_graph_equals_np(self.reference_graph, graphs_tuple)
@parameterized.parameters(("receivers",), ("senders",))
def test_data_dicts_to_graphs_tuple_raises(self, none_field):
"""Fields that cannot be missing."""
for graph_dict in self.graphs_dicts_in:
graph_dict[none_field] = None
with self.assertRaisesRegex(ValueError, none_field):
utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
def test_data_dicts_to_graphs_tuple_no_raise(self):
"""Not having nodes is fine, if the number of nodes is provided."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = graph_dict["nodes"].shape[0]
graph_dict["nodes"] = None
utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
def test_data_dicts_to_graphs_tuple_cast_types(self):
"""Index and number fields should be cast to tensors of the right type."""
for graph_dict in self.graphs_dicts_in:
graph_dict["n_node"] = np.array(
graph_dict["nodes"].shape[0], dtype=np.int64)
graph_dict["receivers"] = graph_dict["receivers"].astype(np.int16)
graph_dict["senders"] = graph_dict["senders"].astype(np.float64)
graph_dict["nodes"] = graph_dict["nodes"].astype(np.float64)
graph_dict["edges"] = tf.constant(graph_dict["edges"], dtype=tf.float64)
out = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
for key in ["n_node", "n_edge", "receivers", "senders"]:
self.assertEqual(tf.int32, getattr(out, key).dtype)
self.assertEqual(type(tf.int32), type(getattr(out, key).dtype))
for key in ["nodes", "edges"]:
self.assertEqual(type(tf.float64), type(getattr(out, key).dtype))
self.assertEqual(tf.float64, getattr(out, key).dtype)
class GraphsIndexingTests(test_utils.GraphsTest, parameterized.TestCase):
"""Tests for the `get_graph` method."""
@parameterized.named_parameters(("int_index", False),
("tensor_index", True))
def test_getitem_one(self, use_tensor_index):
index = 2
expected = self.graphs_dicts_out[index]
if use_tensor_index:
index = tf.constant(index)
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graph = utils_tf.get_graph(graphs_tuple, index)
graph = utils_tf.nest_to_numpy(graph)
actual, = utils_np.graphs_tuple_to_data_dicts(graph)
for k, v in expected.items():
self.assertAllClose(v, actual[k])
self.assertEqual(expected["nodes"].shape[0], actual["n_node"])
self.assertEqual(expected["edges"].shape[0], actual["n_edge"])
@parameterized.named_parameters(("int_slice", False),
("tensor_slice", True))
def test_getitem(self, use_tensor_slice):
index = slice(1, 3)
expected = self.graphs_dicts_out[index]
if use_tensor_slice:
index = slice(tf.constant(index.start), tf.constant(index.stop))
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
graphs2 = utils_tf.get_graph(graphs_tuple, index)
graphs2 = utils_tf.nest_to_numpy(graphs2)
actual = utils_np.graphs_tuple_to_data_dicts(graphs2)
for ex, ac in zip(expected, actual):
for k, v in ex.items():
self.assertAllClose(v, ac[k])
self.assertEqual(ex["nodes"].shape[0], ac["n_node"])
self.assertEqual(ex["edges"].shape[0], ac["n_edge"])
@parameterized.named_parameters(
("index_bad_type", 1.,
TypeError, "Index must be a valid scalar integer", False, False),
("index_bad_shape", [0, 1],
TypeError, "Valid tensor indices must be scalars", True, False),
("index_bad_dtype", 1.,
TypeError, "Valid tensor indices must have types", True, False),
("slice_bad_type_stop", 1.,
TypeError, "Valid tensor indices must be integers", False, True),
("slice_bad_shape_stop", [0, 1],
TypeError, "Valid tensor indices must be scalars", True, True),
("slice_bad_dtype_stop", 1.,
TypeError, "Valid tensor indices must have types", True, True),
("slice_bad_type_start", slice(0., 1),
TypeError, "Valid tensor indices must be integers", False, False),
("slice_with_step", slice(0, 1, 1),
ValueError, "slices with step/stride are not supported", False, False),
)
def test_raises(self, index, error_type, message, use_constant, use_slice):
if use_constant:
index = tf.constant(index)
if use_slice:
index = slice(index)
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
with self.assertRaisesRegex(error_type, message):
utils_tf.get_graph(graphs_tuple, index)
class TestNumGraphs(test_utils.GraphsTest):
"""Tests for the `get_num_graphs` function."""
def setUp(self):
super(TestNumGraphs, self).setUp()
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(self.graphs_dicts_in)
self.empty_graph = graphs_tuple.map(lambda _: None,
graphs.GRAPH_DATA_FIELDS)
def test_num_graphs(self):
graph = self.empty_graph.replace(n_node=tf.zeros([3], dtype=tf.int32))
self.assertEqual(3, utils_tf.get_num_graphs(graph))
class TestNestToNumpy(test_utils.GraphsTest):
"""Test that graph with tf.Tensor fields get converted to numpy."""
def setUp(self):
super(TestNestToNumpy, self).setUp()
self._graph = utils_tf.data_dicts_to_graphs_tuple([{
"senders": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"receivers": tf.random.uniform([10], maxval=10, dtype=tf.int32),
"nodes": tf.random.uniform([5, 7]),
"edges": tf.random.uniform([10, 6]),
"globals": tf.random.uniform([1, 8])
}])
def test_single_graph(self):
numpy_graph = utils_tf.nest_to_numpy(self._graph)
for field in graphs.ALL_FIELDS:
self.assertIsInstance(getattr(numpy_graph, field), np.ndarray)
self.assertNDArrayNear(
getattr(self._graph, field).numpy(),
getattr(numpy_graph, field), 1e-8)
def test_mixed_graph_conversion(self):
graph = self._graph.replace(nodes=None)
graph = graph.map(lambda x: x.numpy(), ["edges"])
converted_graph = utils_tf.nest_to_numpy(graph)
self.assertIsNone(converted_graph.nodes)
self.assertIsInstance(converted_graph.edges, np.ndarray)
def test_nested_structure(self):
regular_graph = self._graph
graph_with_nested_fields = regular_graph.map(
lambda x: {"a": x, "b": tf.random.uniform([4, 6])})
nested_structure = [
None,
regular_graph,
(graph_with_nested_fields,),
tf.random.uniform([10, 6])]
nested_structure_numpy = utils_tf.nest_to_numpy(nested_structure)
tree.assert_same_structure(nested_structure, nested_structure_numpy)
for tensor_or_none, array_or_none in zip(
tree.flatten(nested_structure),
tree.flatten(nested_structure_numpy)):
if tensor_or_none is None:
self.assertIsNone(array_or_none)
continue
self.assertIsNotNone(array_or_none)
self.assertNDArrayNear(
tensor_or_none.numpy(),
array_or_none, 1e-8)
def _leading_static_shape(input_nest):
return tree.flatten(input_nest)[0].shape.as_list()[0]
def _compile_with_tf_function(fn, graphs_tuple):
input_signature = utils_tf.specs_from_graphs_tuple(
graphs_tuple,
dynamic_num_graphs=True,
dynamic_num_nodes=True,
dynamic_num_edges=True,)
@functools.partial(tf.function, input_signature=[input_signature])
def compiled_fn(graphs_tuple):
assert _leading_static_shape(graphs_tuple.n_node) is None
assert _leading_static_shape(graphs_tuple.senders) is None
assert _leading_static_shape(graphs_tuple.nodes) is None
return fn(graphs_tuple)
return compiled_fn
class GraphsTupleSizeTest(tf.test.TestCase, parameterized.TestCase):
def test_get_graphs_tuple_size(self):
data_dict = test_utils_tf1.generate_random_data_dict(
(1,), (1,), (1,),
num_nodes_range=(10, 15),
num_edges_range=(20, 25))
node_size_np = data_dict["nodes"].shape[0]
edge_size_np = data_dict["edges"].shape[0]
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(2 * [data_dict])
# Put it into a tf.function so the shapes are unknown statically.
compiled_fn = _compile_with_tf_function(
utils_tf.get_graphs_tuple_size, graphs_tuple)
graphs_tuple_size = compiled_fn(graphs_tuple)
node_size, edge_size, graph_size = graphs_tuple_size
self.assertEqual(node_size.numpy(), node_size_np * 2)
self.assertEqual(edge_size.numpy(), edge_size_np * 2)
self.assertEqual(graph_size.numpy(), 2)
class MaskTest(tf.test.TestCase, parameterized.TestCase):
def test_get_mask(self):
mask = utils_tf.get_mask(10, 12)
self.assertAllClose(mask, np.concatenate((np.ones(10), np.zeros(2))))
    # If the total length is smaller than the number of True entries, the
    # whole returned mask is True.
mask = utils_tf.get_mask(10, 8)
self.assertAllClose(mask, np.ones(8, dtype=bool))
mask = utils_tf.get_mask(tf.constant(10), 12)
self.assertAllClose(mask, np.concatenate((np.ones(10, dtype=bool),
np.zeros(2, dtype=bool))))
mask = utils_tf.get_mask(tf.constant(10), tf.constant(12))
self.assertAllClose(mask, np.concatenate((np.ones(10, dtype=bool),
np.zeros(2, dtype=bool))))
class PaddingTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
("standard", False, False),
("standard_nested_features", False, True),
("experimental_unconnected_padding_edges", True, False),
)
def test_add_remove_padding(
self, experimental_unconnected_padding_edges, nested_features):
data_dict = test_utils_tf1.generate_random_data_dict(
(7,), (8,), (9,),
num_nodes_range=(10, 15),
num_edges_range=(20, 25))
node_size_np = data_dict["nodes"].shape[0]
edge_size_np = data_dict["edges"].shape[0]
unpadded_batch_size = 2
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(
unpadded_batch_size * [data_dict])
if nested_features:
graphs_tuple = graphs_tuple.replace(
edges=[graphs_tuple.edges, {}],
nodes=({"tensor": graphs_tuple.nodes},),
globals=([], graphs_tuple.globals,))
num_padding_nodes = 3
num_padding_edges = 4
num_padding_graphs = 5
pad_nodes_to = unpadded_batch_size * node_size_np + num_padding_nodes
pad_edges_to = unpadded_batch_size * edge_size_np + num_padding_edges
pad_graphs_to = unpadded_batch_size + num_padding_graphs
def _get_padded_and_recovered_graphs_tuple(graphs_tuple):
padded_graphs_tuple = utils_tf.pad_graphs_tuple(
graphs_tuple,
pad_nodes_to,
pad_edges_to,
pad_graphs_to,
experimental_unconnected_padding_edges)
# Check that we have statically defined shapes after padding.
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.nodes), pad_nodes_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.edges), pad_edges_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.senders), pad_edges_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.receivers), pad_edges_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.globals), pad_graphs_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.n_node), pad_graphs_to)
self.assertEqual(
_leading_static_shape(padded_graphs_tuple.n_edge), pad_graphs_to)
# Check that we can remove the padding.
graphs_tuple_size = utils_tf.get_graphs_tuple_size(graphs_tuple)
recovered_graphs_tuple = utils_tf.remove_graphs_tuple_padding(
padded_graphs_tuple, graphs_tuple_size)
return padded_graphs_tuple, recovered_graphs_tuple
# Put it into a tf.function so the shapes are unknown statically.
compiled_fn = _compile_with_tf_function(
_get_padded_and_recovered_graphs_tuple, graphs_tuple)
padded_graphs_tuple, recovered_graphs_tuple = compiled_fn(graphs_tuple)
if nested_features:
# Check that the whole structure of the outputs are the same.
tree.assert_same_structure(padded_graphs_tuple, graphs_tuple)
tree.assert_same_structure(recovered_graphs_tuple, graphs_tuple)
# Undo the nesting for the rest of the test.
def remove_nesting(this_graphs_tuple):
return this_graphs_tuple.replace(
edges=this_graphs_tuple.edges[0],
nodes=this_graphs_tuple.nodes[0]["tensor"],
globals=this_graphs_tuple.globals[1])
graphs_tuple = remove_nesting(graphs_tuple)
padded_graphs_tuple = remove_nesting(padded_graphs_tuple)
recovered_graphs_tuple = remove_nesting(recovered_graphs_tuple)
# Inspect the padded_graphs_tuple.
padded_graphs_tuple_data_dicts = utils_np.graphs_tuple_to_data_dicts(
utils_tf.nest_to_numpy(padded_graphs_tuple))
graphs_tuple_data_dicts = utils_np.graphs_tuple_to_data_dicts(
utils_tf.nest_to_numpy(graphs_tuple))
self.assertLen(padded_graphs_tuple, pad_graphs_to)
# Check that the first 2 graphs from the padded_graphs_tuple are the same.
for example_i in range(unpadded_batch_size):
tree.map_structure(
self.assertAllEqual,
graphs_tuple_data_dicts[example_i],
padded_graphs_tuple_data_dicts[example_i])
padding_data_dicts = padded_graphs_tuple_data_dicts[unpadded_batch_size:]
# Check that the third graph contains all of the padding nodes and edges.
for i, padding_data_dict in enumerate(padding_data_dicts):
# Only the first padding graph has nodes and edges.
num_nodes = num_padding_nodes if i == 0 else 0
num_edges = num_padding_edges if i == 0 else 0
self.assertAllEqual(padding_data_dict["globals"],
np.zeros([9], dtype=np.float32))
self.assertEqual(padding_data_dict["n_node"], num_nodes)
self.assertAllEqual(padding_data_dict["nodes"],
np.zeros([num_nodes, 7], dtype=np.float32))
self.assertEqual(padding_data_dict["n_edge"], num_edges)
self.assertAllEqual(padding_data_dict["edges"],
np.zeros([num_edges, 8], dtype=np.float32))
if experimental_unconnected_padding_edges:
self.assertAllEqual(padding_data_dict["receivers"],
np.zeros([num_edges], dtype=np.int32) + num_nodes)
self.assertAllEqual(padding_data_dict["senders"],
np.zeros([num_edges], dtype=np.int32) + num_nodes)
else:
self.assertAllEqual(padding_data_dict["receivers"],
np.zeros([num_edges], dtype=np.int32))
self.assertAllEqual(padding_data_dict["senders"],
np.zeros([num_edges], dtype=np.int32))
# Check that the recovered_graphs_tuple after removing padding is identical.
tree.map_structure(
self.assertAllEqual,
graphs_tuple._asdict(),
recovered_graphs_tuple._asdict())
@parameterized.parameters(
(None, False),
("edges", False),
("nodes", False),
("graphs", False),
(None, True),
("edges", True),
("nodes", True),
("graphs", True),
)
def test_raises_not_enough_space(
self, field_that_hits_limit, experimental_unconnected_padding_edges):
data_dict = test_utils_tf1.generate_random_data_dict(
(7,), (8,), (9,),
num_nodes_range=(10, 15),
num_edges_range=(20, 25))
node_size_np = data_dict["nodes"].shape[0]
edge_size_np = data_dict["edges"].shape[0]
unpadded_batch_size = 2
graphs_tuple = utils_tf.data_dicts_to_graphs_tuple(
unpadded_batch_size * [data_dict])
# Padding graph needs to have at least one graph, and at least one node,
# but should not need extra edges.
pad_edges_to = unpadded_batch_size * edge_size_np
pad_nodes_to = unpadded_batch_size * node_size_np + 1
pad_graphs_to = unpadded_batch_size + 1
if field_that_hits_limit == "edges":
pad_edges_to -= 1
elif field_that_hits_limit == "nodes":
pad_nodes_to -= 1
elif field_that_hits_limit == "graphs":
pad_graphs_to -= 1
def _get_padded_graphs_tuple(graphs_tuple):
return utils_tf.pad_graphs_tuple(
graphs_tuple,
pad_nodes_to,
pad_edges_to,
pad_graphs_to,
experimental_unconnected_padding_edges)
# Put it into a tf.function so the shapes are unknown statically.
compiled_fn = _compile_with_tf_function(
_get_padded_graphs_tuple, graphs_tuple)
if field_that_hits_limit is None:
# Should work if the test is not supposed to hit any limit.
compiled_fn(graphs_tuple)
else:
# Should raise an error.
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
"There is not enough space to pad the GraphsTuple"):
compiled_fn(graphs_tuple)
if __name__ == "__main__":
tf.test.main()
| graph_nets-master | graph_nets/tests_tf2/utils_tf_test.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model architectures for the demos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from graph_nets import modules
from graph_nets import utils_tf
import sonnet as snt
NUM_LAYERS = 2 # Hard-code number of layers in the edge/node/global models.
LATENT_SIZE = 16 # Hard-code latent layer sizes for demos.
def make_mlp_model():
"""Instantiates a new MLP, followed by LayerNorm.
The parameters of each new MLP are not shared with others generated by
this function.
Returns:
A Sonnet module which contains the MLP and LayerNorm.
"""
return snt.Sequential([
snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),
snt.LayerNorm()
])
class MLPGraphIndependent(snt.AbstractModule):
"""GraphIndependent with MLP edge, node, and global models."""
def __init__(self, name="MLPGraphIndependent"):
super(MLPGraphIndependent, self).__init__(name=name)
with self._enter_variable_scope():
self._network = modules.GraphIndependent(
edge_model_fn=make_mlp_model,
node_model_fn=make_mlp_model,
global_model_fn=make_mlp_model)
def _build(self, inputs):
return self._network(inputs)
class MLPGraphNetwork(snt.AbstractModule):
"""GraphNetwork with MLP edge, node, and global models."""
def __init__(self, name="MLPGraphNetwork"):
super(MLPGraphNetwork, self).__init__(name=name)
with self._enter_variable_scope():
self._network = modules.GraphNetwork(make_mlp_model, make_mlp_model,
make_mlp_model)
def _build(self, inputs):
return self._network(inputs)
class EncodeProcessDecode(snt.AbstractModule):
"""Full encode-process-decode model.
The model we explore includes three components:
- An "Encoder" graph net, which independently encodes the edge, node, and
global attributes (does not compute relations etc.).
- A "Core" graph net, which performs N rounds of processing (message-passing)
steps. The input to the Core is the concatenation of the Encoder's output
and the previous output of the Core (labeled "Hidden(t)" below, where "t" is
the processing step).
- A "Decoder" graph net, which independently decodes the edge, node, and
global attributes (does not compute relations etc.), on each message-passing
step.
Hidden(t) Hidden(t+1)
| ^
*---------* | *------* | *---------*
| | | | | | | |
Input --->| Encoder | *->| Core |--*->| Decoder |---> Output(t)
| |---->| | | |
*---------* *------* *---------*
"""
def __init__(self,
edge_output_size=None,
node_output_size=None,
global_output_size=None,
name="EncodeProcessDecode"):
super(EncodeProcessDecode, self).__init__(name=name)
self._encoder = MLPGraphIndependent()
self._core = MLPGraphNetwork()
self._decoder = MLPGraphIndependent()
# Transforms the outputs into the appropriate shapes.
if edge_output_size is None:
edge_fn = None
else:
edge_fn = lambda: snt.Linear(edge_output_size, name="edge_output")
if node_output_size is None:
node_fn = None
else:
node_fn = lambda: snt.Linear(node_output_size, name="node_output")
if global_output_size is None:
global_fn = None
else:
global_fn = lambda: snt.Linear(global_output_size, name="global_output")
with self._enter_variable_scope():
self._output_transform = modules.GraphIndependent(edge_fn, node_fn,
global_fn)
def _build(self, input_op, num_processing_steps):
latent = self._encoder(input_op)
latent0 = latent
output_ops = []
for _ in range(num_processing_steps):
core_input = utils_tf.concat([latent0, latent], axis=1)
latent = self._core(core_input)
decoded_op = self._decoder(latent)
output_ops.append(self._output_transform(decoded_op))
return output_ops
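# The sketch below is illustrative only and is not used by the demos: it shows
# one way the model above is typically driven under the TF1/Sonnet-1 setup of
# this file. The toy graph sizes and feature dimensions are arbitrary
# placeholder values, not part of the original demo code.
def _example_encode_process_decode():
  """Builds EncodeProcessDecode on a single toy graph (illustrative sketch)."""
  import numpy as np  # Local import to keep the module's dependencies as-is.
  data_dict = {
      "globals": np.random.rand(7).astype(np.float32),    # 7 global features.
      "nodes": np.random.rand(4, 5).astype(np.float32),   # 4 nodes, 5 features.
      "edges": np.random.rand(3, 6).astype(np.float32),   # 3 edges, 6 features.
      "senders": np.array([0, 1, 2]),
      "receivers": np.array([1, 2, 3]),
  }
  input_graphs = utils_tf.data_dicts_to_graphs_tuple([data_dict])
  model = EncodeProcessDecode(edge_output_size=1, node_output_size=2)
  # One decoded GraphsTuple is returned per message-passing step.
  return model(input_graphs, num_processing_steps=3)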
| graph_nets-master | graph_nets/demos/models.py |
# Copyright 2018 The GraphNets Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Graph networks library demos."""
| graph_nets-master | graph_nets/demos/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install script for setuptools."""
from setuptools import find_packages
from setuptools import setup
setup(
name='causal_effect_bandits',
version='1.0',
    description='Code reproducing the experiments of the paper '
'Malek, Chiappa. "Asymptotically Best Causal Effect Identification '
'with Multi-Armed Bandits", NeurIPS 2021.',
author='DeepMind',
author_email='[email protected]',
license='Apache License, Version 2.0',
url='https://github.com/deepmind/abcei_mab',
packages=find_packages(),
install_requires=[
'absl-py',
'numpy',
'matplotlib',
'scipy',
        'scikit-learn',
'typing_extensions',
],
tests_require=['mock'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| abcei_mab-main | setup.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Utilities for plotting."""
from typing import List, Optional
from causal_effect_bandits import arms
from causal_effect_bandits import bandits
import matplotlib.pyplot as plt
import numpy as np
def plot_bandit_results(ax,
arm_list: List[arms.VarianceEstimatorArm],
bdata: bandits.BanditData,
initial_points_to_ignore: Optional[int] = 0):
"""Plots a collection of variance estimates and confidence intervals.
  Both are provided as dictionaries with List[float] values and
  VarianceEstimatorArm objects as keys.
Args:
ax: matplotlib axis object
arm_list: a list of varianceEstimatorArm objects to plot.
bdata: bandits.BanditData, as returned by BanditAlgorithm.run()
initial_points_to_ignore: number of initial rounds of the algorithm to omit
from the plot.
Returns:
matplotlib axis object.
"""
for arm in arm_list:
color = "tab:blue"
num_samples = bdata.cum_samples[initial_points_to_ignore:]
mid = bdata.var_est_by_arm[arm][initial_points_to_ignore:]
lower = bdata.lcb_by_arm[arm][initial_points_to_ignore:]
upper = bdata.ucb_by_arm[arm][initial_points_to_ignore:]
ax.plot(num_samples, mid, label=arm.name)
ax.plot(num_samples, lower, color=color, alpha=0.1)
ax.plot(num_samples, upper, color=color, alpha=0.1)
ax.fill_between(num_samples, lower, upper, alpha=0.2)
ax.set_xlabel("Samples")
ax.set_ylabel("Variance")
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.legend(loc="upper right")
return ax
def plot_3d_function(f, x_low, x_high, y_low, y_high):
"""Generates a 3d plot of f."""
n_points = 100
x = np.linspace(x_low, x_high, n_points)
y = np.linspace(y_low, y_high, n_points)
x, y = np.meshgrid(x, y)
z = f(np.c_[x.flatten(), y.flatten()])
z = np.reshape(z, (n_points, n_points))
ax = plt.axes(projection="3d")
ax.plot_surface(
x, y, z, rstride=1, cstride=1, cmap="viridis", edgecolor="none")
ax.set_xlabel("x")
ax.set_ylabel("y")
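# A minimal usage sketch (not executed on import): the Gaussian bump and the
# plotting range below are illustrative placeholders. plot_3d_function expects
# f to accept an (n, 2) array of stacked (x, y) points and return n values.
def _example_plot_3d_function():
  f = lambda xy: np.exp(-np.sum(xy**2, axis=1))  # Radially symmetric bump.
  plot_3d_function(f, x_low=-2, x_high=2, y_low=-2, y_high=2)
  plt.show()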
| abcei_mab-main | causal_effect_bandits/plotting_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Example SCMs used for unit tests."""
from causal_effect_bandits import scm
import numpy as np
def back_door_scm():
"""A common SCM used for many of the unit tests."""
var_names = ["Z", "X", "Y"]
cov_variables = ["Z"]
treatment_variable = "X"
response_variable = "Y"
### Define the parents
parents = {
"Z": [],
"X": ["Z"],
"Y": ["Z", "X"],
}
p = np.random.uniform(.2, .8)
beta = 10 * np.random.uniform(-.3, .3)
alpha = np.random.uniform(-.3, .3)
tau = 10
mu_z = p
var_z = p * (1 - p)
mu_x = .5 + alpha * p
var_x = mu_x * (1 - mu_x)
mu_y = tau * mu_x + beta * mu_z
var_y = 1 + tau**2 * var_x + beta**2 * var_z
# populate the CPDs
def y_cpd(_, parents):
return (np.random.normal(size=len(parents[0])) + tau * parents[1] +
beta * parents[0])
cpds = {
"Z": lambda n, parents: np.random.binomial(1, p, size=n),
"X": lambda n, parents: np.random.binomial(1, .5 + alpha * parents[0]),
"Y": y_cpd,
}
scm_gen = scm.SCM(
name="back-door example",
var_names=var_names,
parents=parents,
cpds=cpds,
cov_variables=cov_variables,
treatment_variable=treatment_variable,
response_variable=response_variable,
)
# Verify the means and variances
return (scm_gen, {
"tau": tau,
"mu_z": mu_z,
"var_z": var_z,
"mu_x": mu_x,
"var_x": var_x,
"mu_y": mu_y,
"var_y": var_y,
"beta": beta,
"alpha": alpha,
})
def frontdoor_scm():
"""A common SCM used for many of the unit tests."""
var_names = ["U", "X", "Z", "Y"]
cov_variables = ["U", "Z"]
treatment_variable = "X"
response_variable = "Y"
# Define the parents
parents = {
"U": [],
"X": ["U"],
"Z": ["X"],
"Y": ["Z", "U"],
}
p = np.random.uniform(.2, .8)
beta = np.random.uniform(-1, 1)
gamma = np.random.uniform(5, 10)
alpha_1 = np.random.uniform(-.3, .3)
alpha_2 = np.random.uniform(-.3, .3)
def g(x):
return 1 / (1 + np.exp(-x))
mu_u = p
var_u = p * (1 - p)
mu_x = p * g(alpha_1) + (1 - p) / 2
var_x = mu_x * (1 - mu_x)
mu_z = mu_x * g(beta) + (1 - mu_x) / 2
var_z = mu_z * (1 - mu_z)
mu_y = gamma * mu_z + alpha_2 * mu_u
tau = gamma * (g(beta) - .5) # calculated by hand
# Calculate the joint probabilities of X and u
pz1u1 = (g(beta) * g(alpha_1) + .5 * (1 - g(alpha_1))) * p
pz1u0 = (g(beta) / 2 + 1 / 4) * (1 - p)
pz0u1 = ((1 - g(beta)) * g(alpha_1) + (1 - g(alpha_1)) / 2) * p
# Take the expectation of Y^2 by hand.
mu_y2 = 1 + (gamma**2 +
alpha_2**2) * pz1u1 + gamma**2 * pz1u0 + alpha_2**2 * pz0u1
var_y = mu_y2 - mu_y**2
# populate the CPDs
def y_cpd(_, parents):
return (np.random.normal(size=len(parents[0])) + gamma * parents[0] +
alpha_2 * parents[1])
def x_cpd(n, parents):
return np.random.binomial(1, g(alpha_1 * parents[0]), size=n)
def z_cpd(n, parents):
return np.random.binomial(1, g(beta * parents[0]), size=n)
cpds = {
"U": lambda n, parents: np.random.binomial(1, p, size=n),
"X": x_cpd,
"Z": z_cpd,
"Y": y_cpd,
}
scm_gen = scm.SCM(
name="frontdoor example",
var_names=var_names,
parents=parents,
cpds=cpds,
cov_variables=cov_variables,
treatment_variable=treatment_variable,
response_variable=response_variable,
)
return (scm_gen, {
"tau": tau,
"mu_z": mu_z,
"var_z": var_z,
"mu_x": mu_x,
"var_x": var_x,
"mu_y": mu_y,
"var_y": var_y,
"mu_u": mu_u,
"var_u": var_u,
"alpha_1": alpha_1,
"alpha_2": alpha_2,
"beta": beta,
"gamma": gamma,
})
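# A hedged usage sketch (not called by the unit tests): it shows how the SCMs
# above are typically consumed, namely by drawing an ExpData sample and
# comparing its statistics against the returned ground-truth parameters. The
# sample size is an arbitrary placeholder.
def _example_sample_back_door_scm():
  scm_gen, params = back_door_scm()
  samples = scm_gen.generate(1000)  # ExpData with .cov, .exp and .rsp fields.
  return np.mean(samples.rsp), params["mu_y"]  # These should roughly agree.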
| abcei_mab-main | causal_effect_bandits/example_scm.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Utilities for the unit tests."""
from typing import Optional
from causal_effect_bandits import arms
from causal_effect_bandits import data
import numpy as np
def run_single_arm(arm: arms.VarianceEstimatorArm,
data_gen: data.DataGenerator,
max_samples: int,
increment: int,
total_delta: Optional[float] = .05):
"""Runs a single variance estimator arm.
  Recalculates the confidence intervals, ATE, and variance estimates
  for a sequence of datasets, where each dataset is the previous one with
  `increment` more samples.
Args:
arm: the arm to run
data_gen: the DataGenerator
max_samples: the number of samples used
increment: the number of samples generated between updates of the arm
total_delta: the total error for the confidence intervals of arm
Returns:
A dictionary with keys, values:
'LCBs': np.ndarray of lower confidence bounds for the dataset
sequence
'UCBs': np.ndarray of upper confidence bounds for the dataset
sequence
'var_estimates': np.ndarray of variance estimates for the dataset
sequence
'ATE_estimates': np.ndarray of ATE estimates for the dataset
sequence
'n_samples': np.ndarray of sizes of the dataset sequence
"""
lcbs = []
ucbs = []
var_estimates = []
ate_estimates = []
n_samples = []
current_n_samples = 0
arm_pulled = 0
while current_n_samples < max_samples:
current_n_samples += increment
n_samples.append(current_n_samples)
new_data = data_gen.generate(increment)
arm.update(new_data, delta=total_delta)
lcbs.append(arm.ci[0])
ucbs.append(arm.ci[1])
var_estimates.append(arm.var_est)
ate_estimates.append(arm.ate)
arm_pulled += 1
return {
'LCBs': lcbs,
'UCBs': ucbs,
'var_estimates': var_estimates,
      'ATE_estimates': ate_estimates,
'n_samples': n_samples
}
def estimate_true_variance(
data_gen: data.DataGenerator,
arm: arms.VarianceEstimatorArm,
n_samples: Optional[int] = 10000,
) -> float:
"""This method uses the true parameters to estimate the variance.
Calculates
E_n[(phi(W, eta) - tau)^2].
We use an empirical estimation to approximate the expected value,
at the true values of eta and tau.
Args:
data_gen: A data.DataGenerator used to produce the data
arm: arm used to estimate the variance
n_samples: the number of samples to generate
Returns:
A real number estimate of the true variance.
"""
new_data = data_gen.generate(n_samples)
arm.eta.set_to_truth(data_gen)
return np.var(arm.eta.calculate_score(new_data))
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
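# Worked example (illustrative): softmax([0., np.log(3.)]) subtracts the max,
# exponentiates to [exp(-log 3), exp(0)] = [1/3, 1], and normalises to
# [0.25, 0.75], matching the unshifted computation exp(x) / sum(exp(x)).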
| abcei_mab-main | causal_effect_bandits/test_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Contains utilities needed to generate the paper's plots."""
from typing import List, Optional, Tuple
import warnings
from causal_effect_bandits import arms
from causal_effect_bandits import bandits
from causal_effect_bandits import data
from causal_effect_bandits import nonlinear_utils
from causal_effect_bandits import parameters
from causal_effect_bandits import scm
import numpy as np
import sklearn as sk
warnings.filterwarnings('ignore', category=sk.exceptions.ConvergenceWarning)
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def get_section_5_1_cpds(
z_dim: List[int],
v_dim: List[int],
num_back_door_paths: int,
seed: Optional[int] = None,
):
"""Returns the CPDs needed by this example.
Args:
z_dim: the dimension of Z
v_dim: the dimension of V
num_back_door_paths: the number of back_door paths, i.e. M
seed: the random seed
Returns:
A dictionary of functions, each representing a conditional
    distribution, indexed by the variable name strings.
"""
rng = np.random.default_rng(seed)
# Include a zero first index corresponding to z_0 and a placeholder for V_0.
# This fixes the indices so, e.g. mu_Vi = mu_V[i], beta_i = beta[i].
mu_v = [0]
cov_v = [0] # V_0 does not exist
mu_z = [.2 * rng.standard_normal(size=z_dim[0])]
cov_z = [.5 * np.identity(z_dim[0])]
alpha = [0]
gamma = []
mu_y = 0
cov_y = .1
beta = [.2 * rng.standard_normal(size=(z_dim[0]))]
for i in range(1, num_back_door_paths + 1):
strength = rng.uniform(.1, 1.9)
# V
mu_v.append(0 * rng.standard_normal(size=v_dim[i]))
cov_v.append(1 * np.identity(v_dim[i]))
# Z_i
mu_z.append(0 * rng.standard_normal(size=z_dim[i]))
cov_z.append(.1 * np.identity(z_dim[i]))
# Vi->X
gamma.append(strength * rng.standard_normal(size=v_dim[i]))
# Vi->Zi
alpha.append(.5 * strength * rng.standard_normal(size=(v_dim[i], z_dim[i])))
## Zi->Y
    beta.append((2 - strength) * rng.standard_normal(size=(z_dim[i])))
# Next, we define the CPDs
def x_cpd(n, parents):
del n
return scm.logistic_linear_bernoulli_cpd(parents, gamma)
def y_cpd(n, parents):
del n
return scm.normal_cpd(parents, beta, mean=mu_y, cov=cov_y)
cpds = {
'X': x_cpd,
'Y': y_cpd,
}
def make_v_cpd(mean, cov):
def f(n, parents):
      del parents  # unused
return np.random.multivariate_normal(mean=mean, cov=cov, size=n)
return f
def make_z_cpd(alpha, mean, cov):
def f(n, parents):
del n # unused
return scm.normal_cpd(parents, [alpha], mean, cov)
return f
for i in range(1, num_back_door_paths + 1):
cpds['V' + str(i)] = make_v_cpd(mu_v[i], cov_v[i])
cpds['Z' + str(i)] = make_z_cpd(alpha[i], mu_z[i], cov_z[i])
## Z_0
support_size = 10 # the number of values Z_0 may have
z0_support = rng.normal(loc=mu_z[0], scale=1, size=(support_size, z_dim[0]))
tau = 0
tau_max = 0
# To select a tau on the larger side, we randomly sample 10 of them then
# choose the first tau that is larger. See the secretary problem.
for _ in range(10):
# Generate two random categorical distributions for Z0
p_z0x0 = softmax(.4 * rng.standard_normal(size=support_size))
p_z0x1 = softmax(.4 * rng.standard_normal(size=support_size))
tau_max = max(
tau,
p_z0x1.dot(z0_support).dot(beta[0]) -
p_z0x0.dot(z0_support).dot(beta[0]))
while tau < tau_max: # make sure tau is big enough
# Generate two random categorical distributions for Z0
p_z0x0 = softmax(.4 * rng.standard_normal(size=support_size))
p_z0x1 = softmax(.4 * rng.standard_normal(size=support_size))
tau = p_z0x1.dot(z0_support).dot(beta[0]) - p_z0x0.dot(z0_support).dot(
beta[0])
# X->Z0
def z0x0_cpd(n, parents):
del parents
idx = np.random.choice(support_size, size=n, p=p_z0x0)
return np.array([z0_support[int(i)] for i in idx])
def z0x1_cpd(n, parents):
del parents
idx = np.random.choice(support_size, size=n, p=p_z0x1)
return np.array([z0_support[int(i)] for i in idx])
def z0_cpd(n, parents):
del n
return scm.categorical_conditioning(
parents, 0, distributions=[z0x0_cpd, z0x1_cpd])
cpds['Z0'] = z0_cpd
return cpds
def get_section_5_2_cpds(
z_dim: List[int],
v_dim: List[int],
num_back_door_paths: int,
seed: Optional[int] = None,
):
"""Returns the CPDs needed by this example.
Args:
z_dim: the dimension of Z
v_dim: the dimension of V
num_back_door_paths: the number of back_door paths, i.e. M
seed: the random seed
Returns:
A dictionary of functions, each representing a conditional
    distribution, indexed by the variable name strings.
"""
rng = np.random.default_rng(seed)
# Include a zero first index corresponding to z_0 and a placeholder for V_0.
# This fixes the indices so, e.g. mu_Vi = mu_V[i], beta_i = beta[i].
mu_v = [0]
cov_v = [0] # V_0 does not exist
mu_z = [.2 * rng.standard_normal(size=z_dim[0])]
cov_z = [.5 * np.identity(z_dim[0])]
alpha = [0]
gamma = []
mu_y = 0
cov_y = .1
beta = [.2 * rng.standard_normal(size=(z_dim[0]))]
for i in range(1, num_back_door_paths + 1):
strength = np.random.uniform(.1, 1.9)
# V
mu_v.append(0 * rng.standard_normal(size=v_dim[i]))
cov_v.append(1 * np.identity(v_dim[i]))
# Z_i
mu_z.append(0 * rng.standard_normal(size=z_dim[i]))
cov_z.append(.1 * np.identity(z_dim[i]))
# Vi->X
gamma.append(strength * rng.standard_normal(size=v_dim[i]))
# Vi->Zi
alpha.append(.5 * strength * rng.standard_normal(size=(v_dim[i], z_dim[i])))
## Zi->Y
    beta.append((2 - strength) * rng.standard_normal(size=(z_dim[i])))
## Z_0
support_size = 10 # the number of values Z_0 may have
z0_support = rng.normal(loc=mu_z[0], scale=1, size=(support_size, z_dim[0]))
tau = 0
tau_max = 0
# To select a tau on the larger side, we randomly sample 10 of them then
# choose the first tau that is larger. See the secretary problem.
for _ in range(10):
# Generate two random categorical distributions for Z0
p_z0x0 = softmax(.4 * rng.standard_normal(size=support_size))
p_z0x1 = softmax(.4 * rng.standard_normal(size=support_size))
tau_max = max(
tau,
p_z0x1.dot(z0_support).dot(beta[0]) -
p_z0x0.dot(z0_support).dot(beta[0]))
while tau < tau_max: # make sure tau is big enough
# Generate two random categorical distributions for Z0
p_z0x0 = softmax(.4 * rng.standard_normal(size=support_size))
p_z0x1 = softmax(.4 * rng.standard_normal(size=support_size))
tau = p_z0x1.dot(z0_support).dot(beta[0]) - p_z0x0.dot(z0_support).dot(
beta[0])
# X->Z0
def z0x0_cpd(n, parents):
del parents
idx = np.random.choice(support_size, size=n, p=p_z0x0)
return np.array([z0_support[int(i)] for i in idx])
def z0x1_cpd(n, parents):
del parents
idx = np.random.choice(support_size, size=n, p=p_z0x1)
return np.array([z0_support[int(i)] for i in idx])
def z0_cpd(n, parents):
del n
return scm.categorical_conditioning(
parents, 0, distributions=[z0x0_cpd, z0x1_cpd])
# Specify Gaussian Process
kernel = sk.gaussian_process.kernels.RBF(
length_scale=1, length_scale_bounds=(1e-1, 1000.0))
f_ys = []
for dim in z_dim:
f_ys.append(
nonlinear_utils.generate_random_nd_function(
dim=dim,
out_dim=1,
kernel=kernel,
min_x=-2,
max_x=2,
n_points=10,
alpha=1e0))
# f_y should have shape (n_units,1)
f_y = lambda x: np.sum([f(x[i]) for i, f in enumerate(f_ys)], axis=0)
f_xs = []
for dim in v_dim[1:]:
f_xs.append(
nonlinear_utils.generate_random_nd_function(
dim=dim,
out_dim=1,
kernel=kernel,
min_x=-2,
max_x=2,
n_points=10,
alpha=1e0))
# f_x should have shape (n_units,1)
f_x = lambda x: np.sum([f(x[i]) for i, f in enumerate(f_xs)], axis=0)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def bernoulli(x):
return np.random.binomial(1, x)
add_noise = scm.add_gaussian_noise
# Next, we define the CPDs
cpds = {
'X': lambda n, parents: bernoulli(sigmoid(f_x(parents))).flatten(),
'Y': lambda n, parents: add_noise(f_y(parents), mean=mu_y, cov=cov_y),
}
cpds['Z0'] = z0_cpd
def make_v_cpd(mean, cov):
def f_v(n, parents):
      del parents  # unused
return np.random.multivariate_normal(mean=mean, cov=cov, size=n)
return f_v
def make_z_cpd(f, mean, cov):
def f_z(n, parents):
del n # unused
return scm.structural_equation_with_noise(
parents, f=f, mean=mean, cov=cov)
return f_z
for i in range(1, num_back_door_paths + 1):
f_z = nonlinear_utils.generate_random_nd_function(
dim=v_dim[i],
out_dim=z_dim[i],
kernel=kernel,
min_x=-1,
max_x=1,
n_points=10,
alpha=1e0)
cpds['V' + str(i)] = make_v_cpd(mean=mu_v[i], cov=cov_v[i])
cpds['Z' + str(i)] = make_z_cpd(f=f_z, mean=mu_z[i], cov=cov_z[i])
return cpds
def get_section_5_1_example(
z_dim: List[int],
v_dim: List[int],
seed: int,
z_cost: float,
v_cost: float,
z0_cost: float,
num_back_door_paths: int,
sample_limit: int,
units_per_round: int,
latex_names=False,
) -> Tuple[scm.SCM, List[arms.VarianceEstimatorArm], bandits.LUCBAlgorithm,
bandits.SuccessiveEliminationAlgorithm, bandits.UniformAlgorithm,]:
"""Returns a random instance of an SCM from Section 5.1.
Args:
z_dim: the dimension of Z
v_dim: the dimension of V
seed: the random seed
z_cost: the cost of observing a Z_i
v_cost: the cost of observing a V_i
z0_cost: the cost of observing Z_0, the frontdoor variable.
num_back_door_paths: the number of back_door paths, i.e. M-1.
sample_limit: the total number of samples before termination
    units_per_round: the number of units sampled every round for each algorithm.
latex_names: whether we should include latex-friendly names for the arms.
Returns:
a scm.SCM data generator as described in Section 5.1.
arm_list: a list of VarianceEstimatorArms
lucb_bandit: a LUCBAlgorithm over the arms
se_bandit: a SuccessiveEliminationAlgorithm over the arms
uniform_bandit: a UniformAlgorithm over the arms
"""
cpd_dict = get_section_5_1_cpds(z_dim, v_dim, num_back_door_paths, seed)
return generate_example_from_cpd(
z_dim,
v_dim,
z_cost,
v_cost,
z0_cost,
num_back_door_paths,
cpd_dict,
sample_limit,
units_per_round,
latex_names,
arms.CIAlgorithm.FINITE_SAMPLE,
)
def get_section_5_2_example(
z_dim: List[int],
v_dim: List[int],
seed: int,
z_cost: float,
v_cost: float,
z0_cost: float,
num_back_door_paths: int,
sample_limit: int,
units_per_round: int,
latex_names=False,
) -> Tuple[scm.SCM, List[arms.VarianceEstimatorArm], bandits.LUCBAlgorithm,
bandits.SuccessiveEliminationAlgorithm, bandits.UniformAlgorithm,]:
"""Returns a random instance of an SCM from Section 5.2.
Args:
z_dim: the dimension of Z
v_dim: the dimension of V
seed: the random seed
z_cost: the cost of observing a Z_i
v_cost: the cost of observing a V_i
z0_cost: the cost of observing Z_0, the frontdoor variable.
num_back_door_paths: the number of back_door paths, i.e. M-1.
sample_limit: the total number of samples before termination
    units_per_round: the number of units sampled every round for each algorithm.
latex_names: whether we should include latex-friendly names for the arms.
Returns:
a scm.SCM data generator as described in Section 5.2.
arm_list: a list of VarianceEstimatorArms
lucb_bandit: a LUCBAlgorithm over the arms
se_bandit: a SuccessiveEliminationAlgorithm over the arms
uniform_bandit: a UniformAlgorithm over the arms
"""
cpd_dict = get_section_5_2_cpds(z_dim, v_dim, num_back_door_paths, seed)
return generate_example_from_cpd(
z_dim,
v_dim,
z_cost,
v_cost,
z0_cost,
num_back_door_paths,
cpd_dict,
sample_limit,
units_per_round,
latex_names,
arms.CIAlgorithm.CLT,
)
def generate_example_from_cpd(
z_dim: List[int],
v_dim: List[int],
z_cost: float,
v_cost: float,
z0_cost: float,
num_back_door_paths: int,
cpd_dict,
sample_limit: int,
units_per_round: int,
latex_names: bool,
ci_algorithm: arms.CIAlgorithm,
) -> Tuple[scm.SCM, List[arms.VarianceEstimatorArm], bandits.LUCBAlgorithm,
bandits.SuccessiveEliminationAlgorithm, bandits.UniformAlgorithm]:
"""Returns a random instance of an SCM from Section 5.1.
Args:
z_dim: the dimension of Z
v_dim: the dimension of V
    z_cost: the cost of observing a Z_i
    v_cost: the cost of observing a V_i
    z0_cost: the cost of observing Z_0, the frontdoor variable.
num_back_door_paths: the number of back_door paths, i.e. M-1.
cpd_dict: a dictionary of cpd functions.
sample_limit: the total number of samples before termination
    units_per_round: the number of units sampled every round for each algorithm.
latex_names: whether we should include latex-friendly names for the arms.
ci_algorithm: the algorithm used for the confidence intervals
Returns:
a scm.SCM data generator as described in Section 5.1.
arm_list: a list of VarianceEstimatorArms
lucb_bandit: a LUCBAlgorithm over the arms
se_bandit: a SuccessiveEliminationAlgorithm over the arms
uniform_bandit: a UniformAlgorithm over the arms
"""
var_names = []
parents = {
'Z0': ['X'],
'X': [],
'Y': ['Z0'],
}
cov_variables = ['Z0']
treatment_variable = 'X'
response_variable = 'Y'
for i in range(1, num_back_door_paths + 1):
var_names.append('V' + str(i))
var_names.append('Z' + str(i))
cov_variables.append('V' + str(i))
cov_variables.append('Z' + str(i))
parents['V' + str(i)] = []
parents['Z' + str(i)] = ['V' + str(i)]
parents['Y'].append('Z' + str(i))
parents['X'].append('V' + str(i))
var_names.extend(['X', 'Z0', 'Y'])
scm_gen = scm.SCM(
'Section 5 SCM',
var_names,
parents,
cpd_dict,
cov_variables,
treatment_variable,
response_variable,
)
  # create lists of indices: z_idx[i], v_idx[i] are the indices occupied by Z_i,
  # V_i, respectively.
total_z_dim = sum(z_dim)
z_idx = [list(range(z_dim[0]))]
v_idx = [[]]
cum_z_dim = np.cumsum(z_dim)
cum_v_dim = np.cumsum(v_dim)
for i in range(num_back_door_paths):
z_idx.append(list(range(cum_z_dim[i], cum_z_dim[i + 1])))
v_idx.append(
list(range(total_z_dim + cum_v_dim[i], total_z_dim + cum_v_dim[i + 1])))
# the mask for Z0
select_z0_fn = data.get_coordinate_mask_fn(z_idx[0])
def generate_selection_masks(n, z_cost, v_cost):
if n == 1:
return [z_idx[n], v_idx[n]], ['Z_1', 'V_1'], [z_cost, v_cost]
masks, names, costs = generate_selection_masks(n - 1, z_cost, v_cost)
masks_with_z = [np.r_[m, z_idx[n]] for m in masks]
names_with_z = [name + 'Z_' + str(n) for name in names]
costs_with_z = [z_cost + c for c in costs]
masks_with_v = [np.r_[m, v_idx[n]] for m in masks]
names_with_v = [name + 'V_' + str(n) for name in names]
costs_with_v = [v_cost + c for c in costs]
return (
masks_with_z + masks_with_v,
names_with_z + names_with_v,
costs_with_z + costs_with_v,
)
(masks, names, costs) = generate_selection_masks(num_back_door_paths, z_cost,
v_cost)
if latex_names:
names = ['$' + n + '$' for n in names]
selection_fns = [data.get_coordinate_mask_fn(mask) for mask in masks]
# Calculate the dimension of the covariates
dims = [len(mask) for mask in masks]
# At this point, for arm i, we have
# - fns[i] converts data generated by scm_gen (which includes all covariates)
# to data that is needed by the ith estimator (which only includes the
# necessary covariates).
# - costs[i]: the observational cost
# - name[i]: the name
# - dims[i]: the dimension of the covariates
d = scm_gen.generate(1000)
sub_gscale = np.var(d.exp) + np.var(d.cov) + np.var(d.rsp)
# Next, we define the arms, beginning with the frontdoor arm
arm_list = [
arms.SampleSplittingArm(
'frontdoor',
eta=parameters.FrontDoorLinearFiniteZ(
n_features=z_dim[0],
min_units=5,
min_overlap=.25,
),
data_transformer=select_z0_fn,
ci_algorithm=ci_algorithm,
cost=z0_cost,
sub_gscale=sub_gscale,
tau_bound=2,
burn_in=1000,
rho=1,
)
]
  # And include all the back_door arms (skipping the first element, which
  # corresponds to Z0).
for fn, name, cost, dim in zip(selection_fns, names, costs, dims):
arm_list.append(
arms.SampleSplittingArm(
name,
eta=parameters.AIPWLinearLogistic(
n_features=dim, min_units=5, min_overlap=.25),
data_transformer=fn,
ci_algorithm=ci_algorithm,
cost=cost,
sub_gscale=sub_gscale,
tau_bound=2,
burn_in=1000,
rho=1,
))
# And the three bandit algorithms are defined below.
lucb_bandit = bandits.LUCBAlgorithm(
arm_list,
prob_model=scm_gen,
error_prob=.05,
sample_limit=sample_limit,
units_per_round=units_per_round,
)
se_bandit = bandits.SuccessiveEliminationAlgorithm(
arm_list,
prob_model=scm_gen,
error_prob=.05,
sample_limit=sample_limit,
units_per_round=units_per_round,
)
uniform_bandit = bandits.UniformAlgorithm(
arm_list,
prob_model=scm_gen,
error_prob=.05,
sample_limit=sample_limit,
units_per_round=units_per_round,
)
return (
scm_gen,
arm_list,
lucb_bandit,
se_bandit,
uniform_bandit,
)
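# The sketch below is illustrative and is not invoked anywhere in the package:
# it shows how the builders above are wired together into a single experiment
# run. The dimensions, costs and budgets are placeholder values, not the
# configurations used in the paper.
def _example_section_5_1_run():
  scm_gen, arm_list, lucb_bandit, se_bandit, uniform_bandit = (
      get_section_5_1_example(
          z_dim=[1, 2, 2],  # Dimensions of Z_0, Z_1, Z_2.
          v_dim=[0, 2, 2],  # Placeholder for V_0, then dimensions of V_1, V_2.
          seed=0,
          z_cost=1.,
          v_cost=1.,
          z0_cost=1.,
          num_back_door_paths=2,
          sample_limit=5000,
          units_per_round=500,
      ))
  del scm_gen, se_bandit, uniform_bandit  # Unused in this sketch.
  bandit_data = lucb_bandit.run()  # A bandits.BanditData record.
  return arm_list, bandit_data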
| abcei_mab-main | causal_effect_bandits/paper_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Classes that estimate the asymptotic variance."""
import enum
from typing import Callable, Tuple
from causal_effect_bandits import data
from causal_effect_bandits import parameters
import numpy as np
import scipy
class VarianceEstimatorArm:
"""Calculate a confidence interval on the variance under nuisance.
It uses a NuisanceParameter class, with its influence function, and
builds a variance estimator on top.
Data is persisted, so only incremental data updates should be
provided. There are three technical pieces required:
1. A model to fit the nuisance parameter
  2. Data processing that removes the unnecessary covariates from the data
  3. A meta algorithm that uses the influence function and nuisance estimator
     and creates an estimate of the variance
Specific meta-algorithms will be implemented by sub-classes.
Attributes:
    eta: parameters.NuisanceParameter, the nuisance parameter.
    _data_transformer: function that processes an ExpData object into the
      format expected by the NuisanceParameter object: it must map an
      nd.array of shape (n_units, n_features) to an nd.array of shape
      (n_units, n_features_eta).
_data: accumulated data (data.ExpData)
ci: (float, float) The most recently calculated confidence interval
delta: The delta used in calculating _ci.
var_est: The most recent variance estimate.
ate: a float of the average treatment effect estimate.
name: str the name of the formula
num_pulled: an int for the number of times the arm has been updated
cost: the cost of sampling from this estimator
Methods:
update(new_data: data.ExpData): appends the data and recomputes the ci
get_ci: returns the confidence interval
"""
def __init__(self,
name: str,
eta: parameters.NuisanceParameter,
data_transformer: Callable[[data.ExpData], data.ExpData],
cost: float = 1):
self._name = name
self._ci = (-np.inf, np.inf)
self._eta = eta
self._data_transformer = data_transformer
self._ate = None
self._var_est = None
self._num_pulled = 0
self._split_data = None
self.cost = cost
def update(self, new_data: data.ExpData, delta: float) -> Tuple[float, float]:
"""Appends new_data to the dataset and recomputes the variance ci.
Args:
new_data: data.ExpData to append
delta: the error tolerance
Returns:
A confidence interval that holds with probability 1-delta.
"""
del new_data, delta # for subclassing
self._ci = (0, np.inf) # default ci.
    self._num_pulled += 1
return self._ci
def reset(self) -> None:
"""Resets the arm to its default state."""
self._ci = (None, None)
self._eta.reset()
self._ate = None
self._var_est = None
self._num_pulled = 0
self._split_data = None
def get_num_units(self) -> int:
"""Returns the number of units seen by this arm."""
return 0
@property
def name(self) -> str:
return self._name
@property
def ci(self) -> Tuple[float, float]:
"""Returns the most recent confidence interval."""
return self._ci
@property
def var_est(self) -> float:
return self._var_est
@property
def num_pulled(self) -> int:
return self._num_pulled
@property
def ate(self) -> float:
return self._ate
@property
def eta(self) -> parameters.NuisanceParameter:
return self._eta
class CIAlgorithm(enum.Enum):
CHI2 = "chi2"
CLT = "CLT"
FINITE_SAMPLE = "finite_sample"
class SampleSplittingArm(VarianceEstimatorArm):
"""Implements the sample-splitting variance estimator in the paper.
  This class assumes an uncentered influence function.
Data is split into two folds
- Fold 1 is used to fit eta
- Fold 2 is used to estimate the variance.
Further, the calculation of the confidence interval has several options.
Attributes:
stable_splits: bool incrementally add data to either split vs. resplitting
all the data every update.
sub_gscale: float the subGaussian parameter for phi(W, eta)
eta_sub_gscale: float the subGaussian parameter for eta
    tau_bound: float a bound on the maximum tau.
burn_in: int When to start the confidence sequence. Equal to m in the paper.
eta: a parameters.NuisanceParameter class
delta: the high probability tolerance
    n_splits: the number of splits, either 2 or 3. A 3-way split will use a
separate fold of data to estimate ATE.
    rho: hyperparameter for the Gaussian mixture confidence sequence.
"""
def __init__(
self,
name: str,
eta: parameters.NuisanceParameter,
data_transformer: Callable[[data.ExpData], data.ExpData],
ci_algorithm: CIAlgorithm = CIAlgorithm.FINITE_SAMPLE,
cost: float = 1,
stable_splits: bool = True,
sub_gscale: float = 1,
eta_sub_gscale: float = 1,
tau_bound: float = 1,
burn_in: float = 0,
rho: float = .1,
):
super().__init__(name, eta, data_transformer, cost)
self._ci_algorithm = ci_algorithm
self._stable_splits = stable_splits
self._sub_gscale = sub_gscale
self._eta_sub_gscale = eta_sub_gscale
self._tau_bound = tau_bound
self._burn_in = burn_in
self._rho = rho
def update(
self,
new_data: data.ExpData,
delta: float,
) -> Tuple[float, float]:
"""Appends new_data to the dataset and recomputes the variance ci.
Args:
new_data: data.ExpData to append
delta: the error tolerance
Returns:
A confidence interval that holds with probability 1-delta.
"""
if not 0 < delta < 1:
raise ValueError(f"delta={delta}, but expected to be in (0,1)")
self._num_pulled += 1
self.delta = delta
if self._split_data is None:
self._split_data = self._data_transformer(new_data).k_fold(2)
else:
if self._stable_splits:
new_split = self._data_transformer(new_data).k_fold(2)
for i in range(2):
self._split_data[i].append(new_split[i])
else:
self._split_data = self._data_transformer(new_data).k_fold(2)
# Use the first split to train eta
self._eta.fit(self._split_data[0])
    # We need to check that all folds and all control/treatment groups
    # have at least one unit. Otherwise, return [0, np.inf) as the
    # confidence interval.
if not self._eta.has_enough_samples():
self._ci = (0, np.inf)
self._ate = np.nan
self._var_est = np.inf
return self._ci
# Use the second split to learn the ATE
scores = self._eta.calculate_score(self._split_data[1])
self._ate = np.mean(scores)
# Use the second or third split to calculate the variance
self._var_est = np.mean(
(self._eta.calculate_score(self._split_data[1]) - self._ate)**2)
if np.isnan(self._var_est):
raise ValueError("var_est is a nan!")
if self._ci_algorithm is CIAlgorithm.CHI2:
n = self._split_data[1].n_units
ci_low = self.var_est * (n - 1) / scipy.stats.chi2.ppf(
1 - delta / 2, df=n - 1)
ci_high = self.var_est * (n - 1) / scipy.stats.chi2.ppf(
delta / 2, df=n - 1)
self._ci = (ci_low, ci_high)
elif self._ci_algorithm is CIAlgorithm.CLT:
clt_delta = delta / self._num_pulled**2
n = self._split_data[1].n_units
scores = self._eta.calculate_score(self._split_data[1])
var_of_var = np.var((scores - self._ate)**2)
ci_low = max(
self.var_est +
scipy.stats.norm.ppf(clt_delta / 2) * np.sqrt(var_of_var / n), 0)
ci_high = self.var_est + scipy.stats.norm.ppf(1 - clt_delta /
2) * np.sqrt(var_of_var / n)
self._ci = (ci_low, ci_high)
elif self._ci_algorithm is CIAlgorithm.FINITE_SAMPLE:
if self._tau_bound <= 0 or self._burn_in <= 1 or self._sub_gscale <= 0:
raise ValueError("tau_bound, burn_in, and subgScale must be positive")
eta_cs_width = self._eta.calculate_cs_width(
delta / 2,
sub_g=self._eta_sub_gscale,
rho=self._rho,
)
n = len(self._split_data[1])
if self._burn_in > n:
self._ci = (0, np.inf)
return self._ci
# See the paper, section 3.5 for a justification of this formula
lambdap = max(self._sub_gscale, 8 * self._sub_gscale**2)
width = 5 / 8 * (np.sqrt(2 * self._sub_gscale) + self._tau_bound)
width *= np.sqrt(2 * np.log(lambdap * n / self._burn_in) / n + .5 +
np.log(4 / delta)) / np.sqrt(n)
width += eta_cs_width**2
self._ci = (max(self.var_est - width, 0), self.var_est + width)
else:
raise ValueError(f"{self._ci_algorithm} is an unknown CI algorithm name.")
return self._ci
def get_num_units(self) -> int:
"""Returns the number of units seen by this arm."""
if self._split_data is None:
return 0
return sum(len(data) for data in self._split_data)
def reset(self) -> None:
super().reset()
self._split_data = None
class BootstrapArm(VarianceEstimatorArm):
"""Uses a bootstrap to estimate the variance.
The bootstrap only has asymptotic guarantees but
it could yield good empirical performance.
"""
def __init__(
self,
name: str,
eta: parameters.NuisanceParameter,
data_transformer: Callable[[data.ExpData], data.ExpData],
n_bootstraps: int = 0,
stable_splits: bool = True,
):
super().__init__(name, eta, data_transformer)
self.n_bootstraps = n_bootstraps
self.n_splits = 2
self._stable_splits = stable_splits
def update(
self,
new_data: data.ExpData,
delta: float,
) -> Tuple[float, float]:
"""Appends new_data to the dataset and recomputes the variance ci.
Args:
new_data: data.ExpData to append
delta: the error tolerance
Returns:
A confidence interval that holds with probability 1-delta.
"""
if not 0 < delta < 1:
raise ValueError(f"Given delta={delta}, but 0<delta<1 is required.")
self._num_pulled += 1
self.delta = delta
if self._split_data is None:
self._split_data = self._data_transformer(new_data).k_fold(self.n_splits)
else:
if self._stable_splits:
new_split = self._data_transformer(new_data).k_fold(self.n_splits)
for i in range(self.n_splits):
self._split_data[i].append(new_split[i])
else:
self._split_data = self._data_transformer(new_data).k_fold(
self.n_splits)
# Use the first split to train eta
self._eta.fit(self._split_data[0])
if self.n_bootstraps == 0:
n_bootstraps = int(np.ceil(4 / delta))
else:
n_bootstraps = self.n_bootstraps
# Compute tau
b_samples = []
for _ in range(n_bootstraps):
# Compute the data with the bth fold missing
(in_data, out_data) = self._split_data[0].subsample(1.0 / n_bootstraps)
# Calculate tau on this fold
self._eta.fit(in_data)
b_samples.append(np.var(self._eta.calculate_score(out_data)))
# Next, we need to calculate the percentiles of the bootstrap samples
b_samples = np.array(b_samples)
b_samples.sort()
ci_low = b_samples[int(np.floor(delta / 2 * n_bootstraps))]
ci_high = b_samples[int(np.ceil((1 - delta / 2) * n_bootstraps)) - 1]
self._ci = (ci_low, ci_high)
# Use the second split to learn the ATE and variance
self._ate = np.mean(self._eta.calculate_score(self._split_data[1]))
self._var_est = np.var(self._eta.calculate_score(self._split_data[1]))
return self._ci
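# A hedged construction sketch (not used elsewhere in this module): it mirrors
# how paper_utils builds a SampleSplittingArm, but on the small front-door SCM
# from example_scm. The hyperparameter values and the sample size are
# illustrative placeholders, and the CLT interval is chosen so that the
# finite-sample burn-in requirement does not apply.
def _example_sample_splitting_arm():
  from causal_effect_bandits import example_scm
  scm_gen, _ = example_scm.frontdoor_scm()
  arm = SampleSplittingArm(
      "frontdoor",
      eta=parameters.FrontDoorLinearFiniteZ(
          n_features=1, min_units=5, min_overlap=.25),
      # The front-door estimator only needs Z, the second covariate column.
      data_transformer=data.get_coordinate_mask_fn([1]),
      ci_algorithm=CIAlgorithm.CLT,
      cost=1.)
  arm.update(scm_gen.generate(2000), delta=.05)
  return arm.ate, arm.var_est, arm.ci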
| abcei_mab-main | causal_effect_bandits/arms.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""init file for causal_effect_bandits package."""
from causal_effect_bandits import arms
from causal_effect_bandits import bandits
from causal_effect_bandits import data
from causal_effect_bandits import nonlinear_utils
from causal_effect_bandits import paper_utils
from causal_effect_bandits import parameters
from causal_effect_bandits import plotting_utils
from causal_effect_bandits import scm
| abcei_mab-main | causal_effect_bandits/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""A unit test for the back_door formula."""
from absl.testing import absltest
from causal_effect_bandits import data
from causal_effect_bandits import example_scm
import numpy as np
class ExpDataTest(absltest.TestCase):
def test_k_fold(self):
z = np.arange(8).reshape(4, 2)
x = np.arange(4) - 2
y = np.arange(4) * 2
new_data = data.ExpData(z, x, y)
folds = new_data.k_fold(1)
assert len(folds) == 1
np.testing.assert_array_equal(folds[0].cov, new_data.cov)
np.testing.assert_array_equal(folds[0].exp, new_data.exp)
np.testing.assert_array_equal(folds[0].rsp, new_data.rsp)
np.random.seed(0)
folds = new_data.k_fold(2)
assert len(folds) == 2
np.testing.assert_array_equal(folds[0].cov, [[0, 1], [6, 7]])
np.testing.assert_array_equal(folds[1].cov, [[2, 3], [4, 5]])
np.testing.assert_array_equal(folds[0].exp, [-2, 1])
np.testing.assert_array_equal(folds[1].exp, [-1, 0])
np.testing.assert_array_equal(folds[0].rsp, [0, 6])
np.testing.assert_array_equal(folds[1].rsp, [2, 4])
class TestTabularCPD(absltest.TestCase):
def test_fit(self):
table = data.TabularCPD(10)
x = np.array([0, 1, 2] * 3)
z = np.array([3] * 3 + [4] * 3 + [5] * 3)
table.fit(x, z)
np.testing.assert_array_equal(table.x_support(), [0, 1, 2])
np.testing.assert_array_equal(table.z_support(), [3, 4, 5])
for xi in [0, 1, 2]:
for zi in [3, 4, 5]:
assert table.predict([xi], [zi]) == 1 / 3.0
class SCMTest(absltest.TestCase):
def test_scm_frontdoor_statistics(self):
"""Tests scm generating by checking statistics."""
(scm_gen, params) = example_scm.frontdoor_scm()
n = 100000
new_data = scm_gen.generate(n)
tol = 4 * max(new_data.rsp)**2 / np.sqrt(n)
## Test the statistics of the distribution
# check means
np.testing.assert_allclose(np.mean(new_data.exp), params["mu_x"], atol=tol)
np.testing.assert_allclose(np.mean(new_data.rsp), params["mu_y"], atol=tol)
np.testing.assert_allclose(
np.mean(new_data.cov[:, 1]), params["mu_z"], atol=tol)
np.testing.assert_allclose(
np.mean(new_data.cov[:, 0]), params["mu_u"], atol=tol)
# check variances
np.testing.assert_allclose(
np.var(new_data.cov[:, 0]), params["var_u"], atol=tol * params["var_u"])
np.testing.assert_allclose(
np.var(new_data.cov[:, 1]), params["var_z"], atol=tol * params["var_z"])
np.testing.assert_allclose(
np.var(new_data.exp), params["var_x"], atol=tol * params["var_x"])
np.testing.assert_allclose(
np.var(new_data.rsp), params["var_y"], atol=4 * tol * params["var_y"])
def test_scm_back_door_statistics(self):
(scm_gen, params) = example_scm.back_door_scm()
# Test the statistics of the distribution
n = 1000000
new_data = scm_gen.generate(n)
tol = np.mean(new_data.rsp**2) / np.sqrt(n)
assert params["tau"] == 10
# check means
np.testing.assert_allclose(np.mean(new_data.exp), params["mu_x"], tol)
np.testing.assert_allclose(np.mean(new_data.rsp), params["mu_y"], tol)
np.testing.assert_allclose(np.mean(new_data.cov), params["mu_z"], tol)
# check variances
np.testing.assert_allclose(
np.var(new_data.exp), params["var_x"], atol=tol * params["var_x"])
np.testing.assert_allclose(
np.var(new_data.cov), params["var_z"], atol=tol * params["var_z"])
np.testing.assert_allclose(
np.var(new_data.rsp), params["var_y"], atol=2 * tol * params["var_y"])
if __name__ == "__main__":
absltest.main()
| abcei_mab-main | causal_effect_bandits/data_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Implementations of bandit algorithms over Arms."""
import dataclasses
from typing import List, Mapping, Optional
from causal_effect_bandits import arms
from causal_effect_bandits import data
import numpy as np
@dataclasses.dataclass
class BanditData:
"""Stores the results of running a bandit algorithm.
  Attributes:
    lcb_by_arm: dict(arm object, np.array)
      A list of the lcbs returned by the arm for every round.
    ucb_by_arm: dict(arm object, np.array)
      A list of the ucbs returned by the arm for every round.
    var_est_by_arm: dict(arm object, np.array)
      A list of the variance estimates returned by the arm for every round.
      It is populated even when the arm is not updated.
best_arm: List[arm]
A list of arms with the lowest ucb of all the arms with the lowest
estimate of the variance. Could be more than one.
samples_by_arm: Mapping(arm object, np.array)
a dict of the samples allocated to each arm during each period
cum_samples: np.array
a list of the cumulative number of samples used for all arms.
"""
lcb_by_arm: Mapping[arms.VarianceEstimatorArm, List[float]]
ucb_by_arm: Mapping[arms.VarianceEstimatorArm, List[float]]
var_est_by_arm: Mapping[arms.VarianceEstimatorArm, List[float]]
samples_by_arm: Mapping[arms.VarianceEstimatorArm, List[float]]
cum_samples: List[float]
best_arm: List[arms.VarianceEstimatorArm]
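# Illustrative consumption sketch (hedged; not part of this module's API): a
# BanditData record returned by run() is typically passed straight to
# plotting_utils.plot_bandit_results, e.g.
#
#   bdata = lucb_bandit.run()
#   fig, ax = plt.subplots()
#   plotting_utils.plot_bandit_results(ax, arm_list, bdata)
#
# where `lucb_bandit` and `arm_list` come from a builder such as
# paper_utils.get_section_5_1_example.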
class BAIBanditAlgorithm:
"""A class for best arm identification in bandit problems.
Parameters:
_arms: a list of arms.VarianceEstimatorArms
The arms that will be pulled by this algorithm
_num_arms: int
The number of arms.
_prob_model: data.DataGenerator
a way to generate i.i.d. (X, T, Y) data
error_prob: float in (0,1)
      the allowed probability of error (usually denoted delta)
error_tol: float > 0
the allowed suboptimality of the returned arm (usually denoted epsilon)
confidence: float in (0,1)
the desired probability of returning the best arm.
Set to None if we want a fixed budget problem.
sample_limit: int > 0
      the maximum number of rounds. Set to None if you want a fixed
confidence problem.
units_per_round: int
      The number of units sampled every bandit round.
Methods:
run:
runs the bandit until the completion criteria are met
collect_bounds:
Assembles the ci and var_est of the arms into three np.arrays.
_check_fixed_conf_end_condition():
      checks if the end condition of a fixed-confidence bandit is met
    _select_arms_with_mask(mask)
given a List[boolean] with num_arms entries, this method returns a sublist
of the arms where the corresponding entry in mask is True.
"""
def __init__(
self,
arm_list: List[arms.VarianceEstimatorArm],
prob_model: data.DataGenerator,
error_prob: float = .05,
error_tol: float = 0,
confidence: float = .05,
sample_limit=np.inf,
units_per_round: int = 10,
):
self._arms = arm_list
self._num_arms = len(self._arms)
self._prob_model = prob_model
if not 0.0 < error_prob < 1.0:
raise ValueError(
f"error_prob={error_prob}; must be in the unit interval.")
self.error_prob = error_prob
if error_tol < 0.0:
raise ValueError(f"error_tol={error_tol}; must be nonnegative.")
self.error_tol = error_tol
if confidence is None and np.isinf(sample_limit):
raise ValueError(
"Either the confidence or the number of rounds must be specified")
if confidence is not None:
if not 0 < confidence < 1:
raise ValueError(
f"confidence={confidence}; must be in the unit interval")
self.confidence = confidence
self.sample_limit = sample_limit
self.units_per_round = units_per_round
def reset(self):
"""Resets the bandit to a just-instantiated state."""
for a in self._arms:
a.reset()
def _check_fixed_conf_end_condition(self):
"""Contains the logic to determine if the bandit algorithm is done.
Uses the fixed confidence setting.
The game is done if the ucb of the best arm (the lowest ucb of all the arms
    that have the minimum variance estimate) is lower than the lcb of the remaining
arms (all arms that do not share the minimum variance estimate).
Returns:
boolean: is the best arm (epsilon, delta)-PAC?
best_arm: the best arm
"""
if self.error_prob is None: # we are a fixed budget bandit
return (False, [])
lcbs, ucbs, var_ests = self.collect_bounds()
if any(np.isinf(ucbs)): # one of the ucbs is infinity
return (False, [])
min_var = min(var_ests)
if np.isnan(min_var): # one of the arms returned a nan
return (False, [])
# find the arm with the smallest variance.
min_var_arm = np.argmin(var_ests)
# check to see if the lcb of the remaining arms is higher than the ucb
# of min_var_arm.
remaining_lcbs = [
lcbs[i] for i in range(len(self._arms)) if i != min_var_arm
]
return (ucbs[min_var_arm] < min(remaining_lcbs) - self.error_tol,
[self._arms[min_var_arm]])
def _select_arms_with_mask(self, boolean_mask):
"""Returns the arms where boolean_mask is true.
Args:
boolean_mask: a list of booleans
Returns:
the arms corresponding to the true positions in the mask.
"""
assert len(boolean_mask) == self._num_arms
return [a for (a, m) in zip(self._arms, boolean_mask) if m]
def collect_bounds(self):
"""Collects the upper and lower confidence intervals and variance estimates.
Returns:
Lists of the lcbs, ucbs, and point estimates of the asymptotic variances.
"""
ucbs = np.array([a.cost * a.ci[1] for a in self._arms])
lcbs = np.array([a.cost * a.ci[0] for a in self._arms])
sigma2s = np.array([a.cost * a.var_est for a in self._arms])
return (lcbs, ucbs, sigma2s)
class UniformAlgorithm(BAIBanditAlgorithm):
"""Implements the trivial algorithm that just pulls all arms equally."""
def run(self, all_data: Optional[data.ExpData] = None) -> BanditData:
"""Runs the bandit algorithm until termination.
    Termination happens when either the required confidence is reached or
    sample_limit is reached.
If all_data is passed, then we assume we should sample in stable order:
every arm will sequentially read data from the beginning of data
independently of the other arms or sample selection strategy.
This ensures that the confidence sequences for an arm are identical across
different algorithms, making a comparison easier.
Args:
all_data: optional data.ExpData specifying all the samples the algorithm
could use. The algorithm could terminate before consuming all the data.
If all_data is not passed, then data is generated from self.prob_model.
Returns:
A BanditData dataclass.
"""
stable_order = all_data is not None
# Allocate variables for tracking the history
lcb_by_arm = {a: [] for a in self._arms}
ucb_by_arm = {a: [] for a in self._arms}
var_est_by_arm = {a: [] for a in self._arms}
cum_samples = []
samples_by_arm = {a: [] for a in self._arms}
total_samples = 0
while True:
if stable_order:
# All remaining arms have been pulled the same number of times.
n_start = self._arms[0].get_num_units()
new_samples = all_data[n_start:n_start + self.units_per_round]
else:
new_samples = self._prob_model.generate(self.units_per_round)
# We need to calculate the necessary delta:
for arm in self._arms:
arm.update(new_samples, self.error_prob / len(self._arms))
lcb_by_arm[arm].append(arm.cost * arm.ci[0])
ucb_by_arm[arm].append(arm.cost * arm.ci[1])
var_est_by_arm[arm].append(arm.cost * arm.var_est)
samples_by_arm[arm].append(self.units_per_round)
total_samples += self._num_arms * self.units_per_round
cum_samples.append(total_samples)
if total_samples > self.sample_limit:
_, _, var_ests = self.collect_bounds()
# Select the arms with the lowest var estimates.
best_arms = self._select_arms_with_mask(
[v == min(var_ests) for v in var_ests])
break
# Check end condition
if self.error_prob is not None: # This is a fixed confidence bandit
(game_ends, best_arms) = self._check_fixed_conf_end_condition()
if game_ends:
break
return BanditData(lcb_by_arm, ucb_by_arm, var_est_by_arm, samples_by_arm,
cum_samples, best_arms)
class LUCBAlgorithm(BAIBanditAlgorithm):
"""Implements the LUCB method."""
def __init__(
self,
arm_list: List[arms.VarianceEstimatorArm],
prob_model: data.DataGenerator,
error_prob: float = .05,
error_tol: float = 0,
confidence=None,
sample_limit=None,
units_per_round=200,
):
super().__init__(arm_list, prob_model, error_prob, error_tol, confidence,
sample_limit, units_per_round)
def run(self, all_data: Optional[data.ExpData] = None) -> BanditData:
"""Runs the bandit algorithm until termination.
    Termination happens when either the required confidence is reached or
    sample_limit is reached.
If all_data is passed, then we assume we should sample in stable order:
every arm will sequentially read data from the beginning of data
independently of the other arms or sample selection strategy.
This ensures that the confidence sequences for an arm are identical across
different algorithms, making a comparison easier.
Args:
all_data: optional data.ExpData specifying all the samples the algorithm
could use. The algorithm could terminate before consuming all the data.
If all_data is not passed, then data is generated from self.prob_model.
    Returns:
      A BanditData dataclass with the following fields:
      lcb_by_arm: dict(arm object, np.array)
        A list of the lcbs returned by the arm for every round.
      ucb_by_arm: dict(arm object, np.array)
        A list of the ucbs returned by the arm for every round.
      var_est_by_arm: dict(arm object, np.array)
        A list of the variance estimates returned by the arm for every round.
      best_arm: List[arm]
        A list of the arms with the lowest ucb among all arms with the lowest
        estimate of the variance. Could contain more than one arm.
      samples_by_arm: Mapping(arm object, np.array)
        A dict of the samples allocated to each arm during each period.
      cum_samples: np.array
        A list of the cumulative number of samples used.
"""
stable_order = all_data is not None
if stable_order:
new_samples = all_data[:self.units_per_round]
else:
new_samples = self._prob_model.generate(self.units_per_round)
# Allocate variables for tracking the history
lcb_by_arm = {a: [] for a in self._arms}
ucb_by_arm = {a: [] for a in self._arms}
var_est_by_arm = {a: [] for a in self._arms}
cum_samples = []
samples_by_arm = {a: [] for a in self._arms}
# Update all arms once
total_samples = self._num_arms * self.units_per_round
for arm in self._arms:
arm.update(new_samples, self.error_prob / len(self._arms))
lcb_by_arm[arm].append(arm.cost * arm.ci[0])
ucb_by_arm[arm].append(arm.cost * arm.ci[1])
var_est_by_arm[arm].append(arm.cost * arm.var_est)
samples_by_arm[arm].append(self.units_per_round)
cum_samples.append(total_samples)
while True:
if total_samples > self.sample_limit:
_, _, var_ests = self.collect_bounds()
# Select the arms with the lowest var estimates.
best_arm = self._select_arms_with_mask(
[v == min(var_ests) for v in var_ests])
break
arms_to_update = self.select_arms()
for arm in arms_to_update:
if not stable_order:
new_samples = self._prob_model.generate(self.units_per_round)
else:
n_start = arm.get_num_units()
new_samples = all_data[n_start:n_start + self.units_per_round]
arm.update(new_samples, self.error_prob / len(self._arms))
# Record the arm outputs
for arm in self._arms:
lcb_by_arm[arm].append(arm.cost * arm.ci[0])
ucb_by_arm[arm].append(arm.cost * arm.ci[1])
var_est_by_arm[arm].append(arm.cost * arm.var_est)
samples_by_arm[arm].append(self.units_per_round if arm in
arms_to_update else 0)
total_samples += 2 * self.units_per_round
cum_samples.append(total_samples)
# Check end condition
if self.error_prob is not None: # This is a fixed confidence bandit
(game_ends, best_arm) = self._check_fixed_conf_end_condition()
if game_ends:
break
return BanditData(lcb_by_arm, ucb_by_arm, var_est_by_arm, samples_by_arm,
cum_samples, best_arm)
def select_arms(self) -> List[arms.VarianceEstimatorArm]:
"""Picks the arms to sample next.
    For LUCB, we sample two arms: among the arms with the lowest variance
    estimate, the one with the highest upper confidence bound; and among the
    remaining arms, the one with the lowest lower confidence bound.
Returns:
A list of arms selected by the algorithm.
"""
lcbs, ucbs, var_ests = self.collect_bounds()
if any([v is None for v in var_ests]):
return np.random.choice(self._arms, 2, replace=False) # choose at random
lowest_var = min(var_ests)
if np.isnan(lowest_var):
print(
"Warning: some of the confidence intervals are nan; choosing the next arms uniformly at random"
)
return np.random.choice(self._arms, size=2, replace=False)
best_arm_mask = [v == lowest_var for v in var_ests]
# Select the arm in this set with the highest ucb
high_ucb = max(ucbs[best_arm_mask])
high_ucb_mask = [u == high_ucb for u in ucbs]
# Also select the arm with the lowest lcb of the remaining arms
low_lcb = min(lcbs[[not i for i in best_arm_mask]])
low_lcb_mask = [l == low_lcb for l in lcbs]
# We will pick between these two arms
u_index = np.random.choice(np.arange(self._num_arms)[high_ucb_mask])
l_index = np.random.choice(np.arange(self._num_arms)[low_lcb_mask])
return [self._arms[u_index], self._arms[l_index]]
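# The sketch below is illustrative only and is not used elsewhere in the
# library. It shows how two estimator arms are typically wired to an
# LUCBAlgorithm, mirroring arms_test.py and bandits_test.py; the arm
# arguments, sample_limit and units_per_round are assumptions chosen just to
# keep the example short.
def _lucb_usage_sketch():
  # Local imports so the sketch adds no module-level dependencies.
  from causal_effect_bandits import example_scm
  from causal_effect_bandits import parameters
  scm_gen, _ = example_scm.frontdoor_scm()
  def make_arm(name):
    # Both arms use the front-door estimator on the same SCM purely for
    # brevity; in practice each arm encodes a different adjustment strategy.
    return arms.SampleSplittingArm(
        name,
        eta=parameters.FrontDoorLinearFiniteZ(
            n_features=1, min_ratio_to_uniform=5),
        data_transformer=data.get_remove_coordinates_fn(0),  # drop hidden U
        ci_algorithm=arms.CIAlgorithm.FINITE_SAMPLE,
        sub_gscale=1,
        tau_bound=2,
        burn_in=1000,
        rho=1,
    )
  bandit = LUCBAlgorithm(
      [make_arm('arm_a'), make_arm('arm_b')],
      prob_model=scm_gen,
      confidence=.05,
      sample_limit=20000,
      units_per_round=2000,
  )
  return bandit.run()  # a BanditData with best_arm, bounds and sample counts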
class SuccessiveEliminationAlgorithm(BAIBanditAlgorithm):
"""Implements the successive elimination algorithm.
Attributes:
arms: list of arms.VarianceEstimatorArm objects
prob_model: data.DataGenerator object
sample_limit: int an upper bound on the total number of samples
units_per_round: int the number of units to sample for every epoch of the
algorithm
stable_order: whether the algorithm reads the data sequentially or not.
"""
def __init__(
self,
arm_list: List[arms.VarianceEstimatorArm],
prob_model: data.DataGenerator,
error_prob=.05,
error_tol=0,
confidence=None,
sample_limit=None,
units_per_round=200,
):
super().__init__(arm_list, prob_model, error_prob, error_tol, confidence,
sample_limit, units_per_round)
def run(self, all_data: Optional[data.ExpData] = None):
"""Runs the bandit algorithm until termination.
    Termination happens if either the confidence is reached or
sample_limit is reached.
If all_data is passed, then we assume we should sample in stable order:
every arm will sequentially read data from the beginning of data
independently of the other arms or sample selection strategy.
This ensures that the confidence sequences for an arm are identical across
different algorithms, making a comparison easier.
Args:
all_data: optional data.ExpData specifying all the samples the algorithm
could use. The algorithm could terminate before consuming all the data.
If all_data is not passed, then data is generated from self.prob_model.
Returns:
      A BanditData dataclass with the following fields:
      lcb_by_arm: dict(arm object, np.array)
        A list of the lcbs returned by the arm for every round
      ucb_by_arm: dict(arm object, np.array)
        A list of the ucbs returned by the arm for every round
      var_est_by_arm: dict(arm object, np.array)
        A list of the variance estimates returned by the arm for every round
      best_arm: List[arm]
        A list of arms with the lowest ucb among all the arms with the lowest
        estimate of the variance. Could be more than one.
      samples_by_arm: Mapping(arm object, np.array)
        a dict of the samples allocated to each arm during each period
      cum_samples: np.array
        a list of the cumulative number of samples used
"""
stable_order = all_data is not None
# Allocate variables for tracking the history
lcb_by_arm = {a: [] for a in self._arms}
ucb_by_arm = {a: [] for a in self._arms}
var_est_by_arm = {a: [] for a in self._arms}
cum_samples = []
samples_by_arm = {a: [] for a in self._arms}
total_samples = 0
# Initialize
candidate_arms = list(self._arms) # copy arms.
epoch = 1
while len(candidate_arms) > 1:
epoch += 1
if total_samples > self.sample_limit:
_, _, var_ests = self.collect_bounds()
# Select the arms with the lowest var estimates.
candidate_arms = self._select_arms_with_mask(
[v == min(var_ests) for v in var_ests])
break
for arm in candidate_arms:
if not stable_order:
new_samples = self._prob_model.generate(self.units_per_round)
else:
n_start = arm.get_num_units()
new_samples = all_data[n_start:n_start + self.units_per_round]
arm.update(new_samples, self.error_prob / len(self._arms))
total_samples += len(candidate_arms) * self.units_per_round
# Record the arm outputs
cum_samples.append(total_samples)
for arm in self._arms:
lcb_by_arm[arm].append(arm.cost * arm.ci[0])
ucb_by_arm[arm].append(arm.cost * arm.ci[1])
var_est_by_arm[arm].append(arm.cost * arm.var_est)
samples_by_arm[arm].append(0)
for arm in candidate_arms:
samples_by_arm[arm][-1] = self.units_per_round
# Now we decide the ending condition
# trim down the candidate set by finding the lowest ucb of the arms
# with the minimum var_est.
candidate_var_ests = [var_est_by_arm[arm][-1] for arm in candidate_arms]
lowest_var = min(candidate_var_ests)
low_var_mask = [v == lowest_var for v in candidate_var_ests]
lowest_ucb = min([
ucb_by_arm[arm][-1]
for i, arm in enumerate(candidate_arms)
if low_var_mask[i]
])
# We now eliminate all arms with lcbs that are larger than lowest_ucb.
new_candidate_arms = []
for arm in candidate_arms:
if lcb_by_arm[arm][-1] < lowest_ucb:
new_candidate_arms.append(arm)
candidate_arms = new_candidate_arms
# Check end condition
if len(candidate_arms) == 1:
break
return BanditData(lcb_by_arm, ucb_by_arm, var_est_by_arm, samples_by_arm,
cum_samples, candidate_arms)
| abcei_mab-main | causal_effect_bandits/bandits.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""A unit test for VarianceEstimator arms."""
from absl.testing import absltest
from causal_effect_bandits import data
from causal_effect_bandits import example_scm
from causal_effect_bandits import parameters
import numpy as np
class EstimatorTest(absltest.TestCase):
def test_frontdoorlinearfinitez(self):
"""Tests the FrontDoorLinearFiniteZ NuisanceParameter."""
np.random.seed(0)
scm_gen, params = example_scm.frontdoor_scm()
n = 100000
eta = parameters.FrontDoorLinearFiniteZ(
n_features=1, min_ratio_to_uniform=5)
# Now we check the data generation process and the estimators
d = scm_gen.generate(n)
eta.fit(data.get_remove_coordinates_fn(0)(d))
tol = .1
def g(x):
return 1 / (1 + np.exp(-x))
# Check the model is being fit correctly.
    one = np.ones(1, dtype=int)  # Tabular CPDs must have matching dtypes
zero = np.zeros(1, dtype=int)
np.testing.assert_allclose(eta.exp_prob, params["mu_x"],
tol * params["mu_x"])
np.testing.assert_allclose(
eta.cov_given_exp.predict(x=one, z=one), g(params["beta"]), tol)
np.testing.assert_allclose(
eta.cov_given_exp.predict(x=one, z=zero), 1 - g(params["beta"]), tol)
np.testing.assert_allclose(
eta.cov_given_exp.predict(x=zero, z=one), .5, tol)
np.testing.assert_allclose(
eta.cov_given_exp.predict(x=zero, z=zero), .5, tol)
np.testing.assert_allclose(
eta.y_response.predict([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
]), [
0, params["gamma"], params["alpha_2"] * params["mu_u"],
params["gamma"] + params["alpha_2"]
],
atol=.2)
def test_AIPWLinearLogistic(self):
"""Tests the AIPWLinearLogistic NuisanceParameter."""
np.random.seed(0)
scm_gen, params = example_scm.back_door_scm()
n = 100000
eta = parameters.AIPWLinearLogistic(n_features=1)
# Now we check the data generation process and the estimators
d = scm_gen.generate(n)
eta.fit(d)
tol = .1
np.testing.assert_allclose(eta.get_response_parameters()[0],
[params["tau"], params["beta"]], tol)
if __name__ == "__main__":
absltest.main()
| abcei_mab-main | causal_effect_bandits/parameters_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""A unit test for VarianceEstimator arms."""
from absl.testing import absltest
from causal_effect_bandits import arms
from causal_effect_bandits import data
from causal_effect_bandits import example_scm
from causal_effect_bandits import parameters
from causal_effect_bandits import test_utils
import numpy as np
from sklearn import linear_model
class ArmTest(absltest.TestCase):
def test_frontdoor_arm(self):
"""Test the statistics of the distribution."""
np.random.seed(0)
scm_gen, args = example_scm.frontdoor_scm()
n = 50000
samples = scm_gen.generate(n)
arm = arms.SampleSplittingArm(
'test_arm',
eta=parameters.FrontDoorLinearFiniteZ(
n_features=1, min_ratio_to_uniform=5),
# remove the U coordinate
data_transformer=data.get_remove_coordinates_fn(0),
ci_algorithm=arms.CIAlgorithm.FINITE_SAMPLE,
sub_gscale=1,
tau_bound=2,
burn_in=1000,
rho=1,
)
# Next, we run the arm on a sequence of data and make sure it approaches
# a sensible value.
arm_output = test_utils.run_single_arm(
arm, data_gen=scm_gen, max_samples=10000, increment=1500)
def g(x):
return 1 / (1 + np.exp(-x))
# Set the nuisance parameters to their true values
arm._eta.exp_prob = args['mu_x']
beta = args['beta']
    one = np.ones(1, dtype=int)  # Tabular CPDs must have matching dtypes
zero = np.zeros(1, dtype=int)
arm._eta.cov_given_exp.table[one.tobytes()][one.tobytes()] = g(beta)
arm._eta.cov_given_exp.table[one.tobytes()][zero.tobytes()] = 1 - g(beta)
arm._eta.cov_given_exp.table[zero.tobytes()][one.tobytes()] = .5
arm._eta.cov_given_exp.table[zero.tobytes()][zero.tobytes()] = .5
if isinstance(arm._eta.y_response, linear_model.Ridge):
arm._eta.y_response.coef_ = np.array([args['alpha_2'], args['gamma']])
arm._eta.y_response.intercept_ = args['alpha_2'] * args['mu_u']
var_approx = np.var(
arm._eta.calculate_score(data.get_remove_coordinates_fn(0)(samples)))
# Check estimators
np.testing.assert_allclose(arm.var_est, var_approx, rtol=.05)
# Check the LCB is less than the var_estimate
np.testing.assert_array_less(arm_output['LCBs'],
arm_output['var_estimates'])
# Check the UCB is greater than the var_estimate
np.testing.assert_array_less(arm_output['var_estimates'],
arm_output['UCBs'])
# Check the number of samples is correct
np.testing.assert_equal(arm_output['n_samples'], 1500 * np.arange(1, 8))
def test_back_door_arm(self):
np.random.seed(0)
scm_gen, _ = example_scm.back_door_scm()
arm = arms.SampleSplittingArm(
'test_arm',
eta=parameters.AIPWLinearLogistic(n_features=1),
data_transformer=data.get_identity_fn(),
ci_algorithm=arms.CIAlgorithm.FINITE_SAMPLE,
sub_gscale=1,
tau_bound=10,
burn_in=1000,
rho=1,
)
# Next, we run the arm on a sequence of data and make sure it approaches
# a sensible value.
arm_output = test_utils.run_single_arm(
arm, data_gen=scm_gen, max_samples=10000, increment=1500)
# Check the LCB is less than the var_estimate
np.testing.assert_array_less(arm_output['LCBs'],
arm_output['var_estimates'])
# Check the UCB is greater than the var_estimate
np.testing.assert_array_less(arm_output['var_estimates'],
arm_output['UCBs'])
# Check the number of samples is correct
np.testing.assert_equal(arm_output['n_samples'], 1500 * np.arange(1, 8))
if __name__ == '__main__':
absltest.main()
| abcei_mab-main | causal_effect_bandits/arms_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""A unit test for VarianceEstimator arms."""
from typing import Callable, Tuple
from absl.testing import absltest
from causal_effect_bandits import arms
from causal_effect_bandits import bandits
from causal_effect_bandits import data
from causal_effect_bandits import example_scm
import numpy as np
class ConstantArm(arms.VarianceEstimatorArm):
"""A simple arm used for testing the bandit algs.
Always returns a constant var_est, and the CI width is width_fn(n),
where n is the total number of samples.
"""
def __init__(
self,
name: str,
var_est: float,
width_fn: Callable[[int], float],
):
self._name = name
self._var_est = var_est
self.width_fn = width_fn
self.n_samples = 0
self._ci = (0, np.inf)
self.cost = 1
def update(self, new_data: data.ExpData, delta: float) -> Tuple[float, float]:
self.n_samples += len(new_data)
self._ci = (self.var_est - self.width_fn(self.n_samples),
self.var_est + self.width_fn(self.n_samples))
return self._ci
class BanditTest(absltest.TestCase):
def test_uniform_bandit(self):
"""Tests the UniformAlgorithm bandit on easy data.
This method verifies that the correct arms are pulled
the correct number of times and that the confidence
intervals are correctly accumulated.
"""
scm_gen, _ = example_scm.frontdoor_scm()
def width_fn(n):
return np.sqrt(20) / np.sqrt(n)
arm_list = [
ConstantArm('arm1', var_est=1, width_fn=width_fn),
ConstantArm('arm2', var_est=2, width_fn=width_fn),
ConstantArm('arm3', var_est=2, width_fn=width_fn)
]
bandit = bandits.UniformAlgorithm(
arm_list,
prob_model=scm_gen,
confidence=.05,
sample_limit=50,
units_per_round=5,
)
bdata = bandit.run()
# Verify the correct best arm is returned
np.testing.assert_equal(bdata.best_arm, [arm_list[0]])
# Test that the correct var estimates are returned
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[0]], [1] * 4)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[1]], [2] * 4)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[2]], [2] * 4)
# Test that the correct number of samples are returned: every arm is
# sampled every time
correct_samples = [5] * 4
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[0]],
correct_samples)
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[1]],
correct_samples)
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[2]],
correct_samples)
# Test that the lower confidence bounds are correct
correct_width = [width_fn(n) for n in 5 * np.arange(1, 5)]
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[0]],
np.ones(4) - correct_width)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[1]],
2 * np.ones(4) - correct_width)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[2]],
2 * np.ones(4) - correct_width)
# Test that the upper confidence bounds are correct
correct_width = [width_fn(n) for n in 5 * np.arange(1, 5)]
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[0]],
np.ones(4) + correct_width)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[1]],
2 * np.ones(4) + correct_width)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[2]],
2 * np.ones(4) + correct_width)
def test_successive_elimination_bandit(self):
"""Tests the SuccessiveEliminationAlgorithm bandit on easy data.
This method verifies that the correct arms are pulled
the correct number of times and that the confidence
intervals are correctly accumulated.
"""
scm_gen, _ = example_scm.frontdoor_scm()
def width_fn(n):
return np.sqrt(20) / np.sqrt(n)
arm_list = [
ConstantArm('arm1', var_est=1, width_fn=width_fn),
ConstantArm('arm2', var_est=2.25, width_fn=width_fn),
ConstantArm('arm3', var_est=3, width_fn=width_fn)
]
bandit = bandits.SuccessiveEliminationAlgorithm(
arm_list,
prob_model=scm_gen,
confidence=.05,
sample_limit=100,
units_per_round=10,
)
bdata = bandit.run()
# Test that the correct best arm is returned
np.testing.assert_equal(bdata.best_arm, [arm_list[0]])
# Test that the variance estimates are correct
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[0]], [1] * 5)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[1]], [2.25] * 5)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[2]], [3] * 5)
# Test that the correct number of samples are returned
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[0]], [10] * 5)
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[1]], [10] * 5)
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[2]],
[10, 10, 0, 0, 0])
correct_width_1 = [width_fn(n) for n in 10 * np.arange(1, 6)]
correct_width_2 = [width_fn(n) for n in 10 * np.arange(1, 6)]
correct_width_3 = [width_fn(n) for n in [10, 20, 20, 20, 20]]
# Test that the lower confidence bounds are correct
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[0]],
np.ones(5) - correct_width_1)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[1]],
2.25 * np.ones(5) - correct_width_2)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[2]],
3 * np.ones(5) - correct_width_3)
# Test that the upper confidence bounds are correct
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[0]],
np.ones(5) + correct_width_1)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[1]],
2.25 * np.ones(5) + correct_width_2)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[2]],
3 * np.ones(5) + correct_width_3)
def test_lucb_bandit(self):
"""Tests the LUCBAlgorithm bandit on easy data.
This method verifies that the correct arms are pulled
the correct number of times and that the confidence
intervals are correctly accumulated.
"""
scm_gen, _ = example_scm.frontdoor_scm()
def width_fn(n):
return np.sqrt(20) / np.sqrt(n)
arm_list = [
ConstantArm('arm1', var_est=1, width_fn=width_fn),
ConstantArm('arm2', var_est=2, width_fn=width_fn),
ConstantArm('arm3', var_est=2.5, width_fn=width_fn)
]
bandit = bandits.LUCBAlgorithm(
arm_list,
prob_model=scm_gen,
confidence=.05,
sample_limit=50,
units_per_round=5,
)
bdata = bandit.run()
# Test that the correct best arm is returned
np.testing.assert_equal(bdata.best_arm, [arm_list[0]])
# Test that the variance estimates are correct
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[0]], [1] * 5)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[1]], [2] * 5)
np.testing.assert_allclose(bdata.var_est_by_arm[arm_list[2]], [2.5] * 5)
# Test that the correct number of samples are returned
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[0]], [5] * 5)
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[1]],
[5, 5, 0, 5, 5])
np.testing.assert_allclose(bdata.samples_by_arm[arm_list[2]],
[5, 0, 5, 0, 0])
correct_width_1 = [width_fn(n) for n in 5 * np.arange(1, 6)]
correct_width_2 = [width_fn(n) for n in [5, 10, 10, 15, 20]]
correct_width_3 = [width_fn(n) for n in [5, 5, 10, 10, 10]]
# Test that the lower confidence bounds are correct
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[0]],
np.ones(5) - correct_width_1)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[1]],
2 * np.ones(5) - correct_width_2)
np.testing.assert_allclose(bdata.lcb_by_arm[arm_list[2]],
2.5 * np.ones(5) - correct_width_3)
# Test that the upper confidence bounds are correct
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[0]],
np.ones(5) + correct_width_1)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[1]],
2 * np.ones(5) + correct_width_2)
np.testing.assert_allclose(bdata.ucb_by_arm[arm_list[2]],
2.5 * np.ones(5) + correct_width_3)
if __name__ == '__main__':
absltest.main()
| abcei_mab-main | causal_effect_bandits/bandits_test.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Contains the class definitions for the estimators."""
from typing import Tuple
from absl import logging
from causal_effect_bandits import data
import numpy as np
import sklearn
from sklearn import linear_model
from typing_extensions import Protocol
class NuisanceParameter(Protocol):
"""Encodes the interface for nuisance parameters.
This class contains all the code to fit, model, and compute a nuisance
parameter from data, its influence function, and the ATE from it.
The definition of eta and phi depend on the structure of the
  nuisance parameter, so we encapsulate them together.
Attributes:
_nus_dim: int the output dimension of the nuisance parameter
_cov_dim: int the dimension of the covariates
_model: object
      models of the nuisance parameter (eta). Maps
from: ExpData: n_units * (_cov_dim + treatment_dim + response_dim)
to: n_units * _nus_dim
_data: ExpData the last ExpData that we used
_phi: function
the phi function: accepts n_units * _nus_dim, returns n_units * 1
_has_been_fit: boolean True if the model has been fit on enough data to make
a sensible prediction, e.g. both control and treatment groups have units
in them.
recent_data: ExpData The most recently fit data.
  Methods to be implemented by subclasses:
    fit: fits the model using an ExpData object.
    transform: returns an element-wise nuisance parameter evaluated on ExpData
    calculate_score: returns the elementwise score phi(W, eta(W)) on ExpData
    has_enough_samples: True if the model has seen enough data to be fit.
    reset: resets all the models to their default state
    set_to_truth: sets the nuisance parameters to their true values.
"""
def fit(self, d: data.ExpData) -> None:
"""Fits eta to the provided experimental data.
Args:
d: ExpData used to fit the nuisance parameter. Expected shapes are
d.cov: (n_units, n_features)
d.exp: (n_units,)
d.rsp: (n_units,)
"""
...
def reset(self) -> None:
...
def _phi(self, eta: np.ndarray, d: data.ExpData) -> np.ndarray:
"""Calculates phi(W, eta) for all W in d and eta(W).
Args:
eta: np.ndarray of shape (data.n_units, _nus_dim), which has been
evaluated on W.
d: ExpData of shape (data.n_units, _cov_dim)
Returns:
np.array of shape (data.n_units, )
"""
...
def transform(self, d: data.ExpData) -> np.ndarray:
"""Return eta(W) for all W in d.
Args:
d: ExpData to be evaluated with n_units
Returns:
a np.ndarray of shape (n_units,_nus_dim) for eta(W), W in d.
"""
...
def has_enough_samples(self) -> bool:
...
def calculate_cs_width(self, delta: float, sub_g: float, rho: float) -> float:
"""Calculates the width of a confidence sequence on the norm of eta error.
Guaranteed to hold with probability delta.
See the paper for a justification of the formula.
Args:
delta: the error probability tolerance
sub_g: the subGaussian parameter
rho: hyperparameter for the mixture of the boundary
Returns:
The width of a confidence sequence
"""
...
def set_to_truth(self, data_gen: data.DataGenerator) -> None:
"""Sets eta to the true value given a DataGenerator object.
Assumes it is identifiable in the framework of this nuisance parameter.
Args:
data_gen: a DataGenerator that is used to fit the true parameters.
"""
...
def calculate_score(self, d: data.ExpData) -> np.ndarray:
"""Calculates eta(W) and phi(W, eta) for all W in d.
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
...
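# The helper below is an illustrative sketch only (it is not used by the
# library): it shows the fit-then-score workflow that the arms in arms.py and
# the tests in parameters_test.py follow. The helper name and the two-fold
# split are assumptions made just for this example.
def _nuisance_score_sketch(eta: NuisanceParameter, d: data.ExpData):
  """Fits eta on the first half of d and scores the second half.
  The mean of the scores phi(W, eta) estimates the ATE, and their variance is
  the quantity the bandit arms compare.
  """
  n_half = len(d) // 2
  eta.fit(d[:n_half])
  scores = eta.calculate_score(d[n_half:])
  return np.mean(scores), np.var(scores)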
class LinearParameterBinaryTreatment(NuisanceParameter):
"""We model each conditional response as a linear function.
Specifically, we will fit
mu(1,x) = X * beta_treat
mu(0,x) = X * beta_cont
The corresponding phi function is
phi(W,eta) = mu(1,X) - mu(0,X)
"""
def __init__(self, n_features):
super().__init__()
self._cov_dim = n_features
self._nus_dim = 2 # The dimension of the nuisance parameter
self.treat_response = linear_model.Ridge(alpha=1.0)
self.cont_response = linear_model.Ridge(alpha=1.0)
self._model = (self.cont_response, self.treat_response)
self._data = None
self._has_been_fit = False
self.recent_data = None
def reset(self):
self.__init__(self._cov_dim)
def fit(self, d: data.ExpData) -> None:
"""Fits eta to the provided experimental data.
Args:
d: ExpData used to fit the nuisance parameter. Expected shapes are
d.cov: (n_units, n_features)
d.exp: (n_units,)
d.rsp: (n_units,)
"""
if d.n_features != self._cov_dim:
raise ValueError(
f"Data has {d.n_features} dimension but {self._cov_dim} was expected")
# Build the two datasets
treat_data = []
cont_data = []
treat_labels = []
cont_labels = []
for i, x in enumerate(d.cov):
if d.exp[i] == 1:
treat_data.append(x)
treat_labels.append(d.rsp[i])
else:
cont_data.append(x)
cont_labels.append(d.rsp[i])
self.recent_data = d
    if not treat_data or not cont_data:
logging.warning(
"Nuisance parameter was fit on data not including treatment and control"
)
logging.warning("Not fitting the models yet")
return
self.treat_response.fit(treat_data, treat_labels)
self.cont_response.fit(cont_data, cont_labels)
self._has_been_fit = True
def transform(self, d: data.ExpData) -> np.ndarray:
"""Return eta(W) for all W in d.
Args:
d: ExpData to be evaluated with n_units
Returns:
a np.ndarray of shape (n_units,_nus_dim) for eta(W), W in d.
"""
if not self._has_been_fit:
logging.warning("Model has not been properly fit yet")
nan_array = np.empty((d.n_units, self._nus_dim))
nan_array[:] = np.nan
return nan_array
if d.n_features != self._cov_dim:
raise ValueError(
f"Data has {d.n_features} dimension but {self._cov_dim} was expected")
eta = np.array(
[self.cont_response.predict(d.cov),
self.treat_response.predict(d.cov)]).transpose()
return eta
def _phi(self, eta: np.ndarray, d: data.ExpData) -> np.ndarray:
"""Calculates phi(W, eta) for all W in d and eta(W).
Args:
eta: np.ndarray of shape (d.n_units, _nus_dim), which has been evaluated
on W.
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
if eta.shape[1] != self._nus_dim:
raise ValueError(
f"eta has dimension {eta.shape[1]} but {self._nus_dim} was expected.")
return eta[:, 1] - eta[:, 0]
def calculate_score(self, d: data.ExpData) -> np.ndarray:
"""Calculates eta(W) and phi(W, eta) for all W in d.
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
eta = self.transform(d)
return self._phi(eta, d)
def set_to_truth(self, data_gen: data.LinearDataGenerator):
"""Sets eta to the true value given a DataGenerator object.
Assumes it is identifiable in the framework of this nuisance parameter.
Args:
data_gen: a DataGenerator that is used to fit the true parameters.
"""
self.treat_response.intercept_ = data_gen.tau
self.treat_response.coef_ = data_gen.beta
self.cont_response.coef_ = data_gen.beta
self.cont_response.intercept_ = 0
class Predictor(Protocol):
def fit(self, x: np.ndarray, y: np.ndarray) -> None:
...
def predict(self, x: np.ndarray) -> np.ndarray:
...
class ProbPredictor(Protocol):
def fit(self, x: np.ndarray, y: np.ndarray) -> None:
...
def predict_proba(self, x: np.ndarray) -> np.ndarray:
...
class AIPW(NuisanceParameter):
"""An AIWP estimator with arbitrary sklearn-style predictors.
We use any sklearn-style estimator to model the conditional
response and the propensity functions.
Attributes:
    response: a Predictor that models mu(x, z) = E[Y|X=x, Z=z]
    propensity: a ProbPredictor that models e(z) = P[X=1|Z=z]
  The subclasses AIPWLinearLogistic and AIPWKernelRidgeLogistic are specific
  choices of the response and propensity models.
"""
def __init__(
self,
n_features,
response: Predictor,
propensity: ProbPredictor,
min_units=5,
min_overlap=.05,
):
super().__init__()
self._model = None # The model used
self._has_been_fit = False
self.recent_data = None
self._cov_dim = n_features
self.response = response
self.propensity = propensity
self._nus_dim = 4 # The dimension of the nuisance parameter
self._min_units = min_units
self._min_overlap = min_overlap
def reset(self):
self.response = sklearn.clone(self.response)
self.propensity = sklearn.clone(self.propensity)
self._has_been_fit = False
self.recent_data = None
def fit(self, d: data.ExpData):
"""Fits eta to the provided experimental d.
Args:
d: ExpData used to fit the nuisance parameter. Expected shapes are
d.cov: (n_units, n_features)
d.exp: (n_units,)
d.rsp: (n_units,)
"""
n_units = len(d)
if d.n_features != self._cov_dim:
raise ValueError(
f"Data has {d.n_features} dimension but {self._cov_dim} was expected")
self.recent_data = d
if sum(d.exp) < self._min_units or sum(d.exp) > n_units - self._min_units:
message = (f"{sum(d.exp)} treatment units of {n_units} total units is not"
" enough units in control or treatment to fit eta.")
logging.warning(message)
return
self.response.fit(np.c_[d.exp, d.cov], d.rsp)
self.propensity.fit(d.cov, d.exp)
self._has_been_fit = True
def calculate_cs_width(self, delta, sub_g, rho) -> float:
"""Calculates the width of a confidence sequence on the norm of eta error.
Guaranteed to hold with probability delta.
See the paper for a justification of the formula.
Args:
delta: the error probability tolerance
sub_g: the subGaussian parameter
rho: hyperparameter for the mixture of the boundary
Returns:
The width of a confidence sequence
"""
x = self.recent_data.cov
d = self._cov_dim
v = x.transpose().dot(x)
numerator = sub_g * np.log(np.linalg.det(sub_g * v + rho * np.identity(d)))
denominator = sub_g * np.log(rho**d * delta**2)
if numerator <= denominator: # not enough samples have been reached
return np.inf
radius = np.sqrt(numerator - denominator)
norm_matrix = v.dot(np.linalg.inv(v + rho / sub_g * np.identity(d))).dot(v)
w, _ = np.linalg.eig(norm_matrix)
mu_radius = radius / np.sqrt(min(w))
eta_radius = mu_radius
return mu_radius + eta_radius * (1 / self._min_overlap - 2)
def transform(self, d: data.ExpData) -> np.ndarray:
"""Return eta(Z) for all Z in d.
Args:
d: ExpData to be evaluated with n_units
Returns:
a np.ndarray of shape (n_units,_nus_dim) for eta(W), W in d.
"""
if not self._has_been_fit:
logging.warning("Warning: model has not been properly fit yet")
nan_array = np.empty((d.n_units, self._nus_dim))
nan_array[:] = np.nan
return nan_array
if d.n_features != self._cov_dim:
raise ValueError(
f"Data has {d.n_features} dimension but {self._cov_dim} was expected")
n_units = d.n_units
predicted_probs = self.propensity.predict_proba(d.cov)[:, 1]
predicted_probs = np.clip(predicted_probs, self._min_overlap,
1 - self._min_overlap)
eta = np.array([
self.response.predict(np.c_[np.zeros(n_units), d.cov]),
self.response.predict(np.c_[np.ones(n_units), d.cov]),
self.response.predict(np.c_[d.exp, d.cov]), predicted_probs
]).transpose()
return eta
def _phi(self, eta: np.ndarray, d: data.ExpData) -> np.ndarray:
"""Calculates phi(W, eta) for all W in d and eta(W).
Args:
eta: np.ndarray of shape (d.n_units, _nus_dim), which has been evaluated
on W.
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
if eta.shape[1] != self._nus_dim:
raise ValueError(
f"eta has dimension {eta.shape[1]} but {self._nus_dim} was expected.")
if any(eta[:, 3] < 1e-15) or any(eta[:, 3] > 1 - 1e-15):
raise ValueError("Eta is to close to exiting the unit interval")
cond_difference = eta[:, 1] - eta[:, 0]
ipw = (d.rsp - eta[:, 2]) * (
d.exp / eta[:, 3] - (1 - d.exp) / (1 - eta[:, 3]))
return cond_difference + ipw
def calculate_score(self, d: data.ExpData) -> np.ndarray:
"""Calculates eta(W) and phi(W, eta) for all W in d.
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
eta = self.transform(d)
return self._phi(eta, d)
def set_to_truth(self, data_gen: data.DataGenerator):
"""Sets eta to the true value given a DGenerator object.
Assumes it is identifiable in the framework of this nuisance parameter.
Args:
data_gen: a DGenerator that is used to fit the true parameters.
"""
if not isinstance(data_gen, data.LinearDataGenerator):
raise ValueError(("The DataGenerator is not a LinearDataGenerator and is "
"not identifiable in this model"))
self.response.coef_ = np.r_[data_gen.tau, data_gen.beta]
self.propensity.coef_ = data_gen.gamma.reshape(1, self._cov_dim)
self.propensity.intercept_ = 0
def has_enough_samples(self) -> bool:
return self._has_been_fit
class AIPWLinearLogistic(AIPW):
"""AIPW where the response is linear and the propensity logistic.
Specifically, we will fit
    mu(X, Z) = Z * beta + X * tau
e(Z) = logit(Z*gamma)
The corresponding phi function is
phi(W,eta) = mu(1,Z) - mu(0,Z) + (Y-mu(X,Z))(X/e(Z) - (1-X)/(1-e(Z)))
"""
def __init__(self, n_features, min_units=5, min_overlap=.05):
super().__init__(
n_features,
response=linear_model.Ridge(alpha=1.0, fit_intercept=False),
propensity=linear_model.LogisticRegression(warm_start=True),
min_units=min_units,
min_overlap=min_overlap,
)
def get_response_parameters(self) -> Tuple[np.ndarray, np.ndarray]:
if isinstance(self.response, linear_model.Ridge):
return (self.response.coef_, self.response.intercept_)
else:
return (np.nan, np.nan)
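# Illustrative sketch only (not used by the library): AIPWLinearLogistic on
# the back-door SCM from example_scm, mirroring parameters_test.py. The mean
# of the scores is the usual AIPW estimate of the ATE; the sample size and the
# local import are choices made only for this example.
def _aipw_linear_logistic_sketch(n: int = 10000) -> float:
  from causal_effect_bandits import example_scm  # example-only dependency
  scm_gen, _ = example_scm.back_door_scm()
  d = scm_gen.generate(n)
  eta = AIPWLinearLogistic(n_features=1)
  eta.fit(d)
  return float(np.mean(eta.calculate_score(d)))  # ATE estimate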
class AIPWKernelRidgeLogistic(AIPW):
"""AIPW with kernel ridge regression, logistic regression.
Specifically, we will fit
    mu(X, Z) with an RBF kernel ridge regression of Y on (X, Z)
e(Z) = logit(Z*gamma)
The corresponding phi function is
phi(W,eta) = mu(1,Z) - mu(0,Z) + (Y-mu(X,Z))(X/e(Z) - (1-X)/(1-e(Z)))
"""
def __init__(self, n_features, min_units=5, min_overlap=.05):
super().__init__(
n_features,
response=sklearn.kernel_ridge.KernelRidge(alpha=1.0, kernel="rbf"),
propensity=linear_model.LogisticRegression(warm_start=True),
min_units=min_units,
min_overlap=min_overlap,
)
class AIPWKernelGradientBoosting(AIPW):
"""An AIPW with GP response and gradient boosted propensity."""
def __init__(self, n_features, min_units=5, min_overlap=.05):
super().__init__(
n_features,
response=sklearn.kernel_ridge.KernelRidge(alpha=1.0, kernel="rbf"),
propensity=sklearn.ensemble.GradientBoostingClassifier(
n_estimators=5, learning_rate=1.0, max_depth=2, random_state=0),
min_units=min_units,
min_overlap=min_overlap,
)
class FrontDoorLinearFiniteZ(NuisanceParameter):
"""This nuisance parameter estimates for the frontdoor formula.
We must fit models for:
- mu_Y(Z,X) : y_response
    - P(Z|X): cov_given_exp
    - P(X): exp_prob
The corresponding phi function is
phi(W,eta) =
(Y-mu_Y(Z,X))(P(Z|X=1) - P(Z|X=0)) / P(Z|X)
      + sum_z' mu_Y(z',X)(P(Z=z'|X=1) - P(Z=z'|X=0))
      + (X/P(X=1) - (1-X)/P(X=0)) * {
         sum_{x'} mu_Y(Z,x')P(X=x')
         - sum_{z', x'} mu_Y(z', x')P(Z|X=x')P(X=x')
       }
We assume a binary X. We thus need to estimate two densities, P(Z|X=1) and
P(Z|X=0), and we need to be able to integrate over these densities.
If we assume that $Z$ has finite support, the simplest density estimator is
a (smoothed) histogram.
"""
def __init__(
self,
n_features,
min_units=5,
min_overlap: float = .05,
min_ratio_to_uniform: float = 10,
):
super().__init__()
self._cov_dim = 0
    self._nus_dim = 0  # The output dimension of the nuisance parameter
self._data = None
self._has_been_fit = False
self.recent_data = None
self._cov_dim = n_features
self._min_overlap = min_overlap
self._min_units = min_units
self._min_ratio_to_uniform = min_ratio_to_uniform
self._nus_dim = 8 # The dimension of the nuisance parameter
self.y_response = linear_model.Ridge(
alpha=1.0, fit_intercept=True) # E[Y|Z, X]
self.cov_given_exp = data.TabularCPD(min_ratio_to_uniform) # P(Z|X)
self.exp_prob = .5 # P(X)
self._model = (self.y_response, self.cov_given_exp, self.exp_prob)
def reset(self):
self.__init__(
self._cov_dim,
self._min_units,
self._min_overlap,
self._min_ratio_to_uniform,
)
self._has_been_fit = False
self.recent_data = None
def fit(self, d: data.ExpData):
"""Fits eta to the provided experimental d.
Args:
d: ExpData used to fit the nuisance parameter. Expected shapes are
d.cov: (n_units, n_features)
d.exp: (n_units,)
d.rsp: (n_units,)
"""
if d.n_features != self._cov_dim:
raise ValueError(
f"Data has {d.n_features} dimension but {self._cov_dim} was expected")
self.recent_data = d
if sum(d.exp) < self._min_units or sum(d.exp) > d.n_units - self._min_units:
print(
f"---Frontdoor: {sum(d.exp)} treatment units of {d.n_units} total units "
"is not enough units in control or treatment to fit eta.")
return
self.y_response.fit(np.array(np.c_[d.exp, d.cov]), d.rsp)
self.exp_prob = np.clip(
np.mean(d.exp), self._min_overlap, 1 - self._min_overlap)
self.cov_given_exp.fit(d.exp, d.cov)
self._has_been_fit = True
def calculate_cs_width(self, delta, sub_g, rho):
"""Calculates the width of a confidence sequence on the norm of eta error.
Guaranteed to hold with probability delta.
A linear regression CS is generated for both, then map it through
the sigmoid.
See the paper for a justification of the formula.
Args:
delta: the error probability tolerance
sub_g: the subGaussian parameter
rho: hyperparameter for the mixture of the boundary
Returns:
The width of a confidence sequence
"""
x = np.array(np.c_[self.recent_data.exp, self.recent_data.cov])
d = self._cov_dim + 1
v = x.transpose().dot(x)
delta = delta / 3 # we are making three confidence sequences
numerator = sub_g * np.log(np.linalg.det(sub_g * v + rho * np.identity(d)))
denominator = sub_g * np.log(rho**d * delta**2)
if numerator <= denominator: # not enough samples have been reached
return np.inf
radius = np.sqrt(numerator - denominator)
norm_matrix = v.dot(np.linalg.inv(v + rho / sub_g * np.identity(d))).dot(v)
w, _ = np.linalg.eig(norm_matrix)
mu_radius = radius / np.sqrt(min(w))
def uniform_boundary(n, delta):
# check if the radius is well defined
if n <= delta**2 / 4 / rho:
return np.inf
else:
return np.sqrt(1 / n * (.25 + rho / n) * np.log(
(n / 4 + rho) / (delta**2 * rho)))
t_radius = uniform_boundary(len(x), delta / 3)
cov_given_exp_radius = 2 * self.cov_given_exp.support_size() * t_radius
if np.isnan(t_radius) or np.isnan(cov_given_exp_radius) or np.isnan(
mu_radius):
print("Warning, Nans found when calculating CS width.")
return (mu_radius + t_radius * (1 / self._min_overlap - 1) + mu_radius * 2 *
(1 / self._min_overlap - 1))
def transform(self, d: data.ExpData):
"""Returns the nuisance parameter evaluated on data d.
computes, for every W_i in d
eta_i = (
0: mu_Y(z_i, x_i),
1: P(z_i|X=1),
2: P(z_i|X=0),
3: P(z_i|x_i),
        4: W(x_i) = x_i / P(X=1) - (1-x_i) / P(X=0),
5: f_1(x_i) = sum_z' mu_Y(z',x_i)(P(Z=z'|X=1) - P(Z=z'|X=0)),
6: f_2(z_i) = sum_{x'} mu_Y(z_i,x') P(X=x'),
7: f_3(z_i) = sum_{z', x'} mu_Y(z', x') P(z_i|X=x') P(X=x'),
)
The corresponding phi function is
phi(W,eta) =
(Y-mu_Y(Z,X))(P(Z|X=1) - P(Z|X=0)) / P(Z|X)
+ sum_z' mu_Y(z',X)(P(Z=z'|X=1) - P(Z=z'|X=0))
        + (X/P(X=1) - (1-X)/P(X=0)) * {
sum_{x'} mu_Y(Z,x')P(x')
- sum_{z', x'} mu_Y(z', x')P(Z|x')P(x')
}
Which translates to
phi(W,eta) =
(Y-eta[:,0]) * (eta[:,1] / eta[:,3] - eta[:,2] / eta[:, 3])
+ eta[:, 5]
+ eta[:,4] * (eta[:,6] - eta[:,7])
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
      np.array of shape (d.n_units, 8)
"""
if d.n_features != self._cov_dim:
raise ValueError(
f"Data is of dimension {d.n_features} but {self._cov_dim} is expected."
)
if not self._has_been_fit:
print("Warning: model has not been properly fit yet.")
nan_array = np.empty((d.n_units, self._nus_dim))
nan_array[:] = np.nan
return nan_array
n_units = len(d)
mu_y_zx = self.y_response.predict(np.array(np.c_[d.exp, d.cov]))
p_z_x1 = self.cov_given_exp.predict(np.ones(n_units, dtype=int), d.cov)
p_z_x0 = self.cov_given_exp.predict(np.zeros(n_units, dtype=int), d.cov)
p_z_x = self.cov_given_exp.predict(d.exp, d.cov)
p_z_x1 = np.clip(p_z_x1, self._min_overlap, 1 - self._min_overlap)
p_z_x0 = np.clip(p_z_x0, self._min_overlap, 1 - self._min_overlap)
p_z_x = np.clip(p_z_x, self._min_overlap, 1 - self._min_overlap)
z_support = self.cov_given_exp.z_support()
mu_yz_ix1 = self.y_response.predict(
np.array(np.c_[np.ones(len(d), dtype=int), d.cov])) # mu_y(z_i, X=1)
mu_yz_ix0 = self.y_response.predict(
np.array(np.c_[np.zeros(len(d), dtype=int), d.cov])) # mu_y(z_i, X=0)
f_2 = self.exp_prob * mu_yz_ix1 + (1 - self.exp_prob) * mu_yz_ix0
f_1 = np.zeros(len(d))
f_3 = np.zeros(len(d))
for z in z_support:
      mu_yzx_i = self.y_response.predict(
          np.array(np.c_[d.exp, np.tile(z, (len(d), 1))]))  # mu_Y(z, x_i)
f_1 += mu_yzx_i * (
self.cov_given_exp.predict(np.array([1]), z) -
self.cov_given_exp.predict(np.array([0]), z))
f_3 += (
self.y_response.predict([np.insert(z, 0, 1)]) * self.exp_prob * p_z_x1
+ self.y_response.predict([np.insert(z, 0, 0)]) *
(1 - self.exp_prob) * p_z_x0)
w = d.exp / self.exp_prob - (1 - d.exp) / (1 - self.exp_prob)
eta = np.array([
mu_y_zx,
p_z_x1,
p_z_x0,
p_z_x,
w,
f_1,
f_2,
f_3,
]).transpose()
return eta
def _phi(self, eta: np.ndarray, d: data.ExpData):
"""Maps eta to phi(eta).
Recall that
eta_i(z_i,x_i) = (
0: mu_Y(z_i, x_i),
1: P(z_i|X=1),
2: P(z_i|X=0),
3: P(z_i|x_i),
      4: W(x_i) = x_i / P(X=1) - (1-x_i) / P(X=0),
5: f_1(x_i) = sum_z' mu_Y(z',x_i)(P(Z=z'|X=1) - P(Z=z'|X=0)),
6: f_2(z_i) = sum_{x'} mu_Y(z_i,x') P(X=x'),
7: f_3(z_i) = sum_{z', x'} mu_Y(z', x') P(Z=z_i|X=x') P(X=x'),
)
The corresponding phi function is
phi(W,eta) =
(Y-mu_Y(Z,X))(P(Z|X=1) - P(Z|X=0)) / P(Z|X)
+ sum_z' mu_Y(z',X)(P(Z=z'|X=1) - P(Z=z'|X=0))
      + (X/P(X=1) - (1-X)/P(X=0)) * {
sum_{x'} mu_Y(Z,x')P(x')
- sum_{z', x'} mu_Y(z', x')P(Z|x')P(x')
}
Which translates to
phi(W,eta) =
(Y-eta[:,0]) * (eta[:,1] / eta[:,3] - eta[:,2] / eta[:, 3])
+ eta[:, 5]
+ eta[:,4] * (eta[:,6] - eta[:,7])
Args:
eta: np.ndarray of shape (d.n_units, _nus_dim), which has been evaluated
on W.
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
if eta.shape[1] != self._nus_dim:
raise ValueError(
f"eta has dimension {eta.shape[1]} but {self._nus_dim} was expected.")
phi = ((d.rsp - eta[:, 0]) *
(eta[:, 1] / eta[:, 3] - eta[:, 2] / eta[:, 3]) + eta[:, 5] +
eta[:, 4] * (eta[:, 6] - eta[:, 7]))
return phi
def calculate_score(self, d: data.ExpData) -> np.ndarray:
"""Calculates eta(W) and phi(W, eta) for all W in d.
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
eta = self.transform(d)
return self._phi(eta, d)
def has_enough_samples(self) -> bool:
return self._has_been_fit
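# Illustrative sketch only (not used by the library): the front-door estimator
# on the front-door SCM from example_scm, following parameters_test.py.
# Removing coordinate 0 drops the hidden confounder U from the covariates; the
# sample size is an arbitrary choice for this example.
def _frontdoor_sketch(n: int = 10000) -> float:
  from causal_effect_bandits import example_scm  # example-only dependency
  scm_gen, _ = example_scm.frontdoor_scm()
  d = data.get_remove_coordinates_fn(0)(scm_gen.generate(n))
  eta = FrontDoorLinearFiniteZ(n_features=1, min_ratio_to_uniform=5)
  eta.fit(d)
  return float(np.mean(eta.calculate_score(d)))  # ATE estimate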
class LinearRegressionParameter(NuisanceParameter):
"""Fits a linear model where X can be continuous.
We assume the model
Y = Z beta_1 + X tau + epsilon_1
  and so fitting tau corresponds to linearly regressing Y on (X, Z) and then
returning the first coordinate of the parameter vector (corresponding to the
X component); this vector is beta_hat.
Because of the linear structure, the influence function is simply
beta_hat - tau,
meaning that phi is the identity and the nuisance function is eta = beta_hat.
However, for the purpose of estimating the variance, let's consider estimating
the covariance matrix separately; this covariance estimation is what is
abstracted by this NuisanceParameter subclass.
While linear regression is defined for a single set of samples, the analysis
will appreciate the flexibility of letting the covariance matrix be fit on
a separate sample. Hence, the nuisance parameter eta will be the inverse
covariance matrix of [X Z] and phi(W, ) will evaluate
e_1^T eta [X Z]^T Y
"""
def __init__(self, n_features, l_reg=0.1):
super().__init__()
self._model = None # The model used
self._cov_dim = 0
    self._nus_dim = 0  # The output dimension of the nuisance parameter
self._data = None
self._has_been_fit = False
self.recent_data = None
self._cov_dim = n_features
# The dimension of the nuisance parameter
self._nus_dim = (n_features + 1)**2
# The model is just the covariance matrix
self._model = np.zeros(((n_features + 1), (n_features + 1)))
self.l_reg = l_reg
def fit(self, d: data.ExpData):
"""Fits eta to the provided experimental d.
Args:
d: ExpData used to fit the nuisance parameter. Expected shapes are
d.cov: (n_units, n_features)
d.exp: (n_units,)
d.rsp: (n_units,)
"""
if d.n_units < 1: # not enough data to fit
return
data_matrix = np.array(np.c_[d.exp, d.cov])
self._model = np.linalg.inv(data_matrix.transpose().dot(data_matrix) +
self.l_reg *
np.identity(self._cov_dim + 1)) * d.n_units
self._has_been_fit = True
self.recent_data = d
def transform(self, d: data.ExpData):
return self._model
def _phi(self, eta: np.ndarray, d: data.ExpData):
"""Evaluates e_1^T eta [X Z]^T Y for a vector X, Z, Y.
Args:
eta: np.ndarray of shape (d.n_units, _nus_dim), which has been evaluated
on W.
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
xzy = np.c_[d.exp, d.cov].transpose().dot(np.diag(d.rsp))
return eta.dot(xzy)[0, :]
def calculate_score(self, d: data.ExpData) -> np.ndarray:
"""Calculates eta(W) and phi(W, eta) for all W in d.
Args:
d: ExpData of shape (d.n_units, _cov_dim)
Returns:
np.array of shape (d.n_units, )
"""
eta = self.transform(d)
return self._phi(eta, d)
def has_enough_samples(self) -> bool:
return self._has_been_fit
| abcei_mab-main | causal_effect_bandits/parameters.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Implements a Structural Causal Model (SCM).
Also includes many common distributions and conditional
probability distributions (CPDs) as building blocks.
"""
from typing import List
from causal_effect_bandits import data
import numpy as np
class SCM(data.DataGenerator):
"""Implements a SCM model that can generate ExpData.
An SCM is specified by a DAG and CPD for each node.
The pgmpy class will encode this model and be responsible for sampling
from it.
Attributes:
var_names: List[str] List of all variable names in the model, presented in
      topological order (every variable appears after its parents)
parents: Mapping[str:List[str]] A dictionary with keys in var_names.
parents[var_name] is a list of the keys corresponding to the parents of
node var_name.
cov_variables: List[str] List of all variable names that can be used as
covariates
treatment_variable: str name of variable that corresponds to the treatment
    response_variable: str name of variable that corresponds to the response
cpds: a Mapping of functions A dict with var_name as keys; each accepts
(n_samples, parent_samples) as arguments and returns a np.array of samples
from the variable.
"""
def __init__(
self,
name,
var_names: List[str],
parents,
cpds,
cov_variables: List[str],
treatment_variable: str,
response_variable: str,
):
super().__init__(name)
self._n_nodes = len(var_names)
self.var_names = np.array(var_names)
self.parents = parents
if not set(var_names).issubset(parents):
raise ValueError("Some variables do not have keys in parents")
if not set(var_names).issubset(cpds):
raise ValueError("some variables do not have keys in cpbs")
if treatment_variable not in var_names:
raise ValueError("The treatment variable must be a variable")
if response_variable not in var_names:
raise ValueError("The response variable must be a variable")
if not set(cov_variables).issubset(var_names):
raise ValueError("The covariate variable must be variables")
self.treatment_variable = treatment_variable
self.response_variable = response_variable
self.cov_variables = cov_variables
self.cpds = cpds
def generate(self, n_samples: int):
samples = {}
for var_name in self.var_names:
# Build parents sample
if not set(self.parents[var_name]).issubset(samples):
raise ValueError("var_names are not in lexicographic order")
parent_samples = [samples[p] for p in self.parents[var_name]]
samples[var_name] = self.cpds[var_name](n_samples, parent_samples)
# ExpData was only designed to have a single covariate; hence, we
# concatenate the samples corresponding to each covariate.
# Perhaps in a future version we can do something better.
data_to_concat = [samples[v] for v in self.cov_variables]
x_data = np.column_stack(data_to_concat)
return data.ExpData(
x_data,
samples[self.treatment_variable],
samples[self.response_variable],
)
def compute_treatment_effect(self, n_samples: int):
"""Estmimates the treatment effect by changing the graph.
Args:
n_samples: the number of samples to use
Returns:
An estimate of the ATE of the SCM.
"""
old_t_cpd = self.cpds[self.treatment_variable]
    treatment_cpd = lambda n, parents: deterministic_cpd(n, 1)
self.cpds[self.treatment_variable] = treatment_cpd
treatment_sample = self.generate(n_samples)
    control_cpd = lambda n, parents: deterministic_cpd(n, 0)
self.cpds[self.treatment_variable] = control_cpd
control_sample = self.generate(n_samples)
self.cpds[self.treatment_variable] = old_t_cpd
return np.mean(treatment_sample.rsp) - np.mean(control_sample.rsp)
def deterministic_cpd(n_samples, value):
return np.full(n_samples, value)
def categorical_conditioning(parents, conditioning_on_idx, distributions):
"""A way to implement conditioning by a categorical random variable.
  When parents[conditioning_on_idx] = i, the sample for that unit is drawn
  from distributions[i] evaluated on the remaining parents.
Args:
parents: value of parents
conditioning_on_idx: the index used to select the distribution
distributions: a list of distributions
Returns:
    Samples where each sample is selected from the distribution
corresponding to the respective conditioning_on_idx and evaluated
on the parents.
"""
if len(parents) <= conditioning_on_idx:
raise ValueError(
"Treatment_idx is greater than the number of distributions.")
# Remove treatment_idx from parents
distribution_indices = parents[conditioning_on_idx]
del parents[conditioning_on_idx]
samples = []
for i, d in enumerate(distribution_indices):
# Build the parents for this unit
parent_sample = [p[i] for p in parents]
samples.append(distributions[int(d)](1, parent_sample)[0])
return np.array(samples)
def normal_cpd(parents, betas, mean, cov):
"""Simulates a linear gaussian based on the parents, betas.
Args:
parents: n_parent long list of np.array of shape (n_units, dim_i)
betas: n_parent long list of np.arrays of shape (dim_i)
mean: Float the mean of the random variable
cov: the covariance matrix of the random variable
Returns:
n samples with
    X_j = sum_i parents[i,j].dot(betas[i]) + epsilon
with epsilon ~ normal(mean, cov)
"""
n_units = len(parents[0])
total_mean = parents[0].dot(betas[0])
for i in range(1, len(parents)):
total_mean += parents[i].dot(betas[i])
if total_mean.ndim == 1: # Y is univariate
return total_mean + np.random.normal(mean, np.sqrt(cov), size=n_units)
else:
return total_mean + np.random.multivariate_normal(mean, cov, size=n_units)
def add_gaussian_noise(x, mean, cov):
"""Adds gaussian noise to x with the correct shape.
Args:
x: np.array with shape (n_units,) or (n_units, x_dim)
mean: float or np.array with shape (1,) or (x_dim,1)
cov: float or np.array with shape (x_dim, x_dim)
Returns:
np.array with shape:
(n_units,) if mean is a float or shape (1,)
(n_units, x_dim), otherwise.
"""
n_units = len(x)
if np.ndim(x) < 2 or (np.ndim(x) == 2 and
x.shape[1] == 1): # variable is one-dimensional
if np.ndim(mean) > 1:
raise ValueError("The dimensions of x and mean are not compatible")
return x.flatten() + np.random.normal(mean, np.sqrt(cov), size=n_units)
else: # variable is multi-dimensional
if x.shape[1] != len(mean):
raise ValueError("The dimensions of x and mean are not compatible")
return x + np.random.multivariate_normal(mean, cov, size=n_units)
def transformed_normal_cpd(parents, betas, fns, mean, cov):
"""Simulates a transformed linear Gaussian.
Specifically, it simulates
    Y = sum_i f_i(parents[i,j].dot(betas[i])) + epsilon
with epsilon ~ normal(mean, cov)
Args:
parents: n_parent long list of np.array of shape (n_units, dim_i)
betas: n_parent long list of np.arrays of shape (dim_i)
fns: n_parent long list of vector-to-vector mappings
mean: Float the mean of the random variable
cov: the variance of the random variable
Returns:
Samples of Y.
"""
n_units = len(parents[0])
if len(parents) != len(fns):
raise ValueError("parents and fns should be the same length.")
total_mean = fns[0](parents[0].dot(betas[0]))
for i in range(1, len(parents)):
total_mean += fns[i](parents[i].dot(betas[i]))
# infer whether the output should be 1 or 2 dim from mean
if np.ndim(mean) == 0: # we should return (n_units,) array
return total_mean.flatten() + np.random.normal(
mean, np.sqrt(cov), size=n_units)
else:
return total_mean + np.random.multivariate_normal(mean, cov, size=n_units)
def structural_equation_with_noise(parents, f, mean, cov):
"""Simulates a random variable as a noisy function of the parents.
Args:
parents: n_parent long list of np.array of shape (n_units, dim_i)
f: mapping we assume fn takes vectors (n_units, n_features) to (n_units,
out_dim)
    mean: np.ndarray of shape (out_dim,) or scalar; the mean of the random variable
    cov: np.ndarray of shape (out_dim, out_dim) or scalar; the (co)variance of the
random variable
Returns:
Samples of
X_j = fn(parents[i,j]) + epsilon
with epsilon ~ normal(mean, cov)
The returned shape is inferred from the shape of mean; if mean has shape
(d, ) or (d, 1) then a vector of shape (n_units, d) is returned.
If mean is a scalar, then a vector or shape (n_units,) is returned.
"""
n_units = len(parents[0])
f_out = f(np.column_stack(parents))
if np.ndim(mean) < 1: # the output is a scalar
return f_out.flatten() + np.random.normal(mean, np.sqrt(cov), size=n_units)
else:
return f_out + np.random.multivariate_normal(mean, cov, size=n_units)
def logistic_bernoulli_cpd(parents, f):
"""Returns n Bernoulli samples with mean softmax of parents.
X_1,...,X_n ~ Bernoulli(sigmoid(f(parents))
Args:
parents: List(ndarray) n_parent long list of np.array of shape (n_units,
dim_i)
f: mapping This mapping takes a list of (n_unit, dim_i) arrays and returns a
(n_unit,) list. It is up to the user to make sure that the dimensions are
compatible.
"""
params = f(parents)
means = 1 / (1 + np.exp(-params))
return np.random.binomial(1, means)
def logistic_linear_bernoulli_cpd(parents, gammas):
"""Returns n Bernoulli samples with mean softmax of linear of parents.
X_j ~ Bernoulli(sigmoid(sum_i parents[i,j].dot(gammas[i]))
Args:
parents: n_parent long list of np.array of shape (n_units, dim_i)
gammas: n_parent long list of np.arrays of shape (dim_i)
"""
def f(parents):
params = 0
for i in range(len(parents)):
params += parents[i].dot(gammas[i])
return params
return logistic_bernoulli_cpd(parents, f)
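# Illustrative usage sketch (not part of the original module; the
# `_example_logistic_linear_bernoulli_cpd` name is hypothetical). Samples
# binary variables whose success probability is a sigmoid-linear function of a
# two-dimensional parent.
def _example_logistic_linear_bernoulli_cpd():
  n_units = 100
  parents = [np.random.randn(n_units, 2)]
  gammas = [np.array([0.5, -1.0])]
  return logistic_linear_bernoulli_cpd(parents, gammas)  # (n_units,) of 0/1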
| abcei_mab-main | causal_effect_bandits/scm.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Utilities for generating random nonlinear functions."""
import itertools
from typing import Callable, Optional
from absl import logging
import numpy as np
from sklearn import gaussian_process
def softmax(x):
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
def generate_random_nd_function(
dim: int,
out_dim: int = 1,
*,
kernel: Optional[gaussian_process.kernels.Kernel] = None,
min_x: float = -1,
max_x: float = 1,
n_points: int = 10,
seed: Optional[int] = None,
alpha: float = 100,
n_point_warning: int = 1500) -> Callable[[np.ndarray], np.ndarray]:
"""A multidimensional version of generate_random_function.
  Each component maps R^dim -> R, and there are out_dim components.
  Each component is sampled from a Gaussian process prior evaluated at
  n_points**dim grid points. The GP is then fit to these points, which
  specifies a sample of the function.
Args:
dim: the input dimension
out_dim: the output dimension
kernel: which kernel to use for the GP
min_x: the minimum value of x
max_x: the maximum value of x
n_points: the number of points used to fit the GP
seed: random seed
alpha: kernel hyperparameter
n_point_warning: the number of points that can be used in the GP without
      raising a warning.
Returns:
mapping from (n, dim) to (n, out_dim)
"""
if kernel is None:
kernel = gaussian_process.kernels.ExpSineSquared(
length_scale=1,
periodicity=5.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0))
if n_points**dim > n_point_warning:
logging.warning("n_points**dim=%i>%i; GaussianProcessRegressor may crash.",
n_points**dim, n_point_warning)
# Specify Gaussian Process
x1d = np.linspace(0, 1, n_points)
# create a cartesian product
x = np.array(list(itertools.product(*[x1d] * dim))) * (max_x - min_x) + min_x
# Choose a random starting state
fns = []
for _ in range(out_dim):
fns.append(gaussian_process.GaussianProcessRegressor(kernel, alpha=alpha))
if seed is None:
seed = np.random.randint(10000)
# Sample from a prior
y = fns[-1].sample_y(x, 1, random_state=seed)
# Fit the GP to this prior
fns[-1].fit(x, y)
# Now we need to map (n_units, n_dim)
def out_f(x):
output = []
for d in range(out_dim):
output.append(fns[d].predict(x.reshape(x.shape[0], -1)))
# We want to return (n_units, out_dim)
return np.column_stack(output)
return out_f
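# Illustrative usage sketch (not part of the original module; the
# `_example_generate_random_nd_function` name is hypothetical). Draws a random
# R^2 -> R function on a small grid and evaluates it on a batch of points.
def _example_generate_random_nd_function():
  f = generate_random_nd_function(dim=2, out_dim=1, n_points=5, seed=0)
  x = np.random.randn(10, 2)
  return f(x)  # shape (10, 1)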
| abcei_mab-main | causal_effect_bandits/nonlinear_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This code is needed to execute the simulations of
# Malek, Chiappa. "Asymptotically Best Causal Effect
# Identification with Multi-Armed Bandits." NeurIPS, 2021.
# https://proceedings.neurips.cc/paper/2021/...
# hash/b8102d1fa5df93e62cf26cd4400a0727-Abstract.html
"""Class definitions for data holding objects."""
import collections
import dataclasses
from typing import Callable, List
import numpy as np
@dataclasses.dataclass
class ExpData:
"""A class to hold observational data.
In experiments, we will routinely have three types of data:
covariates cov: n_units * n_features
    exposure exp: n_units * 1
    response rsp: n_units * 1
This is a wrapper class collecting all three. Data are assumed
to be np.ndarrays.
Methods:
append: appends another ExpData object to this one
k_fold(k): returns k roughly equally large ExpData objects
"""
cov: np.ndarray
exp: np.ndarray
rsp: np.ndarray
def __init__(self, cov: np.ndarray, exp: np.ndarray, y: np.ndarray):
if cov is None:
# Make an empty ExpData
self.n_units = 0
self.n_features = 0
else:
self.n_units = cov.shape[0]
if len(cov.shape) == 1: # one dimensional
self.n_features = 1
else:
self.n_features = cov.shape[1]
self.cov = cov
self.rsp = y
self.exp = exp
def __len__(self):
return self.n_units
def append(self, new_data: "ExpData"):
"""Appends the new data."""
if new_data.n_features != self.n_features:
raise ValueError("The new data has incompatible shape")
self.cov = np.r_[self.cov, new_data.cov]
if self.exp is not None:
if new_data.exp is None:
raise ValueError("new_data.exp is missing but expected")
self.exp = np.r_[self.exp, new_data.exp]
if self.rsp is not None:
if new_data.rsp is None:
raise ValueError("new_data.rsp is missing but expected")
self.rsp = np.r_[self.rsp, new_data.rsp]
self.n_units = self.cov.shape[0]
def k_fold(self, k: int):
"""Breaks the data into k folds according to some random permutation."""
if k < 1:
raise ValueError(
f"Number of folds must be a positive integer, but {k} was provided.")
folds = np.random.permutation(np.mod(np.arange(self.n_units), k))
data_folds = []
for i in range(k):
cov_fold = self.cov[folds == i]
if self.exp is not None:
exp_fold = self.exp[folds == i]
else:
exp_fold = None
if self.rsp is not None:
rsp_fold = self.rsp[folds == i]
else:
rsp_fold = None
data_folds.append(ExpData(cov_fold, exp_fold, rsp_fold))
return data_folds
def subsample(self, p: float):
"""Randomly partitions the data into two groups.
Args:
      p: the proportion to allocate to the first fold.
Returns:
Two ExpData objects with approximate size n_units*p and n_units*(1-p).
"""
if not 0 <= p <= 1:
raise ValueError(f"p={p} provided, but should be in the unit interval.")
mask = np.random.choice(a=[True, False], size=(self.n_units), p=[p, 1 - p])
if self.exp is not None:
exp_in = self.exp[mask]
exp_out = self.exp[~mask]
else:
exp_in = None
exp_out = None
if self.rsp is not None:
rsp_in = self.rsp[mask]
rsp_out = self.rsp[~mask]
else:
rsp_in = None
rsp_out = None
in_data = ExpData(self.cov[mask], exp_in, rsp_in)
    out_data = ExpData(self.cov[~mask], exp_out, rsp_out)
    return (in_data, out_data)
def __str__(self):
string = f"An ExpData class with {int(self.n_units)} units. The data are:"
string += "\n covariates:" + str(self.cov)
string += "\n exposures:" + str(self.exp)
string += "\n response:" + str(self.rsp)
return string
def __getitem__(self, key):
new_cov = self.cov[key]
new_exp = None if self.exp is None else self.exp[key]
new_rsp = None if self.rsp is None else self.rsp[key]
return ExpData(new_cov, new_exp, new_rsp)
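# Illustrative usage sketch (not part of the original module; the
# `_example_exp_data_usage` name is hypothetical). Builds a small ExpData
# object, splits it into folds, and subsamples a train/holdout partition.
def _example_exp_data_usage():
  cov = np.random.randn(100, 3)
  exp = np.random.binomial(1, 0.5, size=100)
  rsp = np.random.randn(100)
  data = ExpData(cov, exp, rsp)
  folds = data.k_fold(5)  # list of 5 ExpData objects, ~20 units each
  train, holdout = data.subsample(0.8)
  return folds, train, holdout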
# Below are some common data_transformer functions.
def get_identity_fn() -> Callable[[ExpData], ExpData]:
"""This function returns the identity function for use as a data_transformer.
Returns:
The identity function for use as a data_transformer.
"""
def fn(x):
return x
return fn
def get_remove_coordinates_fn(
idx_to_remove: int,) -> Callable[[ExpData], ExpData]:
"""Returns a function that maps an ExpData to another ExpData.
Args:
idx_to_remove: an index to remove
Returns:
A function that maps ExpData to ExpData with the corresponding index of cov
removed.
Suitable for use as a data_transformer.
"""
def fn(data):
new_cov = np.delete(data.cov, idx_to_remove, axis=1)
if new_cov.ndim == 1: # new covariate is one dimensional
new_cov = new_cov.reshape(-1, 1)
return ExpData(new_cov, data.exp, data.rsp)
return fn
def get_coordinate_mask_fn(
idx_to_include: List[int],) -> Callable[[ExpData], ExpData]:
"""Returns a function that maps an ExpData to another ExpData.
Args:
idx_to_include: indices to include
Returns:
A function that maps ExpData to ExpData with only indices
in idx_to_include remaining.
Suitable for use as a data_transformer.
"""
def fn(d):
return ExpData(d.cov[..., idx_to_include], d.exp, d.rsp)
return fn
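# Illustrative usage sketch (not part of the original module; the
# `_example_data_transformers` name is hypothetical). Shows the two
# data_transformer helpers above: dropping one covariate column and keeping
# an explicit subset of columns.
def _example_data_transformers():
  data = ExpData(np.random.randn(50, 4), np.zeros(50), np.zeros(50))
  drop_last = get_remove_coordinates_fn(3)
  keep_first_two = get_coordinate_mask_fn([0, 1])
  return drop_last(data).cov.shape, keep_first_two(data).cov.shape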
class DataGenerator:
"""Generates data from some joint probability model on X, T, and Y.
The data are
covariates X in R^d
binary treatment T
response Y in R
Specific probability models, like SCMs or linear models, will be implemented
as subclasses.
Methods:
generate(n_units): returns an ExpData with n_units
"""
def __init__(self, name: str):
self._name = name
def generate(self, n_samples: int):
return ExpData(
np.zeros(n_samples, 1),
np.zeros(n_samples),
np.zeros(n_samples),
)
class LinearDataGenerator(DataGenerator):
"""Generates data according to simple linear model.
Specifically, the linear model is specified by
Y = X beta + T*tau + epsilon
T = Bernoulli(sigmoid(X gamma))
where epsilon has a distribution given by noise_model and
T is binary but correlated with the features X.
Attributes:
n_features: int the feature dimension of X. Features have a standard normal
distribution
beta: np.array of shape (n_features,)
tau: float
gamma: np.array of shape (n_features,)
noise_model: a function accepting an integer n where noise_model(n) returns
a np.array of length n.
"""
def __init__(
self,
name: str,
n_features: int,
beta: np.ndarray,
tau: float,
noise_model: Callable[[int], np.ndarray] = np.random.randn,
gamma=None,
):
super().__init__(name)
self._beta = beta
self._tau = tau
self._noise_model = noise_model
if gamma is None: # Default is no dependence between X and T.
self._gamma = np.zeros(n_features)
elif gamma.shape != (n_features,):
raise ValueError(
f"shape of gamma is {gamma.shape}, but ({n_features},) expected.")
else:
self._gamma = gamma
self._n_features = n_features
def generate(self, n_samples: int):
x = np.random.randn(n_samples, self._n_features)
t_means = 1 / (1 + np.exp(-x.dot(self._gamma)))
t = np.random.binomial(1, t_means)
noise = self._noise_model(n_samples)
if len(noise) != n_samples:
raise ValueError(
f"noise_model's output is {len(noise)} but should be {n_samples}.")
    y = x.dot(self._beta) + self._tau * t + noise
return ExpData(x, t, y)
@property
def tau(self):
return self._tau
@property
def beta(self):
return self._beta
@property
def gamma(self):
return self._gamma
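# Illustrative usage sketch (not part of the original module; the
# `_example_linear_data_generator` name is hypothetical). Draws samples from a
# linear model with treatment effect tau = 2 and confounding between X and T
# induced by a nonzero gamma.
def _example_linear_data_generator():
  gen = LinearDataGenerator(
      name="example",
      n_features=3,
      beta=np.array([1.0, 0.0, -1.0]),
      tau=2.0,
      gamma=np.array([0.5, 0.5, 0.0]),
  )
  return gen.generate(200)  # ExpData with 200 units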
class TabularCPD:
"""Estimates a tabular CPD.
Given finite support random variables Z and X, this class fits an estimator
of P(Z|X).
Because np.arrays are not hashable, all keys will use .tobytes() instead.
Therefore, we need to keep dicts of .tobytes() to np.array values.
Attributes:
    min_ratio_to_uniform: The model is fit so that the minimum ratio between
      probabilities and a uniform distribution over the estimated support is at
      least this value. Larger values allow for smaller predicted probabilities
      at the expense of larger variance.
    n_units: an integer number of units
table: a table, implemented as a dict, of counts
x_marginal: marginal probabilities by value of X
x_values: all the unique values of X seen
z_values: all the unique values of Z seen.
min_prob: the minimum probability in the table.
"""
def __init__(self, min_ratio_to_uniform=20):
default_fn = lambda: collections.defaultdict(lambda: 0.0)
self.n_units = 0
self.table = collections.defaultdict(default_fn)
self.x_marginal = collections.defaultdict(lambda: 0.0)
self.x_values = {}
self.z_values = {}
self.min_ratio_to_uniform = min_ratio_to_uniform
def fit(self, x, z):
"""Fits the tabular CPD to data.
Args:
x: an np.ndarray of x observations of size (n_units,). Should have finite
support
z: an np.ndarray of z observations of size (n_units, n_features). Should
have finite support.
"""
x = np.asarray(x)
z = np.asarray(z)
if len(x) != len(z):
raise ValueError("z and x must be the same length")
self.n_units = len(x)
default_fn = lambda: collections.defaultdict(lambda: 0.0)
self.table = collections.defaultdict(default_fn)
self.x_marginal = collections.defaultdict(lambda: 0.0)
for xi, zi in zip(x, z):
self.table[xi.tobytes()][
zi.tobytes()] = self.table[xi.tobytes()][zi.tobytes()] + 1
self.x_values[xi.tobytes()] = xi
self.z_values[zi.tobytes()] = zi
# adjust min_ratio_to_uniform based on the perceived support size
    # Note: we use a naive missing-mass estimator, but
    # we could use a better one, e.g. Good-Turing.
support_size_estimate = len(self.z_values)
self.min_prob = 1 / support_size_estimate / self.min_ratio_to_uniform
if self.min_prob > 1:
self.min_prob = .2
# Next, we normalize the probabilities for every x
for x_key in self.x_values:
x_table = self.table[x_key]
num_samples = sum(x_table.values())
self.x_marginal[x_key] = num_samples / self.n_units
non_violating_z = []
total_violations = 0
for z_key in self.z_values:
x_table[z_key] = x_table[z_key] / num_samples
if x_table[z_key] < self.min_prob:
total_violations += self.min_prob - x_table[z_key]
x_table[z_key] = self.min_prob
elif x_table[z_key] > 1 - self.min_prob:
total_violations += x_table[z_key] - (1 - self.min_prob)
x_table[z_key] = 1 - self.min_prob
else:
non_violating_z.append(z_key)
# Now, we adjust non_violating_z to make P(Z|x = x_key) sum to 1
for z_key in non_violating_z:
x_table[z_key] = (
x_table[z_key] - total_violations / len(non_violating_z))
def x_support(self):
return list(self.x_values.values())
def z_support(self):
return list(self.z_values.values())
def support_size(self):
return len(self.z_values)
def predict(self, x, z):
"""Returns P(z|x) for all elements in zip(z, x).
Args:
x: treatments to evaluate
z: covariates to evaluate.
Returns:
A list of conditional probabilities of z|x.
"""
probs = []
x = np.asarray(x)
z = np.asarray(z)
for zi, xi in zip(z, x):
probs.append(self.table[xi.tobytes()][zi.tobytes()])
return np.array(probs)
def __str__(self):
s = ""
for x in self.table.keys():
s += f"Row for x = {self.x_values[x]}\n"
for z in self.table[x].keys():
s += f" z={self.z_values[z]} : {self.table[x][z]}\n"
s += "\n"
return s
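# Illustrative usage sketch (not part of the original module; the
# `_example_tabular_cpd` name is hypothetical). Fits P(Z | X) for binary X and
# Z and evaluates the estimated conditional probabilities on a few points.
def _example_tabular_cpd():
  x = np.random.binomial(1, 0.5, size=500)
  z = np.random.binomial(1, 0.3 + 0.4 * x)
  cpd = TabularCPD(min_ratio_to_uniform=20)
  cpd.fit(x, z)
  return cpd.predict(x[:5], z[:5])  # estimated P(z_i | x_i) for 5 units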
| abcei_mab-main | causal_effect_bandits/data.py |
"""Memory module for Kanerva Machines.
Functions of the module always take inputs with shape:
[seq_length, batch_size, ...]
Examples:
# Initialisation
memory = KanervaMemory(code_size=100, memory_size=32)
prior_memory = memory.get_prior_state(batch_size)
# Update memory posterior
posterior_memory, _, _, _ = memory.update_state(z_episode, prior_memory)
# Read from the memory using cues z_q
read_z, dkl_w = memory.read_with_z(z_q, posterior_memory)
# Compute the KL-divergence between posterior and prior memory
dkl_M = memory.get_dkl_total(posterior_memory)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import sonnet as snt
import tensorflow as tf
import tensorflow_probability as tfp
MemoryState = collections.namedtuple(
'MemoryState',
# Mean of memory slots, [batch_size, memory_size, word_size]
# Covariance of memory slots, [batch_size, memory_size, memory_size]
('M_mean', 'M_cov'))
EPSILON = 1e-6
# disable lint warnings for cleaner algebraic expressions
# pylint: disable=invalid-name
class KanervaMemory(snt.AbstractModule):
"""A memory-based generative model."""
def __init__(self,
code_size,
memory_size,
num_opt_iters=1,
w_prior_stddev=1.0,
obs_noise_stddev=1.0,
sample_w=False,
sample_M=False,
name='KanervaMemory'):
"""Initialise the memory module.
Args:
code_size: Integer specifying the size of each encoded input.
memory_size: Integer specifying the total number of rows in the memory.
num_opt_iters: Integer specifying the number of optimisation iterations.
w_prior_stddev: Float specifying the standard deviation of w's prior.
obs_noise_stddev: Float specifying the standard deviation of the
observational noise.
sample_w: Boolean specifying whether to sample w or simply take its mean.
sample_M: Boolean specifying whether to sample M or simply take its mean.
      name: String specifying the name of this module.
"""
super(KanervaMemory, self).__init__(name=name)
self._memory_size = memory_size
self._code_size = code_size
self._num_opt_iters = num_opt_iters
self._sample_w = sample_w
self._sample_M = sample_M
self._w_prior_stddev = tf.constant(w_prior_stddev)
with self._enter_variable_scope():
log_w_stddev = snt.TrainableVariable(
[], name='w_stddev',
initializers={'w': tf.constant_initializer(np.log(0.3))})()
if obs_noise_stddev > 0.0:
self._obs_noise_stddev = tf.constant(obs_noise_stddev)
else:
log_obs_stddev = snt.TrainableVariable(
            [], name='obs_stddev',
initializers={'w': tf.constant_initializer(np.log(1.0))})()
self._obs_noise_stddev = tf.exp(log_obs_stddev)
self._w_stddev = tf.exp(log_w_stddev)
self._w_prior_dist = tfp.distributions.MultivariateNormalDiag(
loc=tf.zeros([self._memory_size]),
scale_identity_multiplier=self._w_prior_stddev)
def _build(self):
raise ValueError('`_build()` should not be called for this module since'
'it takes no inputs and all of its variables are'
'constructed in `__init__`')
def _get_w_dist(self, mu_w):
return tfp.distributions.MultivariateNormalDiag(
loc=mu_w, scale_identity_multiplier=self._w_stddev)
def sample_prior_w(self, seq_length, batch_size):
"""Sample w from its prior.
Args:
seq_length: length of sequence
batch_size: batch size of samples
Returns:
w: [batch_size, memory_size]
"""
return self._w_prior_dist.sample([seq_length, batch_size])
def read_with_z(self, z, memory_state):
"""Query from memory (specified by memory_state) using embedding z.
Args:
z: Tensor with dimensions [episode_length, batch_size, code_size]
containing an embedded input.
memory_state: Instance of `MemoryState`.
Returns:
A tuple of tensors containing the mean of read embedding and the
KL-divergence between the w used in reading and its prior.
"""
M = self.sample_M(memory_state)
w_mean = self._solve_w_mean(z, M)
w_samples = self.sample_w(w_mean)
dkl_w = self.get_dkl_w(w_mean)
z_mean = self.get_w_to_z_mean(w_samples, M)
return z_mean, dkl_w
def wrap_z_dist(self, z_mean):
"""Wrap the mean of z as an observation (Gaussian) distribution."""
return tfp.distributions.MultivariateNormalDiag(
loc=z_mean, scale_identity_multiplier=self._obs_noise_stddev)
def sample_w(self, w_mean):
"""Sample w from its posterior distribution."""
if self._sample_w:
return self._get_w_dist(w_mean).sample()
else:
return w_mean
def sample_M(self, memory_state):
"""Sample the memory from its distribution specified by memory_state."""
if self._sample_M:
noise_dist = tfp.distributions.MultivariateNormalFullCovariance(
covariance_matrix=memory_state.M_cov)
# C, B, M
noise = tf.transpose(noise_dist.sample(self._code_size),
[1, 2, 0])
return memory_state.M_mean + noise
else:
return memory_state.M_mean
def get_w_to_z_mean(self, w_p, R):
"""Return the mean of z by reading from memory using weights w_p."""
return tf.einsum('sbm,bmc->sbc', w_p, R) # Rw
def _read_cov(self, w_samples, memory_state):
episode_size, batch_size = w_samples.get_shape().as_list()[:2]
_, U = memory_state # cov: [B, M, M]
wU = tf.einsum('sbm,bmn->sbn', w_samples, U)
wUw = tf.einsum('sbm,sbm->sb', wU, w_samples)
wUw.get_shape().assert_is_compatible_with([episode_size, batch_size])
return wU, wUw
def get_dkl_total(self, memory_state):
"""Compute the KL-divergence between a memory distribution and its prior."""
R, U = memory_state
B, K, _ = R.get_shape().as_list()
U.get_shape().assert_is_compatible_with([B, K, K])
R_prior, U_prior = self.get_prior_state(B)
p_diag = tf.matrix_diag_part(U_prior)
q_diag = tf.matrix_diag_part(U) # B, K
t1 = self._code_size * tf.reduce_sum(q_diag / p_diag, -1)
t2 = tf.reduce_sum((R - R_prior)**2 / tf.expand_dims(
p_diag, -1), [-2, -1])
t3 = -self._code_size * self._memory_size
t4 = self._code_size * tf.reduce_sum(tf.log(p_diag) - tf.log(q_diag), -1)
return t1 + t2 + t3 + t4
def _get_dkl_update(self, memory_state, w_samples, new_z_mean, new_z_var):
"""Compute memory_kl after updating prior_state."""
B, K, C = memory_state.M_mean.get_shape().as_list()
S = w_samples.get_shape().as_list()[0]
# check shapes
w_samples.get_shape().assert_is_compatible_with([S, B, K])
new_z_mean.get_shape().assert_is_compatible_with([S, B, C])
delta = new_z_mean - self.get_w_to_z_mean(w_samples, memory_state.M_mean)
_, wUw = self._read_cov(w_samples, memory_state)
var_z = wUw + new_z_var + self._obs_noise_stddev**2
beta = wUw / var_z
dkl_M = -0.5 * (self._code_size * beta
- tf.reduce_sum(tf.expand_dims(beta / var_z, -1)
* delta**2, -1)
+ self._code_size * tf.log(1 - beta))
dkl_M.get_shape().assert_is_compatible_with([S, B])
return dkl_M
@snt.reuse_variables
def _get_prior_params(self):
log_var = snt.TrainableVariable(
[], name='prior_var_scale',
initializers={'w': tf.constant_initializer(
np.log(1.0))})()
self._prior_var = tf.ones([self._memory_size]) * tf.exp(log_var) + EPSILON
prior_cov = tf.matrix_diag(self._prior_var)
prior_mean = snt.TrainableVariable(
[self._memory_size, self._code_size],
name='prior_mean',
initializers={'w': tf.truncated_normal_initializer(
mean=0.0, stddev=1.0)})()
return prior_mean, prior_cov
@property
def prior_avg_var(self):
"""return the average of prior memory variance."""
return tf.reduce_mean(self._prior_var)
def _solve_w_mean(self, new_z_mean, M):
"""Minimise the conditional KL-divergence between z wrt w."""
w_matrix = tf.matmul(M, M, transpose_b=True)
w_rhs = tf.einsum('bmc,sbc->bms', M, new_z_mean)
w_mean = tf.matrix_solve_ls(
matrix=w_matrix, rhs=w_rhs,
l2_regularizer=self._obs_noise_stddev**2 / self._w_prior_stddev**2)
w_mean = tf.einsum('bms->sbm', w_mean)
return w_mean
def get_prior_state(self, batch_size):
"""Return the prior distribution of memory as a MemoryState."""
prior_mean, prior_cov = self._get_prior_params()
batch_prior_mean = tf.stack([prior_mean] * batch_size)
batch_prior_cov = tf.stack([prior_cov] * batch_size)
return MemoryState(M_mean=batch_prior_mean,
M_cov=batch_prior_cov)
def update_state(self, z, memory_state):
"""Update the memory state using Bayes' rule.
Args:
z: A tensor with dimensions [episode_length, batch_size, code_size]
containing a sequence of embeddings to write into memory.
memory_state: A `MemoryState` namedtuple containing the memory state to
be written to.
Returns:
A tuple containing the following elements:
final_memory: A `MemoryState` namedtuple containing the new memory state
after the update.
w_mean_episode: The mean of w for the written episode.
dkl_w_episode: The KL-divergence of w for the written episode.
dkl_M_episode: The KL-divergence between the memory states before and
after the update.
"""
episode_size, batch_size = z.get_shape().as_list()[:2]
w_array = tf.TensorArray(dtype=tf.float32, size=episode_size,
element_shape=[1, batch_size, self._memory_size])
dkl_w_array = tf.TensorArray(dtype=tf.float32, size=episode_size,
element_shape=[1, batch_size])
dkl_M_array = tf.TensorArray(dtype=tf.float32, size=episode_size,
element_shape=[1, batch_size])
init_var = (0, memory_state, w_array, dkl_w_array, dkl_M_array)
cond = lambda i, m, d_2, d_3, d_4: i < episode_size
def loop_body(i, old_memory, w_array, dkl_w_array, dkl_M_array):
"""Update memory step-by-step."""
z_step = tf.expand_dims(z[i], 0)
new_memory = old_memory
      for _ in range(self._num_opt_iters):
w_step_mean = self._solve_w_mean(z_step, self.sample_M(new_memory))
w_step_sample = self.sample_w(w_step_mean)
new_memory = self._update_memory(old_memory,
w_step_mean,
z_step, 0)
dkl_w_step = self.get_dkl_w(w_step_mean)
dkl_M_step = self._get_dkl_update(old_memory,
w_step_sample,
z_step, 0)
return (i+1,
new_memory,
w_array.write(i, w_step_sample),
dkl_w_array.write(i, dkl_w_step),
dkl_M_array.write(i, dkl_M_step))
_, final_memory, w_mean, dkl_w, dkl_M = tf.while_loop(
cond, loop_body, init_var)
w_mean_episode = w_mean.concat()
dkl_w_episode = dkl_w.concat()
dkl_M_episode = dkl_M.concat()
dkl_M_episode.get_shape().assert_is_compatible_with(
[episode_size, batch_size])
return final_memory, w_mean_episode, dkl_w_episode, dkl_M_episode
def _update_memory(self, old_memory, w_samples, new_z_mean, new_z_var):
"""Setting new_z_var=0 for sample based update."""
old_mean, old_cov = old_memory
wR = self.get_w_to_z_mean(w_samples, old_memory.M_mean)
wU, wUw = self._read_cov(w_samples, old_memory)
sigma_z = wUw + new_z_var + self._obs_noise_stddev**2 # [S, B]
delta = new_z_mean - wR # [S, B, C]
c_z = wU / tf.expand_dims(sigma_z, -1) # [S, B, M]
posterior_mean = old_mean + tf.einsum('sbm,sbc->bmc', c_z, delta)
posterior_cov = old_cov - tf.einsum('sbm,sbn->bmn', c_z, wU)
# Clip diagonal elements for numerical stability
posterior_cov = tf.matrix_set_diag(
posterior_cov,
tf.clip_by_value(tf.matrix_diag_part(posterior_cov), EPSILON, 1e10))
new_memory = MemoryState(M_mean=posterior_mean, M_cov=posterior_cov)
return new_memory
def get_dkl_w(self, w_mean):
"""Return the KL-divergence between posterior and prior weights w."""
posterior_dist = self._get_w_dist(w_mean)
dkl_w = posterior_dist.kl_divergence(self._w_prior_dist)
dkl_w.get_shape().assert_is_compatible_with(
w_mean.get_shape().as_list()[:-1])
return dkl_w
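# Minimal numpy sketch (not part of the original module; the
# `_example_check_memory_covariance_update` name is hypothetical). It checks
# that the rank-1 covariance update used in `_update_memory` above,
#   U - U w w^T U / (w^T U w + sigma^2),
# equals the standard Bayesian linear-Gaussian posterior covariance
# (U^{-1} + w w^T / sigma^2)^{-1} via the Sherman-Morrison identity.
def _example_check_memory_covariance_update():
  rng = np.random.RandomState(0)
  m = 4  # memory_size
  a = rng.randn(m, m)
  u = a.dot(a.T) + np.eye(m)  # prior memory covariance
  w = rng.randn(m)  # addressing weights
  sigma2 = 0.5  # observation noise variance
  posterior = u - np.outer(u.dot(w), w.dot(u)) / (w.dot(u).dot(w) + sigma2)
  direct = np.linalg.inv(np.linalg.inv(u) + np.outer(w, w) / sigma2)
  assert np.allclose(posterior, direct)
  return posterior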
| dynamic-kanerva-machines-master | memory.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['six', 'absl-py', 'numpy', 'matplotlib',
'tensorflow>=1.13.0']
EXTRA_PACKAGES = {
'tensorflow with gpu': ['tensorflow-gpu>=1.8.0'],
}
def spin_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('spectral_nets/tests',
pattern='*_test.py')
return test_suite
setup(
name='spectral_nets',
version='0.1',
description='A library to train spectral inference networks.',
url='https://github.com/deepmind/spectral_inference_networks',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
package_data={
'spectral_inference_networks': ['examples/atari_episodes/*.npz']
},
install_requires=REQUIRED_PACKAGES,
extras_require=EXTRA_PACKAGES,
platforms=['any'],
license='Apache 2.0',
test_suite='setup.spin_test_suite',
)
| spectral_inference_networks-master | setup.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Imports for Spectral Inference Networks module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from spectral_inference_networks.src.spin import * # pylint: disable=wildcard-import
__version__ = '0.1.0'
| spectral_inference_networks-master | spectral_inference_networks/__init__.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for spectral_inference_networks.spin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import spectral_inference_networks as spin
import tensorflow as tf
class SpinTest(tf.test.TestCase):
def _small_matrix(self, size=5, neig=3, batch_size=1000, niter=2000,
deterministic=False, use_pfor=False, per_example=False):
"""Test SpIN on small matrix."""
tf.set_random_seed(0)
np.random.seed(0)
global_step = tf.Variable(0.0, trainable=False)
update_global_step = tf.assign(global_step, global_step+1)
mat = np.random.randn(size, size).astype(np.float32)
xx = np.dot(mat.transpose(), mat) # Symmetrize the matrix
params = [tf.Variable(tf.random_normal([size, neig]))]
if deterministic:
decay = 0.0
# Data is all combinations of rows and columns of the matrix.
data = tf.concat((tf.tile(tf.eye(size), (size, 1)),
tf.reshape(tf.tile(tf.eye(size), (1, size)),
(size**2, size))), axis=0)
optim = tf.train.GradientDescentOptimizer(1.0)
else:
decay = 0.9
data = tf.one_hot(tf.cast(tf.floor(
tf.random_uniform([batch_size]) * size), tf.int32), size)
optim = tf.train.GradientDescentOptimizer(1.0 / global_step)
    data *= np.sqrt(size)  # Scale so that E[x x^T] = I
def _network(x):
return tf.matmul(x, params[0])
def _kernel(x1, x2):
return tf.reduce_sum(x1 * tf.matmul(x2, xx), axis=1, keepdims=True)
operator = spin.KernelOperator(_kernel)
spec_net = spin.SpectralNetwork(
operator, _network, data, params, decay=decay, use_pfor=use_pfor,
per_example=per_example)
step = optim.apply_gradients(zip(spec_net.gradients, params))
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
sess.run(update_global_step)
for _ in range(niter):
sess.run(step)
if deterministic:
eigvec, eigval = sess.run([spec_net.features, spec_net.eigenvalues])
else:
eigval = np.zeros(neig)
n = 1000
for _ in range(n):
eigval += sess.run(spec_net.eigenvalues)
eigval /= n
eigvec = sess.run(params[0])
eigvec, _ = np.linalg.qr(eigvec)
eigvec = eigvec[:int(size)]
true_eigval, true_eigvec = np.linalg.eig(xx)
idx = np.argsort(true_eigval)
print(eigval)
print(np.sort(true_eigval)[:neig])
if deterministic:
atol = 1e-5
else:
atol = 1e-1 # Stochastic case is quite noisy
np.testing.assert_allclose(eigval, np.sort(true_eigval)[:neig], atol=atol)
# Compute dot product between true eigenvectors and learned ones.
cross_cov = np.dot(eigvec.transpose(), true_eigvec[:, idx[:neig]])
cross_cov -= np.diag(np.diag(cross_cov))
np.testing.assert_allclose(cross_cov, np.zeros((neig, neig)), atol=atol)
def test_small_matrix_stochastic_use_pfor_false_per_example_false(self):
self._small_matrix(deterministic=False, use_pfor=False, per_example=False)
def test_small_matrix_stochastic_use_pfor_true_per_example_false(self):
self._small_matrix(deterministic=False, use_pfor=True, per_example=False)
def test_small_matrix_stochastic_use_pfor_true_per_example_true(self):
self._small_matrix(deterministic=False, use_pfor=True, per_example=True)
def test_small_matrix_deterministic_use_pfor_false_per_example_false(self):
self._small_matrix(deterministic=True, use_pfor=False, per_example=False)
def test_small_matrix_deterministic_use_pfor_true_per_example_false(self):
self._small_matrix(deterministic=True, use_pfor=True, per_example=False)
def test_small_matrix_deterministic_use_pfor_true_per_example_true(self):
self._small_matrix(deterministic=True, use_pfor=True, per_example=True)
if __name__ == '__main__':
tf.test.main()
| spectral_inference_networks-master | spectral_inference_networks/tests/spin_test.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Atari example for SpIN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from spectral_inference_networks.examples import atari
import tensorflow as tf
class AtariTest(tf.test.TestCase):
def test_atari(self):
atari.train(
iterations=10,
batch_size=4,
lr=1e-4,
neig=2,
shards=1,
game='montezuma_revenge')
def test_atari_with_per_example_and_pfor(self):
atari.train(
iterations=10,
batch_size=4,
lr=1e-4,
neig=2,
shards=1,
game='montezuma_revenge',
use_pfor=True,
per_example=True)
if __name__ == '__main__':
tf.test.main()
| spectral_inference_networks-master | spectral_inference_networks/tests/atari_test.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the hydrogen example for SpIN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from spectral_inference_networks.examples import hydrogen
import tensorflow as tf
class HydrogenTest(tf.test.TestCase):
def test_hydrogen(self):
hydrogen.train(
iterations=50,
batch_size=8,
lr=1e-4,
apply_boundary=True,
neig=4)
def test_hydrogen_exact_lapl(self):
hydrogen.train(
iterations=50,
batch_size=8,
lr=1e-4,
apply_boundary=True,
neig=4,
laplacian_eps=0.0)
def test_hydrogen_with_pfor_and_per_example(self):
hydrogen.train(
iterations=50,
batch_size=8,
lr=1e-4,
apply_boundary=True,
neig=4,
use_pfor=True,
per_example=True)
def test_hydrogen_exact_lapl_with_pfor_and_per_example(self):
hydrogen.train(
iterations=50,
batch_size=8,
lr=1e-4,
apply_boundary=True,
neig=4,
laplacian_eps=0.0,
use_pfor=True,
per_example=True)
if __name__ == '__main__':
tf.test.main()
| spectral_inference_networks-master | spectral_inference_networks/tests/hydrogen_test.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo of SpIN on Atari dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
import spectral_inference_networks as spin
import tensorflow as tf
EXAMPLES_ROOT = os.path.dirname(__file__)
flags.DEFINE_integer(
    'neig', 5, 'Number of eigenvalues to compute. Must be greater than 1.')
flags.DEFINE_integer('niter', 100000, 'Number of iterations.')
flags.DEFINE_integer('batch_size', 128, 'Self-explanatory.')
flags.DEFINE_float('lr', 1e-3, 'Learning rate.')
flags.DEFINE_float('decay', 0.01, 'Decay rate of moving averages.')
flags.DEFINE_float('rmsprop_decay', 0.1, 'Decay param for RMSprop.')
flags.DEFINE_boolean('step_lr', False, 'Step down learning rate exponentially.')
flags.DEFINE_boolean('show_plots', True, 'Show pyplot plots.')
flags.DEFINE_boolean('use_pfor', True, 'Use parallel_for.')
flags.DEFINE_boolean(
'per_example', True,
'Use a different strategy for computing covariance jacobian.')
flags.DEFINE_string(
'data_dir', None, 'Directory to load game data from. If unspecified, this '
'will default to the enclosed example data.')
flags.DEFINE_string('game', 'montezuma_revenge',
'(montezuma_revenge|space_invaders|beam_rider).')
flags.DEFINE_integer('log_image_every', 10,
'No need to write images for this experiment.')
flags.DEFINE_integer(
'save_params_every', 50000,
'Save parameters to checkpoint after this many iteration.')
flags.DEFINE_integer('shards', 50, 'Number of shards to load, for speed.')
FLAGS = flags.FLAGS
_NFRAMES = 10000
_IMAGE_SIZE = 84 # Image side length.
_KERNEL_SIZE = 6
def train(iterations,
lr,
batch_size,
neig,
shards,
game,
step_lr=False,
decay=0.01,
rmsprop_decay=0.1,
log_image_every=10,
save_params_every=50000,
use_pfor=False,
per_example=False,
data_dir=None,
show_plots=False):
"""Sets up and starts training for SpIN on Atari video data."""
if data_dir is None:
data_dir = os.path.join(EXAMPLES_ROOT, 'atari_episodes')
conv_size = [64, 64, 64] # number of channels in each conv layer
conv_stride = [2, 2, 2] # stride of each conv layer
# number of units in fully connected layers
fc_size = [6400, 128, neig]
paddings = ['VALID', 'SAME', 'SAME']
nc_ = 4 # initial number of channels
ws = []
bs = []
for nc in conv_size:
stddev = 1 / np.sqrt(nc_ * _KERNEL_SIZE**2)
ws.append(
tf.Variable(
tf.truncated_normal([_KERNEL_SIZE, _KERNEL_SIZE, nc_, nc],
stddev=stddev)))
bs.append(tf.Variable(tf.zeros([nc])))
nc_ = nc
for i in range(1, len(fc_size)):
ws.append(tf.Variable(tf.truncated_normal([fc_size[i-1], fc_size[i]],
stddev=1/np.sqrt(fc_size[i-1]))))
bs.append(tf.Variable(tf.zeros([fc_size[i]])))
params = ws + bs
saver_path = '/tmp'
logging_config = {
'config': {
'lr': lr,
'decay': decay,
'batch_size': batch_size,
'rmsprop_decay': rmsprop_decay,
'game': game,
},
'log_image_every': log_image_every,
'save_params_every': save_params_every,
'saver_path': saver_path,
'saver_name': game + '_params',
}
stats_hooks = {
'create': spin.util.create_default_stats,
'update': spin.util.update_default_stats,
}
def _create_plots():
"""Hook to set up plots at start of run."""
frame_fig, frame_ax = plt.subplots(2, neig, figsize=(neig * 8, 8))
frame_im = []
for i in range(2):
for j in range(neig):
frame_ax[i, j].axis('off')
frame_im.append(frame_ax[i, j].imshow(
np.zeros((_IMAGE_SIZE, _IMAGE_SIZE)),
interpolation='none',
cmap='gray', vmin=0.0, vmax=255.0))
_, loss_ax = plt.subplots(1, 1)
return frame_fig, frame_im, loss_ax
def _update_plots(t,
outputs,
inputs,
frame_fig,
frame_im,
loss_ax,
losses=None,
eigenvalues=None,
eigenvalues_ma=None):
"""Hook to update the plots periodically."""
del losses
del eigenvalues
for i in range(neig):
ordered = np.argsort(outputs[:, i+1]) # sort features for this minibatch
frame_im[i].set_data(inputs[ordered[0], ..., -1])
frame_im[i+neig].set_data(inputs[ordered[-1], ..., -1])
frame_fig.canvas.draw()
frame_fig.canvas.flush_events()
loss_ax.cla()
loss_ax.plot(eigenvalues_ma[:t])
if t > 0:
ymin = eigenvalues_ma[max(0, t-1000):t].min()
ymax = eigenvalues_ma[max(0, t-1000):t].max()
ydiff = ymax - ymin
loss_ax.set_ylim([ymin-0.1*ydiff, ymax+0.1*ydiff])
plotting_hooks = {
'create': _create_plots,
'update': _update_plots,
}
global_step = tf.Variable(0.0, trainable=False)
def network_builder(x):
return spin.util.make_conv_network(x, conv_stride, paddings, ws, bs)
if step_lr:
lr = tf.train.exponential_decay(lr * decay, global_step, 100 / decay, 0.8)
optim = tf.train.RMSPropOptimizer(
lr, decay=(1.0 - decay * rmsprop_decay), centered=True)
logging.info('Loading game %s', game)
episodes = np.load(os.path.join(data_dir, '{}.npz'.format(game)))
frames = episodes['frames']
episode_starts = episodes['episode_starts']
batch = np.zeros((batch_size + 1, _IMAGE_SIZE, _IMAGE_SIZE, 4),
dtype=np.float32)
def _reader():
idx = np.random.randint(0, (_NFRAMES * shards) - batch_size - 4)
while np.any(episode_starts[idx+1:idx+batch_size+4]):
idx = np.random.randint(0, (_NFRAMES * shards) - batch_size - 4)
for i in range(batch_size+1):
batch[i] = frames[idx+i:idx+i+4].transpose((1, 2, 0))
return batch
data = tf.py_func(_reader, [], [tf.float32])[0]
data.set_shape([batch_size + 1, _IMAGE_SIZE, _IMAGE_SIZE, 4])
spectral_net = spin.SpectralNetwork(
spin.SlownessOperator(),
network_builder,
data,
params,
decay=decay,
use_pfor=use_pfor,
per_example=per_example)
spectral_net.train(
optim,
iterations,
logging_config,
stats_hooks,
plotting_hooks=plotting_hooks,
show_plots=show_plots,
global_step=global_step,
data_for_plotting=data)
def main(argv):
del argv
if FLAGS.neig < 2:
    raise ValueError('Number of eigenvalues must be at least 2.')
train(
iterations=FLAGS.niter,
lr=FLAGS.lr,
batch_size=FLAGS.batch_size,
neig=FLAGS.neig,
shards=FLAGS.shards,
step_lr=FLAGS.step_lr,
decay=FLAGS.decay,
rmsprop_decay=FLAGS.rmsprop_decay,
game=FLAGS.game,
log_image_every=FLAGS.log_image_every,
save_params_every=FLAGS.save_params_every,
use_pfor=FLAGS.use_pfor,
per_example=FLAGS.per_example,
data_dir=FLAGS.data_dir,
show_plots=FLAGS.show_plots)
if __name__ == '__main__':
app.run(main)
| spectral_inference_networks-master | spectral_inference_networks/examples/atari.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to approximate eigenfunctions by stochastic gradient descent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| spectral_inference_networks-master | spectral_inference_networks/examples/__init__.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D Hydrogen atom example from SpIN paper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import spectral_inference_networks as spin
import tensorflow as tf
flags.DEFINE_integer('neig', 9, 'Number of eigenvalues to compute')
flags.DEFINE_integer('niter', 1000000, 'Number of iterations')
flags.DEFINE_integer('ndim', 2, 'Dimension of space')
flags.DEFINE_integer('batch_size', 128, 'Self-explanatory')
flags.DEFINE_float('laplacian_eps', 0.0,
'Finite difference step for Laplacian Operator.')
flags.DEFINE_integer('log_image_every', 10,
'Write image of wavefn to log after this many iterations')
flags.DEFINE_integer('save_params_every', 50000,
'Save parameters to checkpoint after this many iterations')
flags.DEFINE_float('lim', 50.0, 'Limit of box')
flags.DEFINE_float('lr', 1e-5, 'Learning rate')
flags.DEFINE_float('decay', 0.01, 'Decay rate of moving averages')
flags.DEFINE_boolean('boundary', True, 'Force zero boundary condition')
flags.DEFINE_boolean('show_plots', True,
'Show pyplot plots. 2D slices at z=0 are used for ndim=3.')
flags.DEFINE_boolean('use_pfor', True, 'Use parallel_for.')
flags.DEFINE_boolean(
'per_example', False,
'Use a different strategy for computing covariance Jacobian')
flags.DEFINE_float('charge', 1.0, 'Nuclear charge of atom.')
FLAGS = flags.FLAGS
def train(iterations,
batch_size,
lr,
ndim=2,
apply_boundary=False,
neig=1,
decay=0.01,
laplacian_eps=0.1,
lim=20,
log_image_every=50000,
save_params_every=50000,
show_plots=False,
use_pfor=False,
per_example=False,
charge=0.5):
"""Configures and runs training loop."""
logging_config = {
'config': {
'lr': lr,
'decay': decay,
'batch_size': batch_size,
},
'log_image_every': log_image_every,
'save_params_every': save_params_every,
'saver_path': '/tmp',
'saver_name': 'hydrogen_params',
}
npts = 128
def _create_plots():
"""Hook to set up plots at start of run."""
nfig = max(2, int(np.ceil(np.sqrt(neig))))
psi_fig, psi_ax = plt.subplots(nfig, nfig, figsize=(10, 10))
psi_im = []
for i in range(nfig**2):
psi_ax[i // nfig, i % nfig].axis('off')
for i in range(neig):
psi_im.append(psi_ax[i // nfig, i % nfig].imshow(
np.zeros((npts, npts)), interpolation='none', cmap='plasma'))
_, loss_ax = plt.subplots(1, 1)
return psi_fig, psi_ax, psi_im, loss_ax
def _update_plots(t, outputs, inputs, psi_fig, psi_ax, psi_im, loss_ax,
losses=None, eigenvalues=None, eigenvalues_ma=None):
"""Hook to update the plots periodically."""
del inputs
del losses
del eigenvalues
nfig = max(2, int(np.ceil(np.sqrt(neig))))
loss_ax.cla()
loss_ax.plot(eigenvalues_ma[:t])
if ndim == 2:
# E(n;Z) = - Z^2 / [2*(n+1/2)^2]
# Quantum numbers: n=0, 1, ...; m_l = -n, -n+1, ... n
# degeneracy: 2n+1. Use k^2 as an upper bound to \sum 2n+1.
max_n = int(np.ceil(np.sqrt(neig))) + 1
tmp = []
for n in range(0, max_n):
for _ in range(2 * n + 1):
tmp.append(n)
quantum_nos = np.array(tmp)
ground_truth = -charge**2 / (2*(quantum_nos[:neig] + 0.5)**2)
elif ndim == 3:
# E(n;Z) = - Z^2 / (2n^2)
# Quantum numbers: n=1, 2, ...; l = 0, 1, ..., n-1; m_l = -l, -l+1, ... l
# degeneracy: n^2. Use k^3 as an upper bound to \sum n^2.
max_n = int(np.ceil(neig**(1./3))) + 1
tmp = []
for n in range(1, max_n):
for _ in range(n * n):
tmp.append(n)
quantum_nos = np.array(tmp)
ground_truth = - charge**2 / (2*quantum_nos[:neig]**2)
ground_truth /= 2.0 # convert back to units in the paper
for i in range(neig):
loss_ax.plot([0, t], [ground_truth[i], ground_truth[i]], '--')
loss_ax.set_ylim([1.0, ground_truth[0]-1])
for i in range(neig):
pimg = outputs[:, i].reshape(npts, npts)
psi_im[i].set_data(pimg)
psi_im[i].set_clim(pimg.min(), pimg.max())
psi_ax[i//nfig, i%nfig].set_title(eigenvalues_ma[t, i])
psi_fig.canvas.draw()
psi_fig.canvas.flush_events()
plotting_hooks = {
'create': _create_plots,
'update': _update_plots,
}
stats_hooks = {
'create': spin.util.create_default_stats,
'update': spin.util.update_default_stats,
}
k = neig
hid = (64, 64, 64, k)
h_ = ndim
ws = []
bs = []
for h in hid:
ws.append(tf.Variable(tf.random_normal([h_, h])/tf.sqrt(float(h_))))
bs.append(tf.Variable(tf.random_normal([h])))
h_ = h
params = ws + bs
def network_builder(x):
return spin.util.make_network(x, hid, ws, bs, apply_boundary, lim,
custom_softplus=not per_example)
if laplacian_eps == 0.0:
kinetic = -spin.ExactLaplacianOperator()
else:
kinetic = -spin.LaplacianOperator(eps=laplacian_eps)
potential = spin.DiagonalOperator(
lambda x: -charge / tf.norm(x, axis=1, keepdims=True))
hamiltonian = kinetic + potential
global_step = tf.Variable(0.0, trainable=False)
optim = tf.train.RMSPropOptimizer(lr, decay=0.999)
data_for_plotting = spin.util.grid_reader(ndim, lim, npts)
data = tf.random_uniform([batch_size, ndim], minval=-lim, maxval=lim)
spectral_net = spin.SpectralNetwork(
hamiltonian,
network_builder,
data,
params,
decay=decay,
use_pfor=use_pfor,
per_example=per_example)
stats = spectral_net.train(
optim,
iterations,
logging_config,
stats_hooks,
plotting_hooks=plotting_hooks,
show_plots=show_plots,
global_step=global_step,
data_for_plotting=data_for_plotting)
return stats
def main(argv):
del argv
if FLAGS.per_example and FLAGS.laplacian_eps == 0.0:
raise ValueError('Exact Laplacian is incompatible '
'with per-example Jacobian')
train(
iterations=FLAGS.niter,
batch_size=FLAGS.batch_size,
lr=FLAGS.lr,
ndim=FLAGS.ndim,
apply_boundary=FLAGS.boundary,
neig=FLAGS.neig,
decay=FLAGS.decay,
laplacian_eps=FLAGS.laplacian_eps,
lim=FLAGS.lim,
log_image_every=FLAGS.log_image_every,
save_params_every=FLAGS.save_params_every,
show_plots=FLAGS.show_plots,
use_pfor=FLAGS.use_pfor,
per_example=FLAGS.per_example,
charge=FLAGS.charge)
if __name__ == '__main__':
app.run(main)
| spectral_inference_networks-master | spectral_inference_networks/examples/hydrogen.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package implementing Spectral Inference Networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import logging
import matplotlib.pyplot as plt
from spectral_inference_networks.src import util
import tensorflow as tf
from tensorflow.python.ops import parallel_for as pfor # pylint: disable=g-direct-tensorflow-import
assert util, ('spectral_inference_networks.src.util must be imported.')
def _collapse_first_dim(x):
new_shape = tf.concat([[-1], tf.shape(x)[2:]], axis=0)
return tf.reshape(x, new_shape)
def _objective_grad(xx, obj, grad_loss, grad_eigval, grad_chol):
"""Symbolic form of the gradient of the objective with stop_gradients."""
del grad_eigval
del grad_chol
with tf.name_scope('objective_grad'):
chol = tf.cholesky(xx)
choli = tf.linalg.inv(chol)
rq = tf.matmul(choli, tf.matmul(obj, choli, transpose_b=True))
dl = tf.diag(tf.matrix_diag_part(choli))
triu = tf.matrix_band_part(tf.matmul(rq, dl), 0, -1)
gxx = -1.0 * tf.matmul(choli, triu, transpose_a=True)
gobj = tf.matmul(choli, dl, transpose_a=True)
return grad_loss * gxx, grad_loss * gobj
@tf.custom_gradient
def _objective(xx, obj):
"""Objective function as custom op so that we can overload gradients."""
with tf.name_scope('objective'):
chol = tf.cholesky(xx)
choli = tf.linalg.inv(chol)
rq = tf.matmul(choli, tf.matmul(obj, choli, transpose_b=True))
eigval = tf.matrix_diag_part(rq)
loss = tf.trace(rq)
grad = functools.partial(_objective_grad, xx, obj)
return (loss, eigval, chol), grad
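# Minimal numpy sketch (not part of the original module; the
# `_example_check_objective_identity` name is hypothetical). It checks the
# identity behind `_objective` above: with Sigma the feature covariance and
# Pi the operator covariance, tr(L^{-1} Pi L^{-T}) for L = chol(Sigma) equals
# tr(Sigma^{-1} Pi), i.e. the sum of the generalized eigenvalues of the pencil
# (Pi, Sigma). numpy is imported locally since this module only imports TF.
def _example_check_objective_identity():
  import numpy as np
  rng = np.random.RandomState(0)
  a = rng.randn(4, 4)
  sigma = a.dot(a.T) + 4 * np.eye(4)  # positive-definite feature covariance
  b = rng.randn(4, 4)
  pi = 0.5 * (b + b.T)  # symmetric operator covariance
  chol = np.linalg.cholesky(sigma)
  choli = np.linalg.inv(chol)
  rq = choli.dot(pi).dot(choli.T)
  gen_eigvals = np.linalg.eigvals(np.linalg.solve(sigma, pi))
  assert np.isclose(np.trace(rq), np.sum(gen_eigvals.real))
  return np.trace(rq)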
@tf.custom_gradient
def _covariance(x, y):
"""Covariance function as custom op."""
with tf.name_scope('covariance'):
cov = tf.matmul(x, y, transpose_a=True) / tf.cast(tf.shape(x)[0], x.dtype)
def _cov_grad(grad):
with tf.name_scope('cov_grad'):
return (tf.matmul(y, grad) / tf.cast(tf.shape(x)[0], x.dtype),
tf.matmul(x, grad) / tf.cast(tf.shape(x)[0], x.dtype))
return cov, _cov_grad
class LinearOperator(object):
"""Base class for different linear operators that can be combined."""
def __init__(self, op=None):
self._op = op
def build_network(self, f, x):
"""Build network from a builder f'n for the network 'f' and data 'x'."""
self.f = f(x)
return self.f
def build_op(self, x, logpdf=None):
"""Build op from data 'x'."""
del x
del logpdf
raise ValueError('build_op not implemented in derived class.')
def build(self, f, x):
"""Combines build_network and build_op."""
fx = self.build_network(f, x)
# For per_example Jacobian computation, the features and the Jacobian
# of the features must be created at the same time.
# Note that this only works if:
# 1) build_network is never called externally
# 2) build is never overridden by a child class
if isinstance(fx, tuple):
self._jac = fx[1]
self.f = fx[0]
op = self.build_op(x)
return self.f, op
@property
def op(self):
return self._op
@property
def jac(self):
# Only exists if we are computing the Jacobian per-example.
return self._jac
def __add__(self, x):
return AddOperator(self, x)
def __sub__(self, x):
return AddOperator(self, -x)
def __mul__(self, c):
return ScaleOperator(c, self)
def __rmul__(self, c):
return ScaleOperator(c, self)
def __neg__(self):
return -1*self
# Comparison operators:
# Only used to decide order precedence for calling build_network.
def __lt__(self, x):
# Only Laplacians or things made of Laplacians take precedence.
if isinstance(x, ScaleOperator):
return self < x.x
if isinstance(x, AddOperator):
return self < x.x or self < x.y
return (isinstance(x, LaplacianOperator) or
isinstance(x, ExactLaplacianOperator))
def __le__(self, x):
return True # only override this for super-classes.
def __gt__(self, x):
return False
def __ge__(self, x):
return not self < x
class ScaleOperator(LinearOperator):
"""Linear operator formed by scaling."""
def __init__(self, c, x):
super(ScaleOperator, self).__init__()
self.c = c
self.x = x
def build_network(self, f, x):
self.f = self.x.build_network(f, x)
return self.f
def build_op(self, x):
self._op_x = self.x.build_op(x)
self._op = self.c * self._op_x
return self._op
def __lt__(self, x):
return self.x < x
def __le__(self, x):
return self.x <= x
def __gt__(self, x):
return self.x > x
def __ge__(self, x):
return self.x >= x
class AddOperator(LinearOperator):
"""Linear operator formed by adding two operators together."""
def __init__(self, x, y):
super(AddOperator, self).__init__()
self.x = x
self.y = y
def build_network(self, f, x):
# Use comparison to choose precedence for order of building network.
if self.x >= self.y:
self.f = self.x.build_network(f, x)
self.y.f = self.f
else:
self.f = self.y.build_network(f, x)
self.x.f = self.f
return self.f
def build_op(self, x, logpdf=None):
self._op_x = self.x.build_op(x)
self._op_y = self.y.build_op(x)
self._op = self._op_x + self._op_y
return self._op
def __lt__(self, x):
return self.x < x and self.y < x
def __le__(self, x):
return self.x <= x and self.y <= x
def __gt__(self, x):
return not self <= x
class LaplacianOperator(LinearOperator):
"""Finite difference Laplacian operator."""
def __init__(self, eps):
super(LaplacianOperator, self).__init__()
self._eps = eps
def _perturbation(self, x, eps):
ndim = x.shape.as_list()[1] # dimension of position vector (i.e. 1,2,3).
xs = [x]
for i in range(ndim):
xs.append(x + eps * tf.one_hot(i, ndim))
xs.append(x - eps * tf.one_hot(i, ndim))
return tf.concat(xs, axis=0)
def build_network(self, f, x):
"""Build operator from a builder f'n for the network 'f' and data 'x'."""
xs = self._perturbation(x, self._eps)
fx = f(xs) # build network, then return it at the end.
ndim = x.shape.as_list()[1]
if isinstance(fx, tuple):
jac = [tf.split(j, 2*ndim+1, axis=0)[0] for j in fx[1]]
fx = fx[0]
else:
jac = None
# Split into [f(x), f(x+eps*e_i), f(x-eps*e_i), ...] for basis
# vectors {e_i}.
self._fxs = tf.split(fx, 2*ndim+1, axis=0)
if jac is not None:
self.f = (self._fxs[0], jac)
else:
self.f = self._fxs[0]
return self.f
def build_op(self, x):
"""Build operator from a builder f'n for the network 'f' and data 'x'."""
ndim = x.shape.as_list()[1]
# d^2/dx_i^2 for each basis vector using finite differences.
lapl = 0.0
for i in range(ndim):
lapl += self._fxs[2*i+1] + self._fxs[2*i+2] - 2*self._fxs[0]
lapl /= self._eps**2
self._op = _covariance(self._fxs[0], lapl)
return self._op
def __lt__(self, x):
return False
def __le__(self, x):
if isinstance(x, ScaleOperator):
return self <= x.x
if isinstance(x, AddOperator):
return self <= x.x or self <= x.y
return (isinstance(x, LaplacianOperator) or
isinstance(x, ExactLaplacianOperator))
def __gt__(self, x):
return not self <= x
def __ge__(self, x):
return True
def laplacian(f, x):
"""Computes exact Laplacian of f(x). Beware - scales poorly with x."""
if isinstance(x, list):
raise ValueError('Input to laplacian must be a single tensor')
if len(f.shape) == 2:
return tf.stack(
[laplacian(f[:, i], x) for i in range(f.shape.as_list()[1])], axis=1)
elif len(f.shape) == 1:
dx = tf.reshape(tf.gradients(f, x)[0],
(x.get_shape()[0], -1)) # first dim is batch
ddx = []
for i in range(dx.get_shape().as_list()[1]):
ddx.append(tf.reshape(tf.gradients(dx[:, i], x)[0],
(x.get_shape()[0], -1))[:, i])
lapl = tf.add_n(ddx)
return lapl
else:
raise ValueError('Shape of batch must be 1D or 2D')
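if __name__ == '__main__':
  # Editor's illustrative sketch, not part of the original library: sanity-check
  # the exact Laplacian above on f(x) = sum_i x_i^2, whose Laplacian is 2*ndim
  # everywhere. Assumes TF1 graph mode, as used throughout this file.
  x_demo = tf.constant([[1.0, 2.0], [0.5, -0.5]])
  f_demo = tf.reduce_sum(x_demo**2, axis=1)
  with tf.Session() as sess:
    print(sess.run(laplacian(f_demo, x_demo)))  # Expected: [4. 4.]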
class ExactLaplacianOperator(LinearOperator):
"""Exact difference Laplacian operator."""
def __init__(self):
super(ExactLaplacianOperator, self).__init__()
def build_op(self, x):
"""Builds operator from a builder f'n for the network 'f' and data 'x'."""
if isinstance(self.f, tuple):
f = self.f[0]
else:
f = self.f
lapl = laplacian(f, x)
self._op = _covariance(f, lapl)
return self._op
def __lt__(self, x):
return False
def __le__(self, x):
if isinstance(x, ScaleOperator):
return self <= x.x
if isinstance(x, AddOperator):
return self <= x.x or self <= x.y
return (isinstance(x, LaplacianOperator) or
isinstance(x, ExactLaplacianOperator))
def __gt__(self, x):
return not self <= x
def __ge__(self, x):
return True
class DiagonalOperator(LinearOperator):
"""Operator equivalent to diagonal matrix."""
def __init__(self, builder):
super(DiagonalOperator, self).__init__()
self._builder = builder
def build_op(self, x):
kx = self._builder(x)
if isinstance(self.f, tuple):
self._op = _covariance(self.f[0], kx * self.f[0])
else:
self._op = _covariance(self.f, kx * self.f)
return self._op
class KernelOperator(LinearOperator):
"""Operator from a symmetric kernel."""
def __init__(self, kernel):
super(KernelOperator, self).__init__()
self._kernel = kernel
def build_op(self, x):
x1, x2 = tf.split(x, 2, axis=0)
fx1, fx2 = tf.split(self.f, 2, axis=0)
kval = self._kernel(x1, x2)
self._op = _covariance(fx1, kval * fx2)
return self._op
class SlownessOperator(LinearOperator):
"""Kernel for slow feature analysis."""
def build_op(self, x):
diff = self.f[:-1] - self.f[1:]
self._op = _covariance(diff, diff)
return self._op
class SpectralNetwork(object):
"""Class that constructs operators for SpIN and includes training loop."""
def __init__(self, operator, network, data, params,
decay=0.0, use_pfor=True, per_example=False):
"""Creates all ops and variables required to train SpIN.
Args:
operator: The linear operator to diagonalize.
network: A function that returns the TensorFlow op for the output of the
spectral inference network when provided an op for the input.
data: A TensorFlow op for the input to the spectral inference network.
params: The trainable parameters of the model built by 'network'.
decay (optional): The decay parameter for the moving average of the
network covariance and Jacobian.
use_pfor (optional): If true, use the parallel_for package to compute
Jacobians. This is often faster but has higher memory overhead.
per_example (optional): If true, computes the Jacobian of the network
output covariance using a more complicated but often faster method.
This interacts badly with anything that uses custom_gradients, so needs
to be avoided for some code branches.
"""
self.operator = operator
self.data = data
self.params = params
self.decay = decay
self.use_pfor = use_pfor
self.per_example = per_example
if per_example and decay != 0.0:
def network_builder(x):
"""Wraps the function 'network' to compute per-example."""
def loop_fn(i):
x_i = tf.expand_dims(tf.gather(x, i), 0)
features = network(x_i)
jac = pfor.jacobian(features, params, use_pfor=use_pfor)
return features, jac
if use_pfor:
features, jac = pfor.pfor(loop_fn, x.shape[0])
else:
loop_fn_dtypes = [tf.float32, [tf.float32] * len(params)]
features, jac = pfor.for_loop(loop_fn, loop_fn_dtypes, data.shape[0])
raise NotImplementedError(
'use_pfor=False + per_example=True is not yet working.')
features = _collapse_first_dim(features)
features.set_shape(network(x).shape)
jac = [_collapse_first_dim(y) for y in jac]
for p, j in zip(params, jac):
j.set_shape(features.shape.as_list() + p.shape.as_list())
# Note: setting rank=2 so that we use matmul for covariance below
# instead of batch_matmul.
return features, jac
else:
network_builder = network
self.network_builder = network_builder
self.features, self.sigma, self.pi = self._covariances(
operator, network_builder, data)
feat_jac = None
if per_example and decay != 0.0:
feat_jac = operator.jac
outputs = self._training_update(
self.sigma,
self.pi,
self.params,
decay=decay,
use_pfor=use_pfor,
features=self.features,
jac=feat_jac)
self.loss, self.gradients, self.eigenvalues, self.chol = outputs
def _moving_average(self, x, c):
"""Creates moving average operation.
Args:
x: The tensor or list of tensors of which to take a moving average.
c: The decay constant of the moving average, between 0 and 1.
0.0 = the moving average is constant
        1.0 = the moving average has no memory
Returns:
ma: Moving average variables.
ma_update: Op to update moving average.
"""
if isinstance(x, list):
mas = [self._moving_average(y, c) for y in x]
return [m[0] for m in mas], [m[1] for m in mas]
if len(x.shape) == 2 and x.shape[0] == x.shape[1]:
ma = tf.Variable(tf.eye(x.shape.as_list()[0]), trainable=False)
else:
ma = tf.Variable(tf.zeros_like(x), trainable=False)
ma_update = tf.assign(ma, (1-c)*ma + c*tf.reshape(x, ma.shape))
return ma, ma_update
def _covariances(self, operator, network, x):
"""Constructs loss with custom gradient for SpIN.
Args:
operator: The linear operator to diagonalize.
network: A function that returns the TensorFlow op for the output of the
spectral inference network when provided an op for the input.
x: The data used as input to network.
Returns:
u: The output of the spectral inference network.
sigma: The covariance of the outputs of the network.
pi: The matrix of network output covariances multiplied by the linear
operator to diagonalize. See paper for explicit definition.
"""
u, pi = operator.build(network, x)
sigma = _covariance(u, u)
sigma.set_shape((u.shape[1], u.shape[1]))
pi.set_shape((u.shape[1], u.shape[1]))
return u, sigma, pi
def _training_update(self,
sigma,
pi,
params,
decay=0.0,
use_pfor=False,
features=None,
jac=None):
"""Makes gradient and moving averages.
Args:
sigma: The covariance of the outputs of the network.
pi: The matrix of network output covariances multiplied by the linear
operator to diagonalize. See paper for explicit definition.
params: The trainable parameters.
decay (optional): The decay parameter for the moving average of the
network covariance and Jacobian.
use_pfor (optional): If true, use the parallel_for package to compute
Jacobians. This is often faster but has higher memory overhead.
features (optional): The output features of the spectral inference
network. Only necessary if per_example=True.
jac (optional): The Jacobian of the network. Only necessary if
per_example=True.
Returns:
loss: The loss function for SpIN - the sum of eigenvalues.
gradients: The approximate gradient of the loss using moving averages.
eigvals: The full array of eigenvalues, rather than just their sum.
chol: The Cholesky decomposition of the covariance of the network outputs,
which is needed to demix the network outputs.
"""
if isinstance(decay, float):
assert decay >= 0.0 and decay < 1.0
if decay == 0.0:
# Equivalent to not using the moving averages at all.
loss, eigval, chol = _objective(sigma, pi) # pylint: disable=unbalanced-tuple-unpacking
gradients = tf.gradients(loss, params)
else:
if jac is not None:
sig_feat_jac = pfor.jacobian(sigma, features, use_pfor=use_pfor)
sigma_jac = [tf.tensordot(sig_feat_jac, y, axes=2) for y in jac]
else:
sigma_jac = pfor.jacobian(sigma, params, use_pfor=use_pfor)
for p, sj in zip(params, sigma_jac):
sj.set_shape(sigma.shape.as_list() + p.shape.as_list())
sigma_avg, update_sigma = self._moving_average(sigma, decay)
sigma_jac_avg, update_sigma_jac = self._moving_average(sigma_jac, decay)
n = tf.reduce_prod(tf.shape(sigma))
with tf.control_dependencies(update_sigma_jac + [update_sigma]):
loss, eigval, chol = _objective(sigma_avg, pi) # pylint: disable=unbalanced-tuple-unpacking
sigma_back = tf.gradients(loss, sigma_avg)[0]
gradients = []
for s, p, g in zip(sigma_jac_avg, params, tf.gradients(loss, params)):
gradients.append(
tf.reshape(
tf.matmul(
tf.reshape(sigma_back,
(1, n)), tf.reshape(s, (n, -1))), p.shape) + g)
return loss, gradients, eigval, chol
def train(
self,
optim,
iterations,
logging_config,
stats_hooks,
plotting_hooks=None,
show_plots=False,
global_step=None,
data_for_plotting=None):
"""Training loop for SpIN, with hooks for logging and plotting.
Args:
optim: The TensorFlow optimizer to minimize the SpIN loss.
iterations: The number of iterations to train for.
logging_config: A dictionary for logging. The field 'config' is logged
at the beginning of training, with metadata about the run, while the
fields 'saver_path' and 'saver_name' are for setting up checkpointing
and 'log_image_every' and 'save_params_every' set the number of
iterations after which logging and checkpoint saving occur.
stats_hooks: A dictionary with two fields, 'create' and 'update', both of
which are functions that take no arguments. 'create' sets up the data
structures for logging stats while 'update' updates them.
plotting_hooks (optional): If show_plots is true, this dictionary must be
provided. Has the same format as 'stats_hooks'.
show_plots (optional): A boolean. If true, will plot results to the GUI.
global_step (optional): A TensorFlow op that tracks the number of
iterations. If none is provided, one is created.
data_for_plotting (optional): If different data is needed for updating
plots than for training, this op will return that data.
Returns:
A dictionary of statistics accumulated during the training run.
"""
if show_plots:
plt.ion()
if plotting_hooks is None:
raise ValueError('Plotting hooks are required if show_plots=True')
plots = plotting_hooks['create']()
saver_path = logging_config['saver_path']
saver = tf.train.Saver(var_list=self.params)
if global_step is None:
global_step = tf.Variable(0.0, trainable=False, name='global_step')
if data_for_plotting is not None:
features_for_plotting = self.network_builder(data_for_plotting)
# If per_example is true, the network builder will return a
# (features, jacobian) tuple. For plotting, we can discard the latter.
if isinstance(features_for_plotting, tuple):
features_for_plotting = features_for_plotting[0]
features_for_plotting = tf.transpose(
tf.matrix_triangular_solve(self.chol,
tf.transpose(features_for_plotting)))
neig = self.features.shape.as_list()[-1]
stats = stats_hooks['create'](iterations, neig)
logging.info(logging_config['config'])
update_global_step = tf.assign(global_step, global_step + 1)
step = optim.apply_gradients(zip(self.gradients, self.params))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('Initialized variables')
for t in range(iterations):
sess.run(update_global_step)
loss_, eigenvalues_, _ = sess.run([self.loss, self.eigenvalues, step])
stats_hooks['update'](t, loss_, eigenvalues_, **stats)
current_stats = dict((key, stats[key][t]) for key in stats)
logging.info(current_stats)
if t % logging_config['save_params_every'] == 0:
saver.save(sess,
saver_path + '/' + logging_config['saver_name'],
global_step=t)
if t % logging_config['log_image_every'] == 0:
if data_for_plotting is not None and show_plots:
outputs = sess.run(features_for_plotting)
inputs = sess.run(data_for_plotting)
plotting_hooks['update'](t, outputs, inputs, *plots, **stats)
plt.show()
plt.pause(0.01)
return stats
| spectral_inference_networks-master | spectral_inference_networks/src/spin.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for SpIN - mostly plotting, logging and network building."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow as tf
def create_default_stats(iterations, neig):
"""Default stats creation function to be passed in as stats hook.
  To be passed in to the training loop and called back once the number of
  eigenvalues has been determined.
Args:
iterations: Number of iterations in training loop.
    neig: Number of eigenvalues to track.
Returns:
Dict of numpy arrays keyed by stat name.
"""
losses = np.zeros((iterations), dtype=np.float32)
eigenvalues = np.zeros((iterations, neig), dtype=np.float32)
eigenvalues_ma = np.zeros((iterations, neig), dtype=np.float32)
return {
'losses': losses,
'eigenvalues': eigenvalues,
'eigenvalues_ma': eigenvalues_ma,
}
def update_default_stats(t, current_loss, current_eigenvalues, losses,
eigenvalues, eigenvalues_ma):
"""Update callback for the default stats created above.
To be passed into training loop and called back once per training step.
Updates total collections with stats from specified training step.
Args:
t: Training step index.
current_loss: Loss at training step `t`.
    current_eigenvalues: Eigenvalues at training step `t`.
losses: Collection of all losses, to be updated at index `t`.
    eigenvalues: Collection of all eigenvalues, to be updated at index `t`.
    eigenvalues_ma: Collection of moving averages for eigenvalues, to be
updated at index `t`.
"""
losses[t] = current_loss
eigenvalues[t] = current_eigenvalues
decay = 0.01
if t > 0:
eigenvalues_ma[t] = (
decay * current_eigenvalues + (1 - decay) * eigenvalues_ma[t - 1])
else:
eigenvalues_ma[t] = current_eigenvalues
@tf.custom_gradient
def _my_softplus(x):
def grad(dy):
return tf.nn.sigmoid(x) * dy
return tf.nn.softplus(x), grad
def _add_mask(x, y, lim):
"""Makes boundary conditions for network (fixed box)."""
# Force the wavefunction to zero at the boundaries of the box defined by
# [-lim, lim].
mask = 1.0
for i in range(x.shape.as_list()[1]):
mask *= tf.maximum((tf.sqrt(2 * lim**2 - x[:, i]**2) - lim) / lim, 0)
return tf.expand_dims(mask, -1) * y
def make_network(x, hid, ws, bs, apply_boundary, lim, custom_softplus=False):
"""Constructs network and loss function.
Args:
x: Input to the network.
hid: List of shapes of the hidden layers of the networks.
ws: List of weights of the network.
bs: List of biases of the network.
apply_boundary: If true, force network output to be zero at boundary.
lim: The limit of the network, if apply_boundary is true.
    custom_softplus (optional): If true, use the softplus with a custom
      gradient defined above instead of tf.nn.softplus.
Returns:
    Output of multi-layer perceptron network.
"""
inp = x
my_softplus = _my_softplus if custom_softplus else tf.nn.softplus
for i in range(len(hid)-1):
inp = my_softplus(tf.matmul(inp, ws[i]) + bs[i])
y = tf.matmul(inp, ws[-1]) + bs[-1]
if apply_boundary:
return _add_mask(x, y, lim)
else:
return y
def make_conv_network(x, conv_stride, paddings, ws, bs):
"""Creates convolutional network.
Args:
x: Input to the convnet.
conv_stride: List of strides of the convolutions, one per layer.
paddings: List of paddings of the convolutions, one per layer.
ws: List of weights. Conv or fully-connected inferred by shape.
bs: List of biases.
Returns:
Output of convolutional neural network.
"""
inp = x
nh = len(ws)
for i in range(nh-1):
weight = ws[i]
if len(weight.shape) == 4:
stride = conv_stride[i]
inp = tf.nn.relu(tf.nn.conv2d(inp, weight, [1, stride, stride, 1],
padding=paddings[i]) + bs[i])
# flatten if this is the last conv layer
if len(ws[i+1].shape) == 2:
inp = tf.reshape(inp, [inp.shape[0], np.prod(inp.shape[1:])])
else:
inp = tf.nn.relu(tf.matmul(inp, weight) + bs[i])
features = tf.matmul(inp, ws[-1]) + bs[-1]
dim0 = tf.shape(inp)[0]
const_feature = tf.ones((dim0, 1))
features = tf.concat((const_feature, features), 1)
return features
def grid_reader(dim, lim, points=128):
"""Creates a reader function for generating a grid of position vectors.
Args:
dim: Dimension of position vector.
lim: Limit of the cell. Each vector component is in [-lim, lim].
points: Number of points to generate along each axis.
Returns:
A tensorflow op containing a constant grid of the n-dim box defined by
[-lim, lim] along each axis. A 2D plane defined by hyperplane is generated
for n>2-D systems.
Raises:
ValueError: len(hyperplane) + 2 != ndim.
"""
hyperplane = [0 for _ in range(dim - 2)]
if len(hyperplane) + 2 != dim:
raise ValueError('Incorrect number of hyperplane values specified.')
xx = np.linspace(-lim, lim, points, dtype=np.float32)
yy = np.linspace(-lim, lim, points, dtype=np.float32)
if dim == 1:
grid = xx
elif dim == 2:
grid = np.meshgrid(xx, yy)
else:
zz = [np.linspace(z_i, z_i, 1, dtype=np.float32) for z_i in hyperplane]
grid = np.meshgrid(xx, yy, *zz)
xyz = np.array(grid).T.reshape(-1, dim)
return tf.constant(xyz)
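if __name__ == '__main__':
  # Editor's illustrative sketch, not part of the original library: build a
  # small 2D evaluation grid with grid_reader and inspect its shape. Assumes
  # TF1 graph mode, consistent with the rest of this file.
  grid_op = grid_reader(dim=2, lim=1.0, points=4)
  with tf.Session() as sess:
    print(sess.run(grid_op).shape)  # Expected: (16, 2)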
| spectral_inference_networks-master | spectral_inference_networks/src/util.py |
# Copyright 2018-2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library to approximate eigenfunctions by stochastic gradient descent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| spectral_inference_networks-master | spectral_inference_networks/src/__init__.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for the LearnedSimulator model."""
import jax
import jax.numpy as jnp
import jraph
import tree
from inverse_design.src import normalizers
def flatten_features(input_graph,
connectivity_radius,
is_padded_graph,
apply_normalization=False):
"""Returns a graph with a single array of node and edge features."""
  # Normalize the elements of the graph.
if apply_normalization:
graph_elements_normalizer = normalizers.GraphElementsNormalizer(
template_graph=input_graph,
is_padded_graph=is_padded_graph)
# Computing relative distances in the model.
if "relative_world_position" not in input_graph.edges:
input_graph = _add_relative_distances(
input_graph)
# Extract important features from the position_sequence.
position_sequence = input_graph.nodes["world_position"]
velocity_sequence = time_diff(position_sequence) # Finite-difference.
# Collect node features.
node_features = []
# Normalized velocity sequence, flattening spatial axis.
flat_velocity_sequence = jnp.reshape(velocity_sequence,
[velocity_sequence.shape[0], -1])
if apply_normalization:
flat_velocity_sequence = graph_elements_normalizer.normalize_node_array(
"velocity_sequence", flat_velocity_sequence)
node_features.append(flat_velocity_sequence)
# Material types (one-hot, does not need normalization).
node_features.append(jax.nn.one_hot(input_graph.nodes["material_type(9)"], 9))
# Collect edge features.
edge_features = []
# Relative distances and norms.
relative_world_position = input_graph.edges["relative_world_position"]
relative_world_distance = safe_edge_norm(
input_graph.edges["relative_world_position"],
input_graph,
is_padded_graph,
keepdims=True)
if apply_normalization:
    # Scale determined by connectivity radius.
relative_world_position = relative_world_position / connectivity_radius
relative_world_distance = relative_world_distance / connectivity_radius
edge_features.append(relative_world_position)
edge_features.append(relative_world_distance)
# Handle normalization.
node_features = jnp.concatenate(node_features, axis=-1)
edge_features = jnp.concatenate(edge_features, axis=-1)
return input_graph._replace(
nodes=node_features,
edges=edge_features,
globals=None,
)
def time_diff(input_sequence):
"""Compute finnite time difference."""
return input_sequence[:, 1:] - input_sequence[:, :-1]
def safe_edge_norm(array, graph, is_padded_graph, keepdims=False):
"""Compute vector norm, preventing nans in padding elements."""
if is_padded_graph:
padding_mask = jraph.get_edge_padding_mask(graph)
epsilon = 1e-8
perturb = jnp.logical_not(padding_mask) * epsilon
array += jnp.expand_dims(perturb, range(1, len(array.shape)))
return jnp.linalg.norm(array, axis=-1, keepdims=keepdims)
def _add_relative_distances(input_graph,
use_last_position_only=True):
"""Computes relative distances between particles and with walls."""
# If these exist, there is probably something wrong.
assert "relative_world_position" not in input_graph.edges
assert "clipped_distance_to_walls" not in input_graph.nodes
input_graph = tree.map_structure(lambda x: x, input_graph) # Avoid mutating.
particle_pos = input_graph.nodes["world_position"]
if use_last_position_only:
particle_pos = particle_pos[:, -1]
input_graph.edges["relative_world_position"] = (
particle_pos[input_graph.receivers] - particle_pos[input_graph.senders])
return input_graph
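if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: finite
  # difference of a toy position sequence shaped [num_nodes, num_steps, dim],
  # the layout consumed by flatten_features above.
  toy_positions = jnp.arange(2 * 3 * 2, dtype=jnp.float32).reshape(2, 3, 2)
  toy_velocities = time_diff(toy_positions)
  print(toy_velocities.shape)  # Expected: (2, 2, 2)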
| inverse_design-main | src/model_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Watercourse 3D environment utils."""
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
import numpy as np
import tree
from inverse_design.src import connectivity_utils
NORMAL = 0
OBSTACLE = 1
INFLOW = 4
# for eliminating stray particles from pipe
OOB_AREA = 1.5
def _update_edges(input_graph, obstacle_edges, radius):
"""Recomputes particle edges, adds obstacle edges."""
# get input graph nodes corresponding to fluid
query_mask = ~input_graph.nodes["external_mask"]
  # get input graph nodes that are either fluid or obstacle
valid_mask = query_mask | input_graph.nodes["obstacle_mask"]
max_edges = input_graph.senders.shape[0]
num_obstacle_edges = obstacle_edges.shape[0]
# compute the sender and receiver edges for fluid-fluid and fluid-obstacle
# interactions.
n_edge, senders, receivers = connectivity_utils.compute_fixed_radius_connectivity_jax(
input_graph.nodes["world_position"][:, -1],
n_node=input_graph.n_node[:-1], max_edges=max_edges - num_obstacle_edges,
radius=radius, query_mask=query_mask, node_mask=valid_mask)
# update edges to include obstacle edges and new fluid-fluid edges
return input_graph._replace(
senders=jnp.concatenate([obstacle_edges[:, 0], senders], axis=0),
receivers=jnp.concatenate([obstacle_edges[:, 1], receivers], axis=0),
n_edge=n_edge.at[0].set(n_edge[0] + num_obstacle_edges))
def forward(input_graph, new_particles, network, haiku_model, obstacle_edges,
radius):
"""Runs model and post-processing steps in jax, returns position sequence."""
@hk.transform_with_state
def model(inputs):
return haiku_model()(inputs)
rnd_key = jax.random.PRNGKey(42) # use a fixed random key
# only run for a single graph (plus one padding graph), update graph with
# obstacle edges
assert len(input_graph.n_node) == 2, "Not a single padded graph."
graph = tree.map_structure(lambda x: x, input_graph)
graph = _update_edges(graph, obstacle_edges, radius)
# build material type
pattern = jnp.ones_like(graph.nodes["external_mask"], dtype=jnp.int32)
inflow_mask = jnp.any(~graph.nodes["mask_stack"], axis=-1)
graph.nodes["material_type(9)"] = jnp.where(
graph.nodes["external_mask"], pattern * OBSTACLE,
jnp.where(inflow_mask, pattern * INFLOW,
pattern * NORMAL))
graph.nodes["type/particles"] = None
# run model
prev_pos = input_graph.nodes["world_position"]
model_out = model.apply(network["params"], network["state"], rnd_key, graph)
pred_pos = model_out[0][0].nodes["p:world_position"]
total_nodes = jnp.sum(input_graph.n_node[:-1])
node_padding_mask = jnp.arange(prev_pos.shape[0]) < total_nodes
# update history, reset external particles
next_pos_seq = jnp.concatenate([prev_pos[:, 1:], pred_pos[:, None]], axis=1)
mask = (~input_graph.nodes["external_mask"]) & node_padding_mask
next_pos_seq = jnp.where(mask[:, None, None], next_pos_seq, prev_pos)
# add new particles, remove old particles that go below the floor surface
delete_particles = next_pos_seq[:, -1, 1] <= 0
delete_particles &= graph.nodes["mask_stack"][:, -1]
particle_mask = graph.nodes["mask_stack"][:, -1] & ~delete_particles
particle_mask |= new_particles
mask_stack = jnp.concatenate(
[graph.nodes["mask_stack"][:, 1:], particle_mask[:, None]], axis=1)
# create new node features and update graph
new_node_features = {
**input_graph.nodes,
"world_position": next_pos_seq,
"mask_stack": mask_stack,
"external_mask": ~particle_mask,
"deleted": graph.nodes["deleted"] | delete_particles,
}
return input_graph._replace(nodes=new_node_features)
def build_initial_graph(input_graphs, max_edges):
"""Builds initial padded graphs tuple from typed graph."""
obstacle_edges = np.stack(
[input_graphs[0].senders, input_graphs[0].receivers], axis=1)
graph = tree.map_structure(lambda x: x.copy(), input_graphs[0])
# clear graph edges
dummy_edge = np.zeros((0,), dtype=np.int32)
graph = graph._replace(
senders=dummy_edge,
receivers=dummy_edge,
n_edge=np.array([0], dtype=np.int32))
# build inflow stack
inflow_stack = []
init_pos = graph.nodes["world_position"]
for cur_graph in input_graphs:
mask_stack = cur_graph.nodes["mask_stack"]
cur_pos = cur_graph.nodes["world_position"]
new_particles = mask_stack[:, -1] & (~mask_stack[:, -2])
init_pos[new_particles] = cur_pos[new_particles]
new_particles = np.concatenate([new_particles, [False]])
inflow_stack.append(new_particles)
inflow_stack = np.stack(inflow_stack[1:], axis=0)
graph.nodes["world_position"] = init_pos
graph.nodes["deleted"] = np.zeros(init_pos.shape[0], dtype=np.bool)
# fix stray particles
stray_particles = init_pos[:, -1, 1] > OOB_AREA
graph.nodes["mask_stack"][stray_particles] = False
graph.nodes["external_mask"][stray_particles] = True
# pad to maximum node, edge values and add padding graph
max_n_node = graph.n_node.sum() + 1
graphs_tuple = jraph.pad_with_graphs(graph, n_node=max_n_node,
n_edge=max_edges, n_graph=2)
return obstacle_edges, inflow_stack, graphs_tuple
def rollout(initial_graph, inflow_stack, network, haiku_model, obstacle_edges,
radius):
"""Runs a jittable model rollout."""
@jax.checkpoint
def _step(graph, inflow_mask):
out_graph = forward(graph, inflow_mask, network, haiku_model,
obstacle_edges, radius)
out_data = dict(
pos=out_graph.nodes["world_position"][:, -1],
mask=out_graph.nodes["mask_stack"][:, -1])
return out_graph, out_data
final_graph, trajectory = jax.lax.scan(_step, init=initial_graph,
xs=inflow_stack)
return final_graph, trajectory
def make_plain_obstacles(num_side=25):
"""Create a mesh obstacle (landscape) with num_side squared control points."""
px, pz = np.meshgrid(
np.linspace(-0.5, 0.5, num_side), np.linspace(-0.5, 0.5, num_side))
trans = np.array([0.5, 0.5, 0.5])
# generate height map
py = np.zeros_like(px)
pos = np.stack([px, py, pz], axis=-1).reshape((-1, 3))
pos += trans[None]
return pos
def max_x_loss_fn(graph):
"""Example loss function for maximizing x position of particles when they hit the ground."""
z_pos = graph.nodes["world_position"][:, -1, 2]
z_var = jnp.std(z_pos, where=graph.nodes["deleted"])
x_pos = graph.nodes["world_position"][:, -1, 0]
x_max = jnp.mean(-x_pos, where=graph.nodes["deleted"])
return x_max + z_var
def smooth_loss_fn(obs_pos, num_side=25):
"""Smoothing loss function for minimizing sharp changes across obstacle."""
obs_grid = jnp.reshape(obs_pos, (num_side, num_side))
obs_dx = jnp.diff(obs_grid, axis=0) ** 2
obs_dy = jnp.diff(obs_grid, axis=1) ** 2
return 0.5 * (jnp.mean(obs_dx) + jnp.mean(obs_dy))
def design_fn(params, graph, height_scale=0.15):
"""Convert parameters in params into landscape heightfield to be represented in graph."""
graph = tree.map_structure(lambda x: x, graph)
init_pos = jnp.array(graph.nodes["world_position"])
# use tanh transformation to limit height to be within [-1, 1]
raw_obs_pos = jnp.tanh(params) * height_scale
# tile graph to have the same time history as the fluid particles
obs_pos = jnp.tile(raw_obs_pos[:, None], [1, init_pos.shape[1]])
# only controlling the height, so set other dimensions to 0
obs_pos = jnp.stack(
[jnp.zeros_like(obs_pos), obs_pos,
jnp.zeros_like(obs_pos)], axis=-1)
# add controlled height to initial heightfield and update graph nodes
pos = jnp.concatenate(
[init_pos[:obs_pos.shape[0]] + obs_pos, init_pos[obs_pos.shape[0]:]],
axis=0)
graph.nodes["world_position"] = pos
return graph, raw_obs_pos
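if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: build a flat
  # 5x5 obstacle landscape and check that the smoothness penalty of an all-zero
  # height field is zero.
  demo_obstacles = make_plain_obstacles(num_side=5)
  print(demo_obstacles.shape)  # Expected: (25, 3)
  demo_heights = jnp.zeros(25)
  print(smooth_loss_fn(demo_heights, num_side=5))  # Expected: 0.0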
| inverse_design-main | src/watercourse_env.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX module for normalization with accumulated statistics."""
import haiku as hk
import jax.numpy as jnp
import jraph
def get_accumulated_normalizer(name):
return AccumulatedNormalizer(name=name)
class AccumulatedNormalizer(hk.Module):
"""Feature normalizer that accumulates statistics for normalization.
It will accumulate statistics using float32 variables, and will return
the mean and std. It accumulates statistics until the accumulate method is
called `max_num_accumulations` times or the total number of batch elements
processed is below `max_example_count`.
To enable full GPU compatibility the number of accumulations is stored as a
float32. As this number is incremented one by one, we require
`max_num_accumulations` to be smaller than the highest float32 number that
maintains integer precision (16777216).
"""
def __init__(
self,
*,
std_epsilon: float = 1e-5,
name: str = 'accumulated_normalizer',
):
"""Inits the module.
Args:
std_epsilon: minimum value of the standard deviation to use.
name: Name of the module.
"""
super(AccumulatedNormalizer, self).__init__(name=name)
self._accumulator_shape = None
self._std_epsilon = std_epsilon
def __call__(self, batched_data):
"""Direct transformation of the normalizer."""
self._set_accumulator_shape(batched_data)
return (batched_data - self.mean) / self.std_with_epsilon
def inverse(self, normalized_batch_data):
"""Inverse transformation of the normalizer."""
self._set_accumulator_shape(normalized_batch_data)
return normalized_batch_data * self.std_with_epsilon + self.mean
def _set_accumulator_shape(self, batched_sample_data):
self._accumulator_shape = batched_sample_data.shape[-1]
def _verify_module_connected(self):
if self._accumulator_shape is None:
raise RuntimeError(
'Trying to read the mean before connecting the module.')
@property
def _acc_sum(self):
return hk.get_state(
'acc_sum', self._accumulator_shape, dtype=jnp.float32, init=jnp.zeros)
@property
def _acc_count(self):
return hk.get_state('acc_count', (), dtype=jnp.float32, init=jnp.zeros)
@property
def _acc_sum_squared(self):
return hk.get_state(
'acc_sum_squared',
self._accumulator_shape,
dtype=jnp.float32,
init=jnp.zeros)
@property
def _safe_count(self):
# To ensure count is at least one and avoid nan's.
return jnp.maximum(self._acc_count, 1.)
@property
def mean(self):
self._verify_module_connected()
return self._acc_sum / self._safe_count
@property
def std(self):
self._verify_module_connected()
var = self._acc_sum_squared / self._safe_count - self.mean**2
var = jnp.maximum(var, 0.) # Prevent negatives due to numerical precision.
return jnp.sqrt(var)
@property
def std_with_epsilon(self):
# To use in case the std is too small.
return jnp.maximum(self.std, self._std_epsilon)
class GraphElementsNormalizer(hk.Module):
"""Online normalization of individual graph components of a GraphsTuple.
Can be used to normalize individual node, edge, and global arrays.
"""
def __init__(self,
template_graph: jraph.GraphsTuple,
is_padded_graph: bool,
name: str = 'graph_elements_normalizer'):
"""Inits the module.
Args:
template_graph: Input template graph to compute edge/node/global padding
masks.
is_padded_graph: Whether the graph has padding.
name: Name of the Haiku module.
"""
super().__init__(name=name)
self._node_mask = None
self._edge_mask = None
self._graph_mask = None
if is_padded_graph:
self._node_mask = jraph.get_node_padding_mask(template_graph)
self._edge_mask = jraph.get_edge_padding_mask(template_graph)
self._graph_mask = jraph.get_graph_padding_mask(template_graph)
self._names_used = []
def _run_normalizer(self, name, array, mask):
if name in self._names_used:
raise ValueError(
f'Attempt to reuse name {name}. Used names: {self._names_used}')
self._names_used.append(name)
normalizer = get_accumulated_normalizer(name)
return normalizer(array)
def normalize_node_array(self, name, array):
return self._run_normalizer(name, array, self._node_mask)
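if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: with freshly
  # initialised (all-zero) statistics the mean is 0 and the std clips to
  # std_epsilon, so the normalizer simply rescales its input.
  @hk.transform_with_state
  def _demo_normalize(x):
    return AccumulatedNormalizer(name="demo")(x)
  demo_params, demo_state = _demo_normalize.init(None, jnp.ones((2, 3)))
  demo_out, _ = _demo_normalize.apply(demo_params, demo_state, None,
                                      jnp.ones((2, 3)))
  print(demo_out.shape)  # Expected: (2, 3)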
| inverse_design-main | src/normalizers.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph Network Simulator implementation used in NeurIPS 2022 submission.
Inverse Design for Fluid-Structure Interactions using Graph Network Simulators
Kelsey R. Allen*, Tatiana Lopez-Guevera*, Kimberly Stachenfeld*,
Alvaro Sanchez-Gonzalez, Peter Battaglia, Jessica Hamrick, Tobias Pfaff
"""
from typing import Any, Dict
import haiku as hk
import jraph
from inverse_design.src import graph_network
from inverse_design.src import normalizers
class LearnedSimulator(hk.Module):
"""Graph Network Simulator."""
def __init__(self,
connectivity_radius,
*,
graph_network_kwargs: Dict[str, Any],
flatten_features_fn=None,
name="LearnedSimulator"):
"""Initialize the model.
Args:
connectivity_radius: Radius of connectivity within which to connect
particles with edges.
graph_network_kwargs: Keyword arguments to pass to the learned part of the
graph network `model.EncodeProcessDecode`.
flatten_features_fn: Function that takes the input graph and dataset
metadata, and returns a graph where node and edge features are a single
array of rank 2, and without global features. The function will be
wrapped in a haiku module, which allows the flattening fn to instantiate
its own variable normalizers.
name: Name of the Haiku module.
"""
super().__init__(name=name)
self._connectivity_radius = connectivity_radius
self._graph_network_kwargs = graph_network_kwargs
self._graph_network = None
# Wrap flatten function in a Haiku module, so any haiku modules created
# by the function are reused in case of multiple calls.
self._flatten_features_fn = hk.to_module(flatten_features_fn)(
name="flatten_features_fn")
def _maybe_build_modules(self, input_graph):
if self._graph_network is None:
num_dimensions = input_graph.nodes["world_position"].shape[-1]
self._graph_network = graph_network.EncodeProcessDecode(
name="encode_process_decode",
node_output_size=num_dimensions,
**self._graph_network_kwargs)
self._target_normalizer = normalizers.get_accumulated_normalizer(
name="target_normalizer")
def __call__(self, input_graph: jraph.GraphsTuple, padded_graph=True):
self._maybe_build_modules(input_graph)
flat_graphs_tuple = self._encoder_preprocessor(
input_graph, padded_graph=padded_graph)
normalized_prediction = self._graph_network(flat_graphs_tuple).nodes
next_position = self._decoder_postprocessor(normalized_prediction,
input_graph)
return input_graph._replace(
nodes={"p:world_position": next_position},
edges={},
globals={},
senders=input_graph.senders[:0],
receivers=input_graph.receivers[:0],
n_edge=input_graph.n_edge * 0), {}
def _encoder_preprocessor(self, input_graph, padded_graph):
# Flattens the input graph
graph_with_flat_features = self._flatten_features_fn(
input_graph,
connectivity_radius=self._connectivity_radius,
is_padded_graph=padded_graph)
return graph_with_flat_features
def _decoder_postprocessor(self, normalized_prediction, input_graph):
# Un-normalize and integrate
position_sequence = input_graph.nodes["world_position"]
# The model produces the output in normalized space so we apply inverse
# normalization.
prediction = self._target_normalizer.inverse(normalized_prediction)
new_position = euler_integrate_position(position_sequence, prediction)
return new_position
def euler_integrate_position(position_sequence, finite_diff_estimate):
"""Integrates finite difference estimate to position (assuming dt=1)."""
# Uses an Euler integrator to go from acceleration to position,
# assuming dt=1 corresponding to the size of the finite difference.
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_acceleration = finite_diff_estimate
next_velocity = previous_velocity + next_acceleration
next_position = previous_position + next_velocity
return next_position
def euler_integrate_position_inverse(position_sequence, next_position):
"""Computes a finite difference estimate from current position and history."""
previous_position = position_sequence[:, -1]
previous_velocity = previous_position - position_sequence[:, -2]
next_velocity = next_position - previous_position
acceleration = next_velocity - previous_velocity
return acceleration
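if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: check that
  # euler_integrate_position_inverse recovers the finite-difference estimate
  # fed to euler_integrate_position. jnp is imported locally for the demo only.
  import jax.numpy as jnp
  demo_seq = jnp.array([[[0.0, 0.0], [1.0, 0.0]]])  # one node, two history steps
  demo_accel = jnp.array([[0.5, 0.0]])
  demo_next = euler_integrate_position(demo_seq, demo_accel)
  print(euler_integrate_position_inverse(demo_seq, demo_next))  # ~[[0.5, 0.]]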
| inverse_design-main | src/learned_simulator.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to compute the connectivity of the graph."""
import functools
import jax
from jax.experimental import host_callback as hcb
import jax.numpy as jnp
import numpy as np
from sklearn import neighbors
def _cb_radius_query(args):
"""Host callback function to compute connectivity."""
padded_pos, n_node, radius, max_edges, query_mask, node_mask = args
edges = []
offset = 0
for num_nodes in n_node:
pos_nodes = padded_pos[offset:offset+num_nodes]
pos_query = padded_pos[offset:offset+num_nodes]
pos_nodes = pos_nodes[node_mask[offset:offset+num_nodes]]
pos_query = pos_query[query_mask[offset:offset+num_nodes]]
# indices: [num_edges, 2] array of receivers ([:, 0]) and senders ([:, 1])
indices = compute_fixed_radius_connectivity_np(pos_nodes, radius, pos_query)
mask = query_mask[offset:offset+num_nodes]
renumber = np.arange(num_nodes, dtype=np.int32)[mask]
indices[:, 0] = renumber[indices[:, 0]]
mask = node_mask[offset:offset+num_nodes]
renumber = np.arange(num_nodes, dtype=np.int32)[mask]
indices[:, 1] = renumber[indices[:, 1]]
# remove self-edges
mask = indices[:, 0] != indices[:, 1]
indices = indices[mask]
# create unique two way edges (only necessary in the masked case)
indices = np.stack([np.min(indices, axis=1),
np.max(indices, axis=1)],
axis=1)
indices = np.unique(indices, axis=0)
indices = np.concatenate([indices, indices[:, [1, 0]]], axis=0)
edges.append(indices + offset)
offset += num_nodes
n_edge = [x.shape[0] for x in edges]
total_edges = np.sum(n_edge)
# padding
if total_edges >= max_edges:
raise ValueError("%d edges found, max_edges: %d" % (total_edges, max_edges))
  # create a [padding_size, 2] padding array, which connects the first dummy
  # padding node (index `offset`, i.e. the total number of real nodes) to
  # itself.
padding_size = max_edges - total_edges
padding = np.ones((padding_size, 2), dtype=np.int32) * offset
edges = np.concatenate(edges + [padding], axis=0)
n_edge = np.array(n_edge + [padding_size], dtype=np.int32)
return n_edge, edges
@functools.partial(jax.custom_jvp, nondiff_argnums=(4, 5))
def compute_fixed_radius_connectivity_jax(positions, n_node, query_mask,
node_mask, radius, max_edges):
"""Computes connectivity for batched graphs using a jax host callback.
Args:
positions: concatenated vector (N, 2) of node positions for all graphs
n_node: array of num_nodes for each graph
query_mask: defines the subset of nodes to query from (None=all)
node_mask: defines the subset of nodes to query to (None=all)
radius: connectivity radius
max_edges: maximum total number of edges
Returns:
array of num_edges, senders, receivers
"""
callback_arg = (positions, n_node, radius, max_edges, query_mask, node_mask)
out_shape = (jax.ShapeDtypeStruct((len(n_node) + 1,), jnp.int32),
jax.ShapeDtypeStruct((max_edges, 2), jnp.int32))
n_edge, indices = hcb.call(_cb_radius_query, callback_arg,
result_shape=out_shape)
senders = indices[:, 1]
receivers = indices[:, 0]
return n_edge, senders, receivers
@compute_fixed_radius_connectivity_jax.defjvp
def _compute_fixed_radius_connectivity_jax_jvp(radius, max_edges, primals,
tangents):
"""Custom zero-jvp function for compute_fixed_radius_connectivity_jax."""
del tangents
primal_out = compute_fixed_radius_connectivity_jax(
*primals, radius=radius, max_edges=max_edges)
grad_out = tuple(jnp.zeros_like(x) for x in primal_out)
return primal_out, grad_out
def compute_fixed_radius_connectivity_np(
positions, radius, receiver_positions=None, remove_self_edges=False):
"""Computes connectivity between positions and receiver_positions."""
# if removing self edges, receiver positions must be none
assert not (remove_self_edges and receiver_positions is not None)
if receiver_positions is None:
receiver_positions = positions
# use kdtree for efficient calculation of pairs within radius distance
kd_tree = neighbors.KDTree(positions)
receivers_list = kd_tree.query_radius(receiver_positions, r=radius)
num_nodes = len(receiver_positions)
senders = np.repeat(range(num_nodes), [len(a) for a in receivers_list])
receivers = np.concatenate(receivers_list, axis=0)
if remove_self_edges:
# Remove self edges.
mask = senders != receivers
senders = senders[mask]
receivers = receivers[mask]
return np.stack([senders.astype(np.int32),
receivers.astype(np.int32)],
axis=-1)
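if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: connect
  # three 2D points within a radius of 1.5 and drop self edges; only the two
  # nearby points end up connected (in both directions).
  demo_points = np.array([[0.0, 0.0], [1.0, 0.0], [5.0, 0.0]])
  print(compute_fixed_radius_connectivity_np(
      demo_points, radius=1.5, remove_self_edges=True))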
| inverse_design-main | src/connectivity_utils.py |
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""JAX implementation of Encode Process Decode."""
from typing import Optional
import haiku as hk
import jax
import jax.numpy as jnp
import jraph
class EncodeProcessDecode(hk.Module):
"""Encode-Process-Decode function approximator for learnable simulator."""
def __init__(
self,
*,
latent_size: int,
mlp_hidden_size: int,
mlp_num_hidden_layers: int,
num_message_passing_steps: int,
num_processor_repetitions: int = 1,
encode_nodes: bool = True,
encode_edges: bool = True,
node_output_size: Optional[int] = None,
edge_output_size: Optional[int] = None,
include_sent_messages_in_node_update: bool = False,
use_layer_norm: bool = True,
name: str = "EncodeProcessDecode"):
"""Inits the model.
Args:
latent_size: Size of the node and edge latent representations.
mlp_hidden_size: Hidden layer size for all MLPs.
mlp_num_hidden_layers: Number of hidden layers in all MLPs.
num_message_passing_steps: Number of unshared message passing steps
in the processor steps.
num_processor_repetitions: Number of times that the same processor is
applied sequencially.
encode_nodes: If False, the node encoder will be omitted.
encode_edges: If False, the edge encoder will be omitted.
node_output_size: Output size of the decoded node representations.
edge_output_size: Output size of the decoded edge representations.
include_sent_messages_in_node_update: Whether to include pooled sent
messages from each node in the node update.
use_layer_norm: Whether it uses layer norm or not.
name: Name of the model.
"""
super().__init__(name=name)
self._latent_size = latent_size
self._mlp_hidden_size = mlp_hidden_size
self._mlp_num_hidden_layers = mlp_num_hidden_layers
self._num_message_passing_steps = num_message_passing_steps
self._num_processor_repetitions = num_processor_repetitions
self._encode_nodes = encode_nodes
self._encode_edges = encode_edges
self._node_output_size = node_output_size
self._edge_output_size = edge_output_size
self._include_sent_messages_in_node_update = (
include_sent_messages_in_node_update)
self._use_layer_norm = use_layer_norm
self._networks_builder()
def __call__(self, input_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Forward pass of the learnable dynamics model."""
# Encode the input_graph.
latent_graph_0 = self._encode(input_graph)
# Do `m` message passing steps in the latent graphs.
latent_graph_m = self._process(latent_graph_0)
# Decode from the last latent graph.
return self._decode(latent_graph_m)
def _networks_builder(self):
def build_mlp(name, output_size=None):
if output_size is None:
output_size = self._latent_size
mlp = hk.nets.MLP(
output_sizes=[self._mlp_hidden_size] * self._mlp_num_hidden_layers + [
output_size], name=name + "_mlp", activation=jax.nn.relu)
return jraph.concatenated_args(mlp)
def build_mlp_with_maybe_layer_norm(name, output_size=None):
network = build_mlp(name, output_size)
if self._use_layer_norm:
layer_norm = hk.LayerNorm(
axis=-1, create_scale=True, create_offset=True,
name=name + "_layer_norm")
network = hk.Sequential([network, layer_norm])
return jraph.concatenated_args(network)
# The encoder graph network independently encodes edge and node features.
encoder_kwargs = dict(
embed_edge_fn=build_mlp_with_maybe_layer_norm("encoder_edges")
if self._encode_edges else None,
embed_node_fn=build_mlp_with_maybe_layer_norm("encoder_nodes")
if self._encode_nodes else None,)
self._encoder_network = jraph.GraphMapFeatures(**encoder_kwargs)
# Create `num_message_passing_steps` graph networks with unshared parameters
# that update the node and edge latent features.
# Note that we can use `modules.InteractionNetwork` because
# it also outputs the messages as updated edge latent features.
self._processor_networks = []
for step_i in range(self._num_message_passing_steps):
self._processor_networks.append(
jraph.InteractionNetwork(
update_edge_fn=build_mlp_with_maybe_layer_norm(
f"processor_edges_{step_i}"),
update_node_fn=build_mlp_with_maybe_layer_norm(
f"processor_nodes_{step_i}"),
include_sent_messages_in_node_update=(
self._include_sent_messages_in_node_update)))
# The decoder MLP decodes edge/node latent features into the output sizes.
decoder_kwargs = dict(
embed_edge_fn=build_mlp("decoder_edges", self._edge_output_size)
if self._edge_output_size else None,
embed_node_fn=build_mlp("decoder_nodes", self._node_output_size)
if self._node_output_size else None,
)
self._decoder_network = jraph.GraphMapFeatures(**decoder_kwargs)
def _encode(
self, input_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Encodes the input graph features into a latent graph."""
# Copy the globals to all of the nodes, if applicable.
if input_graph.globals is not None:
broadcasted_globals = jnp.repeat(
input_graph.globals, input_graph.n_node, axis=0,
total_repeat_length=input_graph.nodes.shape[0])
input_graph = input_graph._replace(
nodes=jnp.concatenate(
[input_graph.nodes, broadcasted_globals], axis=-1),
globals=None)
# Encode the node and edge features.
latent_graph_0 = self._encoder_network(input_graph)
return latent_graph_0
def _process(
self, latent_graph_0: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Processes the latent graph with several steps of message passing."""
# Do `num_message_passing_steps` with each of the `self._processor_networks`
# with unshared weights, and repeat that `self._num_processor_repetitions`
# times.
latent_graph = latent_graph_0
for unused_repetition_i in range(self._num_processor_repetitions):
for processor_network in self._processor_networks:
latent_graph = self._process_step(processor_network, latent_graph,
latent_graph_0)
return latent_graph
def _process_step(
self, processor_network_k,
latent_graph_prev_k: jraph.GraphsTuple,
latent_graph_0: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Single step of message passing with node/edge residual connections."""
input_graph_k = latent_graph_prev_k
# One step of message passing.
latent_graph_k = processor_network_k(input_graph_k)
# Add residuals.
latent_graph_k = latent_graph_k._replace(
nodes=latent_graph_k.nodes+latent_graph_prev_k.nodes,
edges=latent_graph_k.edges+latent_graph_prev_k.edges)
return latent_graph_k
def _decode(self, latent_graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
"""Decodes from the latent graph."""
return self._decoder_network(latent_graph)
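if __name__ == "__main__":
  # Editor's illustrative sketch, not part of the original module: run the
  # network on a toy two-node graph with a single edge; all sizes here are
  # arbitrary demo values.
  def _demo_forward(graph):
    model = EncodeProcessDecode(
        latent_size=8, mlp_hidden_size=8, mlp_num_hidden_layers=2,
        num_message_passing_steps=2, node_output_size=3)
    return model(graph)
  demo_graph = jraph.GraphsTuple(
      nodes=jnp.ones((2, 4)), edges=jnp.ones((1, 4)),
      senders=jnp.array([0]), receivers=jnp.array([1]),
      n_node=jnp.array([2]), n_edge=jnp.array([1]), globals=None)
  demo_init, demo_apply = hk.transform(_demo_forward)
  demo_params = demo_init(jax.random.PRNGKey(0), demo_graph)
  demo_out = demo_apply(demo_params, None, demo_graph)
  print(demo_out.nodes.shape)  # Expected: (2, 3)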
| inverse_design-main | src/graph_network.py |
"""
Create and populate a minimal SQLite3 full-text search index
"""
import sqlite3
import glob
import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output", type=str, help="Path to write SQLite3 search index")
parser.add_argument("--debug", type=bool, help="Debug mode")
parser.add_argument('input', type=str, help="Path to input directory of Markdown files")
args = parser.parse_args()
DB = sqlite3.connect(database=args.output)
DB.text_factory = str
path = args.input
def debug(msg):
if args.debug:
print(msg)
srcLinkPattern = re.compile('<a class="entityLink".*</a>')
def makeSearchText(section):
return buffer(re.sub(srcLinkPattern, "", section))
def sections(path):
pattern = re.compile('<a (name|id)="(.*)"></a>')
for packageName in os.listdir(path):
for filePath in glob.glob(os.path.join(path, packageName, "*.md")):
debug("Indexing " + filePath)
with open(filePath, 'r') as f:
section = ""
tag = packageName + "." + os.path.basename(filePath) + ".dok"
for line in f.readlines():
result = pattern.match(line)
if result:
section = makeSearchText(section)
yield packageName, tag, section
tag = result.group(2)
section = ""
else:
section += line
section = makeSearchText(section)
yield packageName, tag, section
def load_db():
"""Add sample data to the database"""
ins = """INSERT INTO fulltext_search(package, tag, doc) VALUES(?, ?, ?);"""
for (packageName, tag, section) in sections(path):
DB.execute(ins, (packageName, tag, section))
DB.commit()
def init_db():
"""Initialize our database"""
DB.execute("DROP TABLE IF EXISTS fulltext_search")
DB.execute("""CREATE VIRTUAL TABLE fulltext_search USING fts4(
id SERIAL,
package TEXT,
tag TEXT,
doc TEXT,
tokenize=porter
);""")
if __name__ == "__main__":
init_db()
load_db()
DB.close()
| torch-dokx-master | dokx-search/dokx-build-search-index.py |
# Based on http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
# By Sander Marechal (public domain)
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| torch-dokx-master | dokx-search/dokxDaemon.py |
#!/usr/bin/env python2.7
"""
Simple search UI for SQLite3 full-text search
"""
import json
import flask
import os
import sys
import urllib
from jinja2 import Environment, FileSystemLoader
import argparse
from dokxDaemon import Daemon
parser = argparse.ArgumentParser()
parser.add_argument("command", type=str, help="start|restart|stop")
parser.add_argument("--docs", type=str, default=None, help="Path to HTML docs")
parser.add_argument("--debug", type=bool, default=False, help="Debug mode")
args = parser.parse_args()
JSON_HOST = "http://localhost:8130" # Where the restserv service is running
PORT = 5000
env = Environment(loader=FileSystemLoader(searchpath="%s/templates" % os.path.dirname((os.path.realpath(__file__)))), trim_blocks=True)
app = flask.Flask(__name__, static_folder=args.docs, static_url_path='')
@app.route("/")
def root():
return app.send_static_file("index.html")
@app.route("/search")
def search():
"""Simple search for terms, with optional limit and paging"""
query = flask.request.args.get('query', '')
if not query:
template = env.get_template('index.html')
return template.render()
page = flask.request.args.get('page', '')
jsonu = u"%s/search/%s/" % (JSON_HOST, urllib.quote_plus(query.encode('utf-8')))
if page:
jsonu = u"%s%d" % (jsonu, int(page))
res = json.loads(urllib.urlopen(jsonu).read().decode('utf-8'))
template = env.get_template('results.html')
return(template.render(
terms=res['query'].replace('+', ' '),
results=res,
request=flask.request
))
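# Example request (editor's illustrative sketch; assumes the web daemon is
# running locally and the REST service is reachable at JSON_HOST):
#   http://localhost:5000/search?query=tensor&page=1
# renders results.html with snippets for the matching documentation sections.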
class WebDaemon(Daemon):
def run(self):
app.run(port=PORT)
if __name__ == "__main__":
app.debug = args.debug
pidFile = sys.argv[0] + ".pid"
daemon = WebDaemon(pidFile)
if args.command == 'start':
daemon.start()
elif args.command == 'restart':
daemon.restart()
elif args.command == 'stop':
daemon.stop()
| torch-dokx-master | dokx-search/web/dokx-service-web.py |
#!/usr/bin/env python2.7
"""
Serve up search results as JSON via REST requests
Provide JSON results including the ID and the search snippet for given search
requests.
Ultimately needs to support advanced search as well, including NOT operators
and wildcards.
"""
import json
import sqlite3
import flask
import urllib
import argparse
import os
import sys
from dokxDaemon import Daemon
parser = argparse.ArgumentParser()
parser.add_argument("command", type=str, help="start|restart|stop")
parser.add_argument("--database", type=str, default=None, help="Path to SQLite3 search index")
parser.add_argument("--debug", type=bool, default=False, help="Debug mode")
args = parser.parse_args()
# Port on which JSON should be served up
PORT = 8130
app = flask.Flask(__name__)
@app.route("/search/<query>/")
@app.route("/search/<query>/<int:page>")
@app.route("/search/<query>/<int:page>/<int:limit>")
def search(query, page=0, limit=10):
"""Return JSON formatted search results, including snippets and facets"""
query = urllib.unquote(query)
results = __get_ranked_results(query, limit, page)
count = __get_result_count(query)
resj = json.dumps({
'query': query,
'results': results,
'meta': {
'total': count,
'page': page,
'limit': limit,
'results': len(results)
}
})
return flask.Response(response=str(resj), mimetype='application/json')
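# Example request (editor's illustrative sketch; assumes the REST daemon is
# running locally on PORT):
#   curl http://localhost:8130/search/tensor/0/10
# returns a JSON object with 'query', 'results' (id, package, tag, snippets)
# and 'meta' (total, page, limit, results) fields.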
def __get_ranked_results(query, limit, page):
"""Simple search for terms, with optional limit and paging"""
DB = sqlite3.connect(args.database)
sql = """
SELECT id, package, tag, doc, snippet(fulltext_search, "<b>", "</b>", "<b>...</b>", -1, -40) AS rank
FROM fulltext_search
WHERE fulltext_search MATCH ?
ORDER BY rank DESC
LIMIT ? OFFSET ?
"""
cur = DB.execute(sql, (query, limit, page*limit))
results = []
for row in cur:
results.append({
'id': row[0],
'package' : row[1],
'tag' : row[2],
'snippets': [row[4]]
})
return results
def __get_result_count(query):
"""Gather count of matching results"""
DB = sqlite3.connect(args.database)
sql = """
SELECT COUNT(*) AS rescnt
FROM fulltext_search
WHERE fulltext_search MATCH ?
"""
cur = DB.execute(sql, (query,))
count = cur.fetchone()
return count[0]
class RestDaemon(Daemon):
def run(self):
app.run(port=PORT)
if __name__ == "__main__":
app.debug = args.debug
pidFile = sys.argv[0] + ".pid"
daemon = RestDaemon(pidFile)
if args.command == 'start':
daemon.start()
elif args.command == 'restart':
daemon.restart()
elif args.command == 'stop':
daemon.stop()
| torch-dokx-master | dokx-search/rest/dokx-service-rest.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pytorch_lightning.utilities import migration
import torch
data_directory = os.environ['BOP_DATA_DIR']
checkpoint_path = os.path.join(
data_directory, 'models', 'ycbv-jwpvdij1.compact.ckpt'
)
with migration.pl_legacy_patch():
checkpoint = torch.load(checkpoint_path)
checkpoint['pytorch-lightning_version'] = '1.6.5'
migration.migrate_checkpoint(checkpoint)
torch.save(checkpoint, checkpoint_path)
| threednel-main | upgrade_checkpoint.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
NAME = 'threednel'
VERSION = '0.0.1'
if __name__ == '__main__':
setuptools.setup(
name=NAME,
version=VERSION,
packages=setuptools.find_namespace_packages(include=['threednel.*']),
)
| threednel-main | setup.py |
"""Script to reproduce the 6D object pose estimation results on YCB-V."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import jax
import joblib
import numpy as np
import taichi as ti
from absl import app, flags
from threednel.bop.data import BOPTestDataset, RGBDImage
from threednel.bop.detector import Detector
from threednel.bop.results import generate_csv
from threednel.third_party.surfemb.utils import timer
from tqdm import tqdm
_EXPERIMENT_NAME = flags.DEFINE_string(
'experiment_name', None, 'Name of the experiment.'
)
_SCALE_FACTOR = flags.DEFINE_float(
'scale_factor', 0.25, 'Scale factor to run detection on.'
)
_FILL_IN_DEPTH = flags.DEFINE_boolean(
'fill_in_depth', True, 'Whether to fill in missing depth.'
)
_N_PASSES_POSE_HYPOTHESES = flags.DEFINE_integer(
'n_passes_pose_hypotheses',
1,
    'Number of passes going through the pose hypotheses.',
)
_N_PASSES_ICP = flags.DEFINE_integer(
    'n_passes_icp', 1, 'Number of passes making ICP moves.'
)
_N_PASSES_FINETUNE = flags.DEFINE_integer(
'n_passes_finetune', 1, 'Number of passes to do finetuning.'
)
_USE_CROPS = flags.DEFINE_boolean(
'use_crops', True, 'Whether to use crop in generating pose hypotheses.'
)
def main(_):
ti.init(arch=ti.cuda)
experiment_name = _EXPERIMENT_NAME.value
data_directory = os.environ['BOP_DATA_DIR']
data = BOPTestDataset(
data_directory=data_directory,
load_detector_crops=True,
)
detector = Detector(
data_directory=data_directory,
n_passes_pose_hypotheses=_N_PASSES_POSE_HYPOTHESES.value,
n_passes_icp=_N_PASSES_ICP.value,
n_passes_finetune=_N_PASSES_FINETUNE.value,
use_crops=_USE_CROPS.value,
)
flag_values = jax.tree_util.tree_map(
lambda x: str(x),
dict(
experiment_name=_EXPERIMENT_NAME.value,
scale_factor=_SCALE_FACTOR.value,
fill_in_depth=_FILL_IN_DEPTH.value,
n_passes_pose_hypotheses=_N_PASSES_POSE_HYPOTHESES.value,
n_passes_icp=_N_PASSES_ICP.value,
n_passes_finetune=_N_PASSES_FINETUNE.value,
use_crops=_USE_CROPS.value,
),
)
results_hash = hashlib.md5(json.dumps(flag_values).encode('utf-8')).hexdigest()
results_directory = os.path.join(
data_directory,
'results',
f'pose_estimates_{experiment_name}_{results_hash}',
)
print(f'Working on results directory {results_directory}.')
if not os.path.exists(results_directory):
os.makedirs(results_directory)
joblib.dump(flag_values, os.path.join(results_directory, 'flags.joblib'))
with open(os.path.join(results_directory, 'flags.json'), 'w') as f:
json.dump(flag_values, f)
for scene_id in np.sort(np.unique(data.test_targets['scene_id'])):
test_scene = data[scene_id]
for img_id in test_scene.img_indices:
print(f'Working on scene {scene_id}, image {img_id}.')
results_fname = os.path.join(
results_directory,
f'scene_{scene_id}_img_{img_id}.joblib',
)
if os.path.exists(results_fname):
continue
bop_img = test_scene[img_id]
test_img = RGBDImage(
rgb=bop_img.rgb,
depth=bop_img.depth,
intrinsics=bop_img.intrinsics,
bop_obj_indices=np.array(bop_img.bop_obj_indices),
fill_in_depth=_FILL_IN_DEPTH.value,
max_depth=1260.0,
annotations=bop_img.annotations,
)
with timer(f'Inference for scene {scene_id}, image {img_id}'):
detection_results = detector.detect(
img=test_img,
key=jax.random.PRNGKey(np.random.randint(0, 100000)),
scale_factor=_SCALE_FACTOR.value,
)
joblib.dump(
dict(
bop_obj_indices=test_img.bop_obj_indices,
gt_poses=bop_img.get_gt_poses(),
initial_poses=detection_results.initial_poses,
inferred_poses=detection_results.inferred_poses,
),
results_fname,
)
predictions = []
voting_predictions = []
for scene_id in np.sort(np.unique(data.test_targets['scene_id'])):
print(f'Working on scene {scene_id}.')
test_scene = data[scene_id]
for img_id in tqdm(test_scene.img_indices):
results_fname = os.path.join(
results_directory,
f'scene_{scene_id}_img_{img_id}.joblib',
)
if not os.path.exists(results_fname):
continue
with open(results_fname, 'rb') as f:
results = joblib.load(f)
for obj_idx, bop_obj_idx in enumerate(results['bop_obj_indices']):
predictions.append(
dict(
scene_id=scene_id,
im_id=img_id,
obj_id=bop_obj_idx,
score=-1,
R=results['inferred_poses'][obj_idx][:3, :3],
t=results['inferred_poses'][obj_idx][:3, -1],
time=-1,
)
)
voting_predictions.append(
dict(
scene_id=scene_id,
im_id=img_id,
obj_id=bop_obj_idx,
score=-1,
R=results['initial_poses'][obj_idx][:3, :3],
t=results['initial_poses'][obj_idx][:3, -1],
time=-1,
)
)
generate_csv(
predictions,
os.path.join(
results_directory,
(
f'ycbv-threednel-{str(experiment_name).replace("_", "-")}-{results_hash}_ycbv-test.csv'
),
),
)
generate_csv(
voting_predictions,
os.path.join(
results_directory,
(
f'ycbv-voting-{str(experiment_name).replace("_", "-")}-{results_hash}_ycbv-test.csv'
),
),
)
if __name__ == '__main__':
app.run(main)
| threednel-main | scripts/pose_estimation.py |
"""Module containing ICP-related utils."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Callable
import numba
import numpy as np
from scipy.spatial import KDTree
def apply_transform(coords: np.array, transform: np.ndarray) -> np.ndarray:
"""Apply transformation matrix to coordinates.
Args:
coords: Array of shape (..., 3)
transform: Array of shape (4, 4)
Returns:
np.ndarray: Array of shape (..., 3), transformed coordinates.
"""
coords = np.einsum(
'ij,...j->...i',
transform,
np.concatenate([coords, np.ones(coords.shape[:-1] + (1,))], axis=-1),
)[..., :-1]
return coords
@numba.jit(nopython=True, cache=True, nogil=True)
def get_transform_from_cloud_to_target(
A: np.ndarray, B: np.ndarray
) -> np.ndarray:
"""Estimate the best rigid transform to transform a point cloud A to a point cloud B.
Args:
A: A source point cloud.
B: A target point cloud.
Returns:
The estimated rigid transform.
"""
assert A.shape == B.shape
# find mean column wise
centroid_A = np.array(
[[np.mean(A[:, 0]), np.mean(A[:, 1]), np.mean(A[:, 2])]]
)
centroid_B = np.array(
[[np.mean(B[:, 0]), np.mean(B[:, 1]), np.mean(B[:, 2])]]
)
# subtract mean
Am = A - centroid_A
Bm = B - centroid_B
H = np.transpose(Am) @ Bm
# find rotation
U, S, Vt = np.linalg.svd(H)
R = Vt.T @ U.T
# special reflection case
if np.linalg.det(R) < 0:
Vt[2, :] *= -1
R = Vt.T @ U.T
t = -R @ np.transpose(centroid_A) + np.transpose(centroid_B)
transform = np.eye(4)
transform[:3, :3] = R
transform[:3, 3] = t.ravel()
return transform
@dataclass
class ICP:
"""Class implementing iterative closest point (ICP).
  Attributes:
    target_cloud: Array of shape (n_target_points, 3). Target point cloud.
    render: Callable taking (pose, obj_idx), where pose is an array of shape
      (4, 4) giving the object pose and obj_idx is the object index in the
      scene, and returning (cloud, model_mask): the rendered point cloud of
      the object with shape (n_points, 3), and the model mask with shape
      (H, W).
    n_outer_iterations: Number of outer iterations. We call the renderer once
      in each outer iteration.
    n_inner_iterations: Number of inner iterations. We update the rendered
      cloud instead of calling the renderer in each inner iteration.
"""
target_cloud: np.ndarray
render: Callable
n_outer_iterations: int = 5
n_inner_iterations: int = 2
def __post_init__(self):
self.target_cloud = np.array(self.target_cloud)
self.target_tree = KDTree(self.target_cloud)
def _update_pose_and_cloud(self, pose: np.ndarray, cloud: np.ndarray):
"""Locally update object pose and point cloud without re-rendering."""
_, idxs = self.target_tree.query(cloud)
target_neighbors = self.target_cloud[idxs, :]
transform = get_transform_from_cloud_to_target(
cloud.astype(np.float32), target_neighbors.astype(np.float32)
)
pose = transform.dot(pose)
cloud = apply_transform(cloud, transform)
return pose, cloud
def _update_pose(self, pose: np.ndarray, obj_idx: int):
"""Render and run ICP to update object pose."""
cloud = self.render(pose, obj_idx)[0]
for _ in range(self.n_inner_iterations):
pose, cloud = self._update_pose_and_cloud(pose, cloud)
return pose
def fit(self, pose: np.ndarray, obj_idx: int):
"""Function to run ICP."""
for _ in range(self.n_outer_iterations):
pose = self._update_pose(pose, obj_idx)
return pose
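# --- Hedged usage sketch (editor addition, not part of the original module):
# recovers a known rigid transform from point correspondences. The random
# cloud and the ground-truth transform below are illustrative assumptions.
if __name__ == '__main__':
  from scipy.spatial.transform import Rotation
  rng = np.random.default_rng(0)
  cloud = rng.normal(size=(100, 3)).astype(np.float32)
  true_transform = np.eye(4)
  true_transform[:3, :3] = Rotation.from_euler('xyz', [0.1, 0.2, 0.3]).as_matrix()
  true_transform[:3, 3] = np.array([1.0, -2.0, 3.0])
  target = apply_transform(cloud, true_transform)
  estimated = get_transform_from_cloud_to_target(
      cloud.astype(np.float32), target.astype(np.float32)
  )
  # The estimate should match the ground-truth transform up to float32 error.
  print(np.abs(estimated - true_transform).max())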
| threednel-main | threednel/icp.py |
"""Module implementing 3DNEL evaluation."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import os
from dataclasses import dataclass
from typing import Tuple
import jax
import jax.dlpack
import jax.numpy as jnp
import numpy as np
import torch
from jax.scipy.special import logsumexp
from threednel.bop.data import RGBDImage
from threednel.third_party.surfemb.surface_embedding import \
SurfaceEmbeddingModel
@dataclass
class JAXNDL:
"""JAX-based implementation of 3DNEL evaluation."""
model: SurfaceEmbeddingModel
n_objs: int
r: float
outlier_prob: float
outlier_volume: float
outlier_scaling: float
filter_shape: Tuple[int, int]
data_directory: str = os.environ['BOP_DATA_DIR']
def __post_init__(self):
self.p_background = self.outlier_prob / self.outlier_volume * self.outlier_scaling
    self.indices = list(range(self.n_objs))
    # set_for_new_img must be called before compute_likelihood can be used.
    self.initialized = False
@functools.partial(
jnp.vectorize,
signature='(h,w,l),(h,w,m),(h,w)->()',
excluded=(3, 4, 5, 6),
)
def ndl_from_rendered_data(
model_xyz: jnp.ndarray,
key_embeddings: jnp.ndarray,
obj_ids: jnp.ndarray,
data_xyz: jnp.ndarray,
query_embeddings: jnp.ndarray,
log_normalizers: jnp.ndarray,
data_mask: jnp.ndarray,
):
model_mask = obj_ids >= 0
p_foreground = (1.0 - self.outlier_prob) / model_mask.sum()
log_prob = neural_embedding_likelihood(
data_xyz=data_xyz,
query_embeddings=query_embeddings,
log_normalizers=log_normalizers,
model_xyz=model_xyz,
key_embeddings=key_embeddings,
model_mask=model_mask,
obj_ids=obj_ids,
data_mask=data_mask,
r=self.r,
p_background=self.p_background,
p_foreground=p_foreground,
filter_shape=self.filter_shape,
)
return log_prob
self.ndl_from_rendered_data = jax.jit(ndl_from_rendered_data)
def set_for_new_img(
self,
img: RGBDImage,
query_embeddings: np.ndarray,
log_normalizers: np.ndarray,
data_mask: np.ndarray,
scale_factor: float,
):
"""Image-specific updates to allow 3DNEL evaluations on a new image."""
self.initialized = True
assert len(img.bop_obj_indices) == self.n_objs
self.renderer = img.get_renderer(self.data_directory, scale_factor)
shape = (self.renderer.height, self.renderer.width)
data_xyz = img.unproject()
self.data_xyz = jax.image.resize(
data_xyz,
shape=shape + data_xyz.shape[2:],
method='nearest',
)
self.query_embeddings = jax.image.resize(
query_embeddings,
shape=shape + query_embeddings.shape[2:],
method='nearest',
)
self.log_normalizers = jax.image.resize(
log_normalizers,
shape=shape + log_normalizers.shape[2:],
method='nearest',
)
self.data_mask = jax.image.resize(data_mask, shape=shape, method='nearest')
self.bop_obj_indices = jax.device_put(np.array(img.bop_obj_indices))
def compute_likelihood(
self,
poses: np.ndarray,
):
"""Wrapper function to compute likelihoods of a given set of poses.
Args:
      poses: Array of shape (n_objs, n_particles, 4, 4). The set of pose
        hypotheses to score.
Returns:
The likelihoods of the given set of poses.
"""
assert self.initialized
rendered_data = self.renderer.render(poses, self.indices)
key_embeddings = torch.zeros(
rendered_data.obj_coords.shape[:-1] + (self.model.emb_dim,),
device=self.model.device,
)
obj_coords_torch = torch.from_dlpack(jax.dlpack.to_dlpack(rendered_data.obj_coords))
obj_ids_torch = torch.from_dlpack(jax.dlpack.to_dlpack(rendered_data.obj_ids))
for ii in range(len(self.bop_obj_indices)):
mask = obj_ids_torch == ii
key_embeddings[mask] = self.model.infer_mlp(
obj_coords_torch[mask], int(self.bop_obj_indices[ii])
)
key_embeddings = jax.dlpack.from_dlpack(torch.to_dlpack(key_embeddings))
log_prob = self.ndl_from_rendered_data(
rendered_data.model_xyz,
key_embeddings,
rendered_data.obj_ids,
self.data_xyz,
self.query_embeddings,
self.log_normalizers,
self.data_mask,
)
return log_prob
@functools.partial(jax.jit, static_argnames='filter_shape')
def neural_embedding_likelihood(
data_xyz: jnp.ndarray,
query_embeddings: jnp.ndarray,
log_normalizers: jnp.ndarray,
model_xyz: jnp.ndarray,
key_embeddings: jnp.ndarray,
model_mask: jnp.ndarray,
obj_ids: jnp.ndarray,
data_mask: jnp.ndarray,
r: float,
p_background: float,
p_foreground: float,
filter_shape: Tuple[int, int],
) -> jnp.ndarray:
"""Function implementing 3DNEL evalaution.
Args:
data_xyz: Array of shape (H, W, 3). Observed point cloud organized as an
image.
query_embeddings: Array of shape (H, W, n_objs, d). Query embeddings for
each observed pixel using models from different objects.
log_normalizers: Array of shape (H, W, n_objs). The log normalizers for
each pixel given each object model
model_xyz: Array of shape (H, W, 3). Rendered point cloud organized as an
image.
key_embeddings: Array of shape (H, W, d). Key embeddings organized as an
image.
model_mask: Array of shape (H, W). Mask indicating relevant pixels from
rendering.
obj_ids: Array of shape (H, W). The object id of each pixel.
data_mask: Array of shape (H, W). Mask indicating the relevant set of
pixels.
r: Radius of the ball.
p_background: background probability.
p_foreground: foreground probability.
filter_shape: used to restrict likelihood evaluation to a 2D neighborhood.
Returns:
The likelihood as evaluated using 3DNEL.
"""
obj_ids = jnp.round(obj_ids).astype(jnp.int32)
padding = [
(filter_shape[ii] // 2, filter_shape[ii] - filter_shape[ii] // 2 - 1)
for ii in range(len(filter_shape))
]
model_xyz_padded = jnp.pad(model_xyz, pad_width=padding + [(0, 0)])
key_embeddings_padded = jnp.pad(key_embeddings, pad_width=padding + [(0, 0)])
model_mask_padded = jnp.pad(model_mask, pad_width=padding)
obj_ids_padded = jnp.pad(obj_ids, pad_width=padding)
@functools.partial(
jnp.vectorize,
signature='(m),(n),(o,d),(o)->()',
)
def log_likelihood_for_pixel(
ij: jnp.ndarray,
data_xyz_for_pixel: jnp.ndarray,
query_embeddings_for_pixel: jnp.ndarray,
log_normalizers_for_pixel: jnp.ndarray,
):
"""Function to evaluate the log-likelihood for a given pixel.
Args:
ij: Array of shape (2,). The i, j index of the pixel.
data_xyz_for_pixel: The camera frame coordinate of the point at the pixel.
query_embeddings_for_pixel: The query embeddings at the pixel.
log_normalizers_for_pixel: The log_normalizers at the pixel.
Returns:
The log likelihood for the given pixel.
"""
model_xyz_patch = jax.lax.dynamic_slice(
model_xyz_padded,
jnp.array([ij[0], ij[1], 0]),
(filter_shape[0], filter_shape[1], 3),
)
key_embeddings_patch = jax.lax.dynamic_slice(
key_embeddings_padded,
jnp.array([ij[0], ij[1], 0]),
(filter_shape[0], filter_shape[1], key_embeddings.shape[-1]),
)
model_mask_patch = jax.lax.dynamic_slice(model_mask_padded, ij, filter_shape)
obj_ids_patch = jax.lax.dynamic_slice(obj_ids_padded, ij, filter_shape)
log_prob_correspondence = (
jnp.sum(
query_embeddings_for_pixel[obj_ids_patch] * key_embeddings_patch,
axis=-1,
)
- log_normalizers_for_pixel[obj_ids_patch]
).ravel()
distance = jnp.linalg.norm(data_xyz_for_pixel - model_xyz_patch, axis=-1).ravel()
a = jnp.concatenate([jnp.zeros(1), log_prob_correspondence])
b = jnp.concatenate([
jnp.array([p_background]),
jnp.where(
jnp.logical_and(distance <= r, model_mask_patch.ravel() > 0),
3 * p_foreground / (4 * jnp.pi * r**3),
0.0,
),
])
log_mixture_prob = logsumexp(a=a, b=b)
return log_mixture_prob
log_mixture_prob = log_likelihood_for_pixel(
jnp.moveaxis(jnp.mgrid[: data_xyz.shape[0], : data_xyz.shape[1]], 0, -1),
data_xyz,
query_embeddings,
log_normalizers,
)
return jnp.sum(jnp.where(data_mask, log_mixture_prob, 0.0))
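# --- Hedged shape sketch (editor addition, not part of the original module):
# calls neural_embedding_likelihood on tiny all-zero inputs purely to document
# the expected array shapes; all sizes and parameter values are illustrative.
if __name__ == '__main__':
  h, w, n_objs, d = 4, 4, 1, 2
  log_prob = neural_embedding_likelihood(
      data_xyz=jnp.zeros((h, w, 3)),
      query_embeddings=jnp.zeros((h, w, n_objs, d)),
      log_normalizers=jnp.zeros((h, w, n_objs)),
      model_xyz=jnp.zeros((h, w, 3)),
      key_embeddings=jnp.zeros((h, w, d)),
      model_mask=jnp.ones((h, w)),
      obj_ids=jnp.zeros((h, w)),
      data_mask=jnp.ones((h, w), dtype=bool),
      r=5.0,
      p_background=0.01 / 1000.0**3,
      p_foreground=0.99 / (h * w),
      filter_shape=(10, 10),
  )
  print(log_prob)  # A scalar log-likelihood.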
| threednel-main | threednel/ndl.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| threednel-main | threednel/__init__.py |
"""Module containing distribution-related utilities."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
def quaternion_to_rotation_matrix(Q: jnp.ndarray) -> jnp.ndarray:
"""Covert a quaternion into a full three-dimensional rotation matrix.
Args:
Q: A 4 element array representing the quaternion (q0,q1,q2,q3)
Returns:
A 3x3 element matrix representing the full 3D rotation matrix.
This rotation matrix converts a point in the local reference
frame to a point in the global reference frame.
"""
# Extract the values from Q
q0 = Q[0]
q1 = Q[1]
q2 = Q[2]
q3 = Q[3]
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# 3x3 rotation matrix
rot_matrix = jnp.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]])
return rot_matrix
def gaussian_vmf(
key: jnp.ndarray, var: jnp.ndarray, concentration: float
) -> jnp.ndarray:
"""Function to sample from the a GaussianVMF distribution."""
translation = tfp.distributions.MultivariateNormalFullCovariance(
loc=jnp.zeros(3), covariance_matrix=jnp.diag(var)
).sample(seed=key)
quat = tfp.distributions.VonMisesFisher(
jnp.array([1.0, 0.0, 0.0, 0.0]), concentration
).sample(seed=key)
rot_matrix = quaternion_to_rotation_matrix(quat)
return jnp.vstack([
jnp.hstack([rot_matrix, translation.reshape(3, 1)]),
jnp.array([0.0, 0.0, 0.0, 1.0]),
])
def gaussian_vmf_sample(
key: jnp.ndarray,
pose_mean: jnp.ndarray,
var: jnp.ndarray,
concentration: jnp.ndarray,
) -> jnp.ndarray:
"""Function to sample from a GaussianVMF distribution centered at pose_mean."""
return pose_mean.dot(gaussian_vmf(key, var, concentration))
def gaussian_sample(
key: jnp.ndarray, pose_mean: jnp.ndarray, var: jnp.ndarray
) -> jnp.ndarray:
"""Sample from a Gaissuain distribution centered at pose_mean."""
translation = tfp.distributions.MultivariateNormalFullCovariance(
loc=jnp.zeros(3), covariance_matrix=jnp.diag(var)
).sample(seed=key)
return pose_mean.at[:3, -1].add(translation)
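# --- Hedged usage sketch (editor addition, not part of the original module):
# quaternion_to_rotation_matrix expects quaternions ordered as (w, x, y, z);
# the quaternion below encodes a 90-degree rotation about the x axis, and the
# Gaussian-VMF parameters are illustrative values.
if __name__ == '__main__':
  import jax
  quat = jnp.array([0.7071068, 0.7071068, 0.0, 0.0])
  print(quaternion_to_rotation_matrix(quat))
  # A random pose drawn around the identity with the Gaussian-VMF proposal.
  pose = gaussian_vmf_sample(
      jax.random.PRNGKey(0),
      jnp.eye(4),
      var=10.0 * jnp.ones(3),
      concentration=800.0,
  )
  print(pose)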
| threednel-main | threednel/distributions.py |
"""Module implementing the inference program of 3DNEL MSIGP."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import functools
from typing import Callable, Optional, Sequence, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from scipy.spatial.transform import Rotation
from threednel.distributions import gaussian_vmf_sample
from threednel.icp import ICP
@functools.partial(jax.jit, static_argnames='n_samples')
def generate_candidate_poses(
key: jnp.ndarray,
curr_poses: jnp.ndarray,
idx: int,
var: jnp.ndarray,
concentration: float,
n_samples: int,
):
"""Randomly sample a set of candidate poses around the current poses.
Args:
key: PRNGKey
curr_poses: Array of shape (n_objs, 4, 4). Current poses
idx: Index of the object for which we are generating candidate poses
var: Array of shape (3,). Diagonal for the covariance matrix.
    concentration: Concentration parameter for the Gaussian VMF distribution.
    n_samples: Number of candidate poses to sample.
Returns:
The splitted PRNGKey and the generated candidate poses.
"""
keys = jax.random.split(key, n_samples + 1)
key, subkeys = keys[0], keys[1:]
sampled_poses = jax.vmap(
gaussian_vmf_sample, in_axes=(0, None, None, None), out_axes=0
)(subkeys, curr_poses[idx], var, concentration)
candidate_poses = (
jnp.tile(curr_poses[:, None], (1, n_samples, 1, 1))
.at[idx]
.set(sampled_poses)
)
return key, candidate_poses
def move_to_candidate_poses(
inferred_poses: jnp.ndarray,
candidate_poses: jnp.ndarray,
compute_likelihood: Callable,
) -> jnp.ndarray:
"""Function to update the current poses given a set of candidate poses.
We identify the candidate pose with the highest likelihood, and update the
  current inferred poses to the candidate pose with the highest likelihood if it
increases the likelihood.
Args:
inferred_poses: Current inferred poses for all objects.
candidate_poses: A set of candidate poses.
compute_likelihood: A function evaluating the likelihood of different
candidate poses.
Returns:
The updated inferred poses.
"""
candidate_poses = jnp.concatenate(
[inferred_poses[:, None], candidate_poses], axis=1
)
log_likelihoods = compute_likelihood(candidate_poses)
inferred_poses = candidate_poses[:, jnp.argmax(log_likelihoods)]
return inferred_poses
@dataclass
class InferenceProgram:
"""The inference program for 3DNEL MSIGP.
Attributes:
    n_passes_pose_hypotheses: Number of times to go through the pose hypotheses.
n_passes_icp: Number of times to make ICP moves.
n_passes_finetune: Number of times to do random walk finetuning.
n_samples_per_iteration: Number of candidate poses to evaluate in parallel
per iteration.
var_concentration_list: List of variance and concentration parameters to use
for sampling candidate poses.
icp_var_concentration: Variance and concentration parameters for sampling
      candidate poses around ICP results.
use_flip: Whether to use flip proposals.
"""
n_passes_pose_hypotheses: int = 1
n_passes_icp: int = 1
n_passes_finetune: int = 1
n_samples_per_iteration: int = 80
var_concentration_list: Sequence[Tuple[float, float]] = (
(10.0, 300.0),
(10.0, 800.0),
(0.01, 800.0),
(2.0, 2000.0),
)
icp_var_concentration: Tuple[float, float] = (10.0, 800.0)
use_flip: bool = True
def __post_init__(self):
flip_transforms = []
if self.use_flip:
for dimension in range(3):
euler = np.zeros(3)
euler[dimension] = np.pi
transform = np.eye(4)
transform[:3, :3] = Rotation.from_euler('xyz', euler).as_matrix()
flip_transforms.append(transform)
flip_transforms.append(np.eye(4))
self.flip_transforms = jax.device_put(flip_transforms)
def infer(
self,
key: jnp.ndarray,
initial_poses: np.ndarray,
pose_hypotheses: Sequence[np.ndarray],
compute_likelihood: Callable,
icp: Optional[ICP] = None,
) -> jnp.ndarray:
"""Returns the inferred object poses using the inference program.
Args:
key: PRNGKey
initial_poses: Array of shape (n_objs, 4, 4). The initial pose estimates.
pose_hypotheses: Each element is an array of shape (n_hypotheses, 4, 4),
and contains the pose hypotheses for a particular object.
compute_likelihood: Function to evaluate the likelihood of given object
poses.
icp: Optional module implementing ICP-based updates.
"""
n_objs = initial_poses.shape[0]
assert len(pose_hypotheses) == n_objs
inferred_poses = jax.device_put(initial_poses)
pose_hypotheses = jax.device_put(pose_hypotheses)
for _ in range(self.n_passes_pose_hypotheses):
for obj_idx in np.random.permutation(n_objs):
candidate_poses = (
jnp.tile(
inferred_poses[:, None],
(1, pose_hypotheses[obj_idx].shape[0], 1, 1),
)
.at[obj_idx]
.set(pose_hypotheses[obj_idx])
)
inferred_poses = move_to_candidate_poses(
inferred_poses, candidate_poses, compute_likelihood
)
if icp is not None:
var, concentration = self.icp_var_concentration
for _ in range(self.n_passes_icp):
for obj_idx in np.random.permutation(n_objs):
for dimension in range(len(self.flip_transforms)):
try:
icp_pose = jax.device_put(
icp.fit(
inferred_poses[obj_idx].dot(
self.flip_transforms[dimension]
),
obj_idx,
)
)
except ZeroDivisionError:
print('ICP crashed. Using original object pose...')
icp_pose = inferred_poses[obj_idx]
key, candidate_poses = generate_candidate_poses(
key,
inferred_poses.at[obj_idx].set(icp_pose),
obj_idx,
var * jnp.ones(3),
concentration,
self.n_samples_per_iteration,
)
inferred_poses = move_to_candidate_poses(
inferred_poses, candidate_poses, compute_likelihood
)
for _ in range(self.n_passes_finetune):
for var, concentration in self.var_concentration_list:
for obj_idx in np.random.permutation(n_objs):
key, candidate_poses = generate_candidate_poses(
key,
inferred_poses,
obj_idx,
var * jnp.ones(3),
concentration,
self.n_samples_per_iteration,
)
inferred_poses = move_to_candidate_poses(
inferred_poses, candidate_poses, compute_likelihood
)
return inferred_poses
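# --- Hedged usage sketch (editor addition, not part of the original module):
# runs the inference program on a toy two-object problem with a dummy
# likelihood that prefers translations close to the camera origin. All sizes,
# hypotheses and parameter values below are illustrative assumptions.
if __name__ == '__main__':
  def dummy_likelihood(candidate_poses: jnp.ndarray) -> jnp.ndarray:
    # candidate_poses has shape (n_objs, n_particles, 4, 4); return one score
    # per particle, summed over objects.
    return -jnp.sum(
        jnp.linalg.norm(candidate_poses[:, :, :3, 3], axis=-1), axis=0
    )
  initial_poses = np.tile(np.eye(4), (2, 1, 1))
  initial_poses[:, :3, 3] = np.array([[0.0, 0.0, 700.0], [50.0, 0.0, 650.0]])
  pose_hypotheses = [
      np.tile(initial_poses[obj_idx], (4, 1, 1)) for obj_idx in range(2)
  ]
  program = InferenceProgram(
      n_passes_pose_hypotheses=1,
      n_passes_icp=0,
      n_passes_finetune=1,
      n_samples_per_iteration=8,
  )
  inferred_poses = program.infer(
      key=jax.random.PRNGKey(0),
      initial_poses=initial_poses,
      pose_hypotheses=pose_hypotheses,
      compute_likelihood=dummy_likelihood,
      icp=None,
  )
  print(inferred_poses.shape)  # (2, 4, 4)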
| threednel-main | threednel/inference_program.py |
"""Module containing utils for discretizing the rotation space."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import jax
import jax.numpy as jnp
import numpy as np
from scipy.spatial.transform import Rotation as R
def fibonacci_sphere(n_points: int = 1000):
"""Evenly distribute points on a sphere using fibonacci sphere.
https://extremelearning.com.au/evenly-distributing-points-on-a-sphere/
Args:
n_points: Number of samples on the sphere.
Returns:
n_points evenly distributed points on the sphere using fibonacci sphere.
"""
phi = np.pi * (3.0 - np.sqrt(5.0)) # golden angle in radians
y = 1 - (np.arange(n_points) / (n_points - 1)) * 2
radius = np.sqrt(1 - y**2)
theta = phi * np.arange(n_points)
x = np.cos(theta) * radius
z = np.sin(theta) * radius
points = np.stack([x, y, z], axis=1)
return points
@jax.jit
@functools.partial(jax.vmap, in_axes=(None, 0), out_axes=0)
def get_rotation_vectors(source: jnp.ndarray, target: jnp.ndarray):
"""Generate a rotation to rotate the source vector to the target vector.
Args:
source: Array of shape (3,) representing the source vector.
target: Array of shape (3,) representing the target vector.
Returns:
Array of shape (3,), representing the rotation vector.
"""
perp = jnp.cross(source, target)
perp = perp / jnp.linalg.norm(perp)
theta = jnp.arctan2(target @ jnp.cross(perp, source), target @ source)
rotvec = theta * perp
return rotvec
def generate_prototype_rotations(
num_points_on_sphere: int = 200,
num_inplane_rotations: int = 32,
principal_axis: jnp.ndarray = jnp.array([0, 0, 1.0]),
) -> R:
"""Generate a set of prototype rotations to discretize the rotation space.
Each prototype rotation first rotates the given principal axis to a direction
specified by a point on the unit sphere, and then rotates the object around
the resulting axis by an in-plane rotation.
Args:
    num_points_on_sphere (int): Number of points sampled on the unit sphere.
    num_inplane_rotations (int): Number of in-plane rotations per sphere point.
    principal_axis (jnp.ndarray): Axis of shape (3,) that is rotated onto each
      sphere point before the in-plane rotation is applied.
  Returns:
    R: A scipy Rotation containing num_points_on_sphere * num_inplane_rotations
      prototype rotations.
"""
points_on_sphere = fibonacci_sphere(num_points_on_sphere)
rotation_vectors = get_rotation_vectors(principal_axis, points_on_sphere)
rotate_z = R.from_rotvec(rotation_vectors)
prototype_rotations = []
for ii in range(points_on_sphere.shape[0]):
prototype_rotations.append(
(
R.from_rotvec(
np.linspace(
0, 2 * np.pi, num_inplane_rotations, endpoint=False
)[:, None]
* points_on_sphere[ii]
)
* rotate_z[ii]
).as_quat()
)
prototype_rotations = R.from_quat(np.concatenate(prototype_rotations, axis=0))
return prototype_rotations
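# --- Hedged usage sketch (editor addition, not part of the original module):
# the grid sizes below are illustrative; the module defaults are 200 sphere
# points and 32 in-plane rotations.
if __name__ == '__main__':
  prototype_rotations = generate_prototype_rotations(
      num_points_on_sphere=20, num_inplane_rotations=4
  )
  # 20 viewing directions x 4 in-plane rotations = 80 prototype rotations.
  print(len(prototype_rotations))  # 80
  print(prototype_rotations.as_matrix().shape)  # (80, 3, 3)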
| threednel-main | threednel/rotation.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| threednel-main | threednel/renderer/__init__.py |
"""Module containing camera-related utilities."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import NamedTuple
import numpy as np
class CameraIntrinsics(NamedTuple):
"""Container class for camera intrinsics matrix.
Attributes:
height: Height of the image
width: Width of the image
fx: Focal length in the x direction
fy: Focal length in the y direction
cx: Principal point in the x direction
cy: Principal point in the y direction
near: Near plane distance
far: Far plane distance
"""
height: int
width: int
fx: float
fy: float
cx: float
cy: float
near: float = 10.0
far: float = 10000.0
@staticmethod
def from_matrix(
shape: tuple[int, int], intrinsics: np.ndarray
) -> CameraIntrinsics:
"""Construct a CameraIntrinsics object from a camera intrinsics matrix."""
return CameraIntrinsics(
height=shape[0],
width=shape[1],
fx=intrinsics[0, 0],
fy=intrinsics[1, 1],
cx=intrinsics[0, 2],
cy=intrinsics[1, 2],
)
@property
def intrinsics_matrix(self) -> np.ndarray:
"""Returns the 3x3 camera intrinsics matrix."""
intrinsics = np.array(
[[self.fx, 0, self.cx], [0, self.fy, self.cy], [0, 0, 1]]
)
return intrinsics
def scale(self, scale_factor: float) -> CameraIntrinsics:
"""Returns the scaled CameraIntrinsics object."""
return CameraIntrinsics(
height=int(np.round(self.height * scale_factor)),
width=int(np.round(self.width * scale_factor)),
fx=self.fx * scale_factor,
fy=self.fy * scale_factor,
cx=self.cx * scale_factor,
cy=self.cy * scale_factor,
near=self.near,
far=self.far,
)
def open_gl_projection_matrix(
h: int,
w: int,
fx: float,
fy: float,
cx: float,
cy: float,
near: float,
far: float,
) -> np.ndarray:
"""Function to create OpenGL projection matrix.
Args:
h: Height of the image
w: Width of the image
fx: Focal length in the x direction
fy: Focal length in the y direction
cx: Principal point in the x direction
cy: Principal point in the y direction
near: Near plane distance
far: Far plane distance
Returns:
OpenGL projection matrix.
"""
# transform from cv2 camera coordinates to opengl (flipping sign of y and z)
view = np.eye(4)
view[1:3] *= -1
# see http://ksimek.github.io/2013/06/03/calibrated_cameras_in_opengl/
persp = np.zeros((4, 4))
persp[0, 0] = fx
persp[1, 1] = fy
persp[0, 2] = cx
persp[1, 2] = cy
persp[2, 2] = near + far
persp[2, 3] = near * far
persp[3, 2] = -1
# transform the camera matrix from cv2 to opengl as well (flipping sign of
# y and z)
persp[:2, 1:3] *= -1
# The origin of the image is in the *center* of the top left pixel.
# The orthographic matrix should map the whole image *area* into the opengl
# NDC, therefore the -.5 below:
left, right, bottom, top = -0.5, w - 0.5, -0.5, h - 0.5
orth = np.array([
(2 / (right - left), 0, 0, -(right + left) / (right - left)),
(0, 2 / (top - bottom), 0, -(top + bottom) / (top - bottom)),
(0, 0, -2 / (far - near), -(far + near) / (far - near)),
(0, 0, 0, 1),
])
return orth @ persp @ view
def scale_camera_parameters(
h: int,
w: int,
fx: float,
fy: float,
cx: float,
cy: float,
scaling_factor: float,
):
"""Function to scale camera parameters.
Args:
h: Height of the image
w: Width of the image
fx: Focal length in the x direction
fy: Focal length in the y direction
cx: Principal point in the x direction
cy: Principal point in the y direction
scaling_factor: The scaling factor we use to scale the camera parameters.
Returns:
Scaled camera parameters.
"""
new_fx = fx * scaling_factor
new_fy = fy * scaling_factor
new_cx = cx * scaling_factor
new_cy = cy * scaling_factor
new_h = int(np.round(h * scaling_factor))
new_w = int(np.round(w * scaling_factor))
return new_h, new_w, new_fx, new_fy, new_cx, new_cy
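# --- Hedged usage sketch (editor addition, not part of the original module):
# the intrinsics below are illustrative placeholders for a 640x480 camera,
# not the calibration of any particular BOP sensor.
if __name__ == '__main__':
  intrinsics = CameraIntrinsics.from_matrix(
      (480, 640),
      np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]),
  )
  scaled = intrinsics.scale(0.25)
  print(scaled.height, scaled.width)  # 120 160
  projection = open_gl_projection_matrix(
      scaled.height,
      scaled.width,
      scaled.fx,
      scaled.fy,
      scaled.cx,
      scaled.cy,
      scaled.near,
      scaled.far,
  )
  print(projection.shape)  # (4, 4)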
| threednel-main | threednel/renderer/camera.py |
"""Wrapper around https://github.com/nishadgothoskar/pararender.
Efficient parallel rendering of a large number of 3D scene descriptions.
"""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import NamedTuple
import jax.numpy as jnp
class RenderedData(NamedTuple):
"""Container class holding the rendered results.
Attributes:
model_xyz: Coordinates of the rendered points in camera frame.
obj_coords: Coordinates of the rendered points in object frame.
obj_ids: Object ids of the rendered points.
"""
model_xyz: jnp.ndarray
obj_coords: jnp.ndarray
obj_ids: jnp.ndarray
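# Hedged construction sketch (editor addition): a RenderedData for a single
# H x W rendering; the shapes below are illustrative, and negative obj_ids
# mark background pixels (downstream code masks on obj_ids >= 0).
#   rendered = RenderedData(
#       model_xyz=jnp.zeros((480, 640, 3)),
#       obj_coords=jnp.zeros((480, 640, 3)),
#       obj_ids=-jnp.ones((480, 640)),
#   )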
| threednel-main | threednel/renderer/parallel.py |
"""Module containing classes for detector and detection results."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass
from typing import Callable, Optional, Sequence, Tuple
import jax
import jax.dlpack
import jax.numpy as jnp
import numpy as np
import torch
from threednel import ndl
from threednel.bop.bop_surfemb import BOPSurfEmb
from threednel.bop.bop_vote import BOPVoting
from threednel.bop.data import RGBDImage
from threednel.bop.hypotheses import HypothesesGeneration
from threednel.icp import ICP
from threednel.inference_program import InferenceProgram
from threednel.renderer.parallel import ParallelRenderer
@dataclass
class DetectionResults:
"""Container class for detection results.
Attributes:
initial_poses: Initial pose estimation.
inferred_poses: Final inferred pose estimation.
renderer: Constructed renderer.
query_embeddings: Relevant query embeddings.
data_mask: Data masks.
pose_hypotheses: All the generated pose hypotheses.
"""
initial_poses: jnp.ndarray
inferred_poses: jnp.ndarray
renderer: ParallelRenderer
query_embeddings: jnp.ndarray
data_mask: jnp.ndarray
pose_hypotheses: Sequence[jnp.ndarray]
@dataclass
class Detector:
"""A detector implementing 6D pose estimation using 3DNEL."""
data_directory: str = os.environ['BOP_DATA_DIR']
device: str = 'cuda:0'
n_objs_range: Tuple[int, int] = (3, 7)
# Params for hypotheses generation
mask_threshold: float = 0.7
n_top_translations: int = 20
n_pose_hypotheses_per_crop: int = 80
maximum_filter_size: int = 10
n_top_rotations_per_translation: int = 10
n_pose_hypotheses_per_object: int = 30
default_scale: float = 1.5
use_crops: bool = True
# Params for likelihood
likelihood_factory: Callable = ndl.JAXNDL
r: float = 5.0
outlier_prob: float = 0.01
outlier_volume: float = 1000.0**3
outlier_scaling: float = 1 / 70000
filter_shape: Tuple[int, int] = (10, 10)
# Params for inference program
n_passes_pose_hypotheses: int = 1
n_passes_icp: int = 1
n_passes_finetune: int = 1
n_samples_per_iteration: int = 80
var_concentration_list: Sequence[Tuple[float, float]] = (
(10.0, 300.0),
(10.0, 800.0),
(0.01, 800.0),
(2.0, 2000.0),
)
use_flip: bool = True
# Params for ICP
icp_var_concentration: Tuple[float, float] = (10.0, 800.0)
icp_n_outer_iterations: int = 5
icp_n_inner_iterations: int = 2
def __post_init__(self):
self.bop_surfemb = BOPSurfEmb(
surfemb_model_path=os.path.join(
self.data_directory, 'models', 'ycbv-jwpvdij1.compact.ckpt'
),
device=self.device,
)
self.bop_vote = BOPVoting(data_directory=self.data_directory, device=self.device)
self.bop_vote.load_all_key_embeddings(self.bop_surfemb.surfemb_model)
self.hypotheses_generation = HypothesesGeneration(
bop_surfemb=self.bop_surfemb,
bop_vote=self.bop_vote,
mask_threshold=self.mask_threshold,
n_top_translations=self.n_top_translations,
n_pose_hypotheses_per_crop=self.n_pose_hypotheses_per_crop,
maximum_filter_size=self.maximum_filter_size,
n_top_rotations_per_translation=self.n_top_rotations_per_translation,
n_pose_hypotheses_per_object=self.n_pose_hypotheses_per_object,
default_scale=self.default_scale,
use_crops=self.use_crops,
)
self.likelihood_dict = {}
for n_objs in range(self.n_objs_range[0], self.n_objs_range[1]):
self.likelihood_dict[n_objs] = self.likelihood_factory(
model=self.bop_surfemb.surfemb_model,
n_objs=n_objs,
r=self.r,
outlier_prob=self.outlier_prob,
outlier_volume=self.outlier_volume,
outlier_scaling=self.outlier_scaling,
filter_shape=self.filter_shape,
data_directory=self.data_directory,
)
self.inference_program = InferenceProgram(
n_passes_pose_hypotheses=self.n_passes_pose_hypotheses,
n_passes_icp=self.n_passes_icp,
n_passes_finetune=self.n_passes_finetune,
n_samples_per_iteration=self.n_samples_per_iteration,
var_concentration_list=self.var_concentration_list,
use_flip=self.use_flip,
)
def detect(
self,
img: RGBDImage,
key: jnp.ndarray,
scale_factor: float = 0.25,
initial_poses: Optional[jnp.ndarray] = None,
):
"""Function to do pose estimation on a given RGBDImage.
Args:
img: The input RGBDImage.
key: JAX PRNGKey.
scale_factor: Scale factor used to scale the input RGBDImage.
initial_poses: Initial poses. If none, use the top scoring pose hypothesis
for each object as the initial poses.
Returns:
Detection results on the input RGBDImage.
"""
(
query_embeddings,
data_mask,
pose_hypotheses,
) = self.hypotheses_generation.generate(img)
log_normalizers = self.bop_vote.get_log_normalizers(
torch.from_dlpack(jax.dlpack.to_dlpack(query_embeddings)),
img.bop_obj_indices,
squeeze=False,
)
n_objs = len(img.bop_obj_indices)
likelihood = self.likelihood_dict[n_objs]
likelihood.set_for_new_img(
img=img,
query_embeddings=query_embeddings,
log_normalizers=jax.dlpack.from_dlpack(torch.to_dlpack(log_normalizers)),
data_mask=data_mask,
scale_factor=scale_factor,
)
def render(pose: np.ndarray, obj_idx: int):
"""Function to render an objectin a given pose and get the point cloud and object mask.
Args:
pose: Array of shape (4, 4). Pose of the object.
obj_idx: object index in the scene.
Returns:
cloud: Array of shape (n_points, 3). Rendered point cloud of the
object.
"""
pose = jax.device_put(pose)
rendered_data = likelihood.renderer.render(pose[None, None], [obj_idx])
model_mask = np.array(rendered_data.obj_ids[0] >= 0)
cloud = np.array(rendered_data.model_xyz[0])[model_mask]
return cloud, model_mask
self.icp = ICP(
target_cloud=likelihood.data_xyz[likelihood.data_mask],
render=render,
n_outer_iterations=self.icp_n_outer_iterations,
n_inner_iterations=self.icp_n_inner_iterations,
)
if initial_poses is None:
initial_poses = jnp.array([pose_hypotheses[ii][0] for ii in range(n_objs)])
inferred_poses = self.inference_program.infer(
key,
initial_poses,
pose_hypotheses,
compute_likelihood=likelihood.compute_likelihood,
icp=self.icp,
)
return DetectionResults(
initial_poses=initial_poses,
inferred_poses=inferred_poses,
renderer=likelihood.renderer,
query_embeddings=query_embeddings,
data_mask=data_mask,
pose_hypotheses=pose_hypotheses,
)
| threednel-main | threednel/bop/detector.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import jax.dlpack
import jax.numpy as jnp
import numpy as np
import taichi as ti
import torch
import trimesh
from scipy.ndimage import maximum_filter
from threednel.bop.data import RGBDImage
from threednel.rotation import generate_prototype_rotations
from threednel.third_party.surfemb.surface_embedding import \
SurfaceEmbeddingModel
from torch_cluster import fps
from tqdm import tqdm
@ti.kernel
def taichi_compute_max_indices_log_normalizers_probs(
query_embeddings: ti.types.ndarray(element_dim=1),
key_embeddings: ti.types.ndarray(element_dim=1),
max_indices: ti.types.ndarray(dtype=ti.i32),
log_normalizers: ti.types.ndarray(),
probs: ti.types.ndarray(),
):
"""A Taichi kernel that updates max_indices, log_normalizers and probs in place.
Args:
query_embeddings: Array of shape (M, d). Query embeddings of the image
key_embeddings: Array of shape (N, d). Key embeddings of the model points
max_indices: Array of shape (M,). max_indices[i] = argmax(key_embeddings @
query_embeddings[i]). Indices of the model points with key embeddings
most similar to the given query embeddings.
log_normalizers: Array of shape (M,). logsumexp(key_embeddings @
query_embeddings[i]). Log of the normalization constant summing over all
model points.
    probs: Array of shape (M,). probs[i] = np.exp(query_embeddings[i] @
      key_embeddings[max_indices[i]] - log_normalizers[i]). The probability
(P_RGB) of the model point that is most similar to a given query
embedding.
"""
for i in range(query_embeddings.shape[0]):
max_indices[i] = -1
log_normalizers[i] = 0.0
max_inner_product = -100000.0
for k in range(key_embeddings.shape[0]):
inner_product = key_embeddings[k].dot(query_embeddings[i])
if inner_product <= max_inner_product:
log_normalizers[i] += ti.exp(inner_product - max_inner_product)
else:
max_indices[i] = k
log_normalizers[i] *= ti.exp(max_inner_product - inner_product)
log_normalizers[i] += 1.0
max_inner_product = inner_product
log_normalizers[i] = ti.log(log_normalizers[i]) + max_inner_product
for i in range(query_embeddings.shape[0]):
probs[i] = ti.exp(
key_embeddings[max_indices[i]].dot(query_embeddings[i]) - log_normalizers[i]
)
@ti.kernel
def taichi_compute_log_normalizers(
query_embeddings: ti.types.ndarray(element_dim=1),
key_embeddings: ti.types.ndarray(element_dim=1),
log_normalizers: ti.types.ndarray(),
):
"""A taichi kernel that updates log_normalizers in place.
Args:
query_embeddings: Array of shape (M, d). Query embeddings of the image
key_embeddings: Array of shape (N, d). Key embeddings of the model points
log_normalizers: Array of shape (M,). logsumexp(key_embeddings @
query_embeddings[i]). Log of the normalization constant summing over all
model points.
"""
for i in range(query_embeddings.shape[0]):
log_normalizers[i] = 0.0
max_inner_product = -100000.0
for k in range(key_embeddings.shape[0]):
inner_product = key_embeddings[k].dot(query_embeddings[i])
if inner_product <= max_inner_product:
log_normalizers[i] += ti.exp(inner_product - max_inner_product)
else:
log_normalizers[i] *= ti.exp(max_inner_product - inner_product)
log_normalizers[i] += 1.0
max_inner_product = inner_product
log_normalizers[i] = ti.log(log_normalizers[i]) + max_inner_product
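# Editor's note (hedged): the kernel above is a streaming log-sum-exp. For
# each query embedding q_i it computes log sum_k exp(<key_k, q_i>) while
# tracking a running maximum for numerical stability, i.e. it is equivalent to
#   log_normalizers = logsumexp(query_embeddings @ key_embeddings.T, axis=1)
# but never materialises the full (M, N) inner-product matrix.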
@ti.kernel
def taichi_spherical_vote(
centers: ti.types.ndarray(element_dim=1),
radiuses: ti.types.ndarray(),
weights: ti.types.ndarray(),
voxel_grid: ti.types.ndarray(),
voxel_grid_start: ti.types.ndarray(element_dim=1),
voxel_diameter: float,
multipliers: ti.types.ndarray(),
):
"""A Taichi kernel that implements the spherical voting procedure.
Updates voxel_grid in place. Units are milimeters.
Refer to Section C in the supplementary for more details.
Args:
centers: Array of shape (batch_size, n_centers, 3,). Coordinates of the
centers of the spheres.
radiuses: Array of shape (batch_size, n_centers). Radiuses of the spheres.
weights: Array of shape (batch_size, n_centers,). Weights of votes from
the spheres.
voxel_grid: Array of shape voxel_grid_shape. Votes from different sphere
centers are aggregated into the voxel grid.
voxel_grid_start: Array of shape (3,). Coordinate of the center of voxel
(0, 0, 0).
voxel_diameter: float. Diameter of a voxel.
multipliers: Constant array with elements [1.0, -1.0].
"""
for voxel in ti.grouped(voxel_grid):
voxel_grid[voxel] = 0.0
for ii, jj in centers:
center_on_voxel_grid = (centers[ii, jj] - voxel_grid_start[None]) / voxel_diameter
center_on_voxel_grid = ti.round(center_on_voxel_grid)
radius_in_voxels = radiuses[ii, jj] / voxel_diameter + 0.5
for x in range(ti.ceil(radius_in_voxels)):
for y in range(ti.ceil(ti.sqrt(radius_in_voxels**2 - x**2))):
z_range = (
ti.ceil(
ti.sqrt(
ti.max(
0.0,
(radiuses[ii, jj] / voxel_diameter - 0.5) ** 2
- x**2
- y**2,
)
)
),
ti.ceil(ti.sqrt(radius_in_voxels**2 - x**2 - y**2)),
)
for z in range(z_range[0], z_range[1]):
for xx in range(2):
if x == 0 and multipliers[xx] < 0:
continue
x_coord = ti.cast(
center_on_voxel_grid[0] + multipliers[xx] * x,
ti.i32,
)
if x_coord < 0 or x_coord >= voxel_grid.shape[1]:
continue
for yy in range(2):
if y == 0 and multipliers[yy] < 0:
continue
y_coord = ti.cast(
center_on_voxel_grid[1] + multipliers[yy] * y,
ti.i32,
)
if y_coord < 0 or y_coord >= voxel_grid.shape[2]:
continue
for zz in range(2):
if z == 0 and multipliers[zz] < 0:
continue
z_coord = ti.cast(
center_on_voxel_grid[2] + multipliers[zz] * z,
ti.i32,
)
if z_coord < 0 or z_coord >= voxel_grid.shape[3]:
continue
ti.atomic_add(
voxel_grid[ii, x_coord, y_coord, z_coord],
weights[ii, jj],
)
@functools.partial(jax.jit, static_argnames=('n_top_translations', 'n_pose_hypotheses'))
def _get_top_pose_hypotheses(
voting_voxel_grid: jnp.ndarray,
keypoints_voxel_offsets: jnp.ndarray,
voxel_grid_start: jnp.ndarray,
voxel_diameter: float,
prototype_rotations: jnp.ndarray,
n_top_translations: int = 100,
n_pose_hypotheses: int = 50,
):
"""Function to get top pose hypotheses given voting results in a voxel grid."""
@functools.partial(jax.vmap, in_axes=0, out_axes=0)
def get_top_pose_hypotheses_for_obj(
voting_voxel_grid: jnp.ndarray,
keypoints_voxel_offsets: jnp.ndarray,
):
indices = jnp.array(
jnp.unravel_index(
jnp.argsort(-voting_voxel_grid[0].ravel()),
voting_voxel_grid[0].shape,
)
).T
top_indices = indices[:n_top_translations]
voxel_indices = top_indices[:, None, None] + keypoints_voxel_offsets
valid_entries = jnp.logical_and(
jnp.all(voxel_indices >= 0, axis=(-2, -1)),
jnp.all(
voxel_indices < jnp.array(voting_voxel_grid.shape[1:]),
axis=(-2, -1),
),
)
scores = jnp.where(
valid_entries,
jnp.sum(
voting_voxel_grid[1:][
jnp.arange(keypoints_voxel_offsets.shape[1])[None, None],
voxel_indices[..., 0],
voxel_indices[..., 1],
voxel_indices[..., 2],
],
axis=-1,
),
-jnp.inf,
)
top_voxel_indices, top_rotation_indices = jnp.unravel_index(
jnp.argsort(-scores.ravel())[:n_pose_hypotheses], scores.shape
)
translations = voxel_grid_start + top_indices[top_voxel_indices] * voxel_diameter
rotations = prototype_rotations[top_rotation_indices]
transform_matrices = jnp.zeros((n_pose_hypotheses, 4, 4))
transform_matrices = transform_matrices.at[:, :3, :3].set(rotations)
transform_matrices = transform_matrices.at[:, :3, 3].set(translations)
transform_matrices = transform_matrices.at[:, 3, 3].set(1.0)
top_scores = scores[top_voxel_indices, top_rotation_indices]
return transform_matrices.reshape((-1, 4, 4)), top_scores.ravel()
return get_top_pose_hypotheses_for_obj(voting_voxel_grid, keypoints_voxel_offsets)
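# Added commentary (not in the original source): the heuristic score of a pose
# hypothesis (t, R) combines the center grid and the keypoint grids. With t one
# of the n_top_translations best-voted voxels and R a prototype rotation, the
# score is sum_k V_k[t + offset_k(R)], where V_k is the voting grid of keypoint
# k and offset_k(R) is the precomputed voxel offset of the R-rotated keypoint;
# hypotheses whose offset keypoints fall outside the grid get a score of -inf.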
@dataclass
class BOPVoting:
"""Wrapper class for running the spherical voting process.
The voting process assumes all the relevant object coordinates (x, y, z)
satisfy np.all(np.array([x, y, z]) >= voxel_grid_start) and
np.all(
np.array([x, y, z]) <
voxel_grid_start + voxel_diameter * np.array(voxel_grid_shape)
)
"""
data_directory: str
num_points_on_sphere: int = 200
num_inplane_rotations: int = 32
principal_axis: np.ndarray = np.array([0, 0, 1.0])
n_keypoints: int = 8
voxel_diameter: float = 5.0
voxel_grid_start: np.ndarray = np.array([-350.0, -210.0, 530.0], dtype=np.float32)
voxel_grid_shape: Tuple[int, int, int] = (129, 87, 168)
device: str = 'cuda:0'
def __post_init__(self):
device = torch.device(self.device)
self.prototype_rotations = torch.from_numpy(
generate_prototype_rotations(
num_points_on_sphere=self.num_points_on_sphere,
num_inplane_rotations=self.num_inplane_rotations,
principal_axis=self.principal_axis,
).as_matrix()
).to(device)
with open(
os.path.join(
self.data_directory,
'bop',
'ycbv',
'models',
'models_info.json',
),
'r',
) as f:
models_info = json.load(f)
self.models_info = {
int(bop_obj_idx): models_info[bop_obj_idx] for bop_obj_idx in models_info
}
self.bop_obj_indices = list(self.models_info.keys())
self._init_voting_info()
def _init_voting_info(self):
"""Initialize information relevant for the voting process.
The relevant information includes:
keypoints: Sampled on objects surfaces using farthest point sampling.
model_radiuses: The distance of each model point to the sampled keypoints.
keypoints_voxel_offsets: Offsets in terms of number of voxel grids, given
model_radiuses and voxel_diameter.
We precompute model_radiuses and keypoints_voxel_offsets to allow efficient
implementation of the voting process.
"""
device = torch.device(self.device)
self.model_radiuses = {}
self.keypoints = {}
self.keypoints_voxel_offsets = {}
for bop_obj_idx in tqdm(self.bop_obj_indices):
model_coords = torch.from_numpy(
np.asarray(
trimesh.load(
os.path.join(
self.data_directory,
f'surface_samples/ycbv/obj_{bop_obj_idx:06d}.ply',
)
).vertices
)
).to(device)
keypoints_indices = fps(
model_coords,
batch=None,
ratio=self.n_keypoints / model_coords.shape[0],
)
keypoints = model_coords[keypoints_indices]
keypoints = torch.cat([torch.zeros((1, 3), device=device), keypoints], dim=0)
self.keypoints[bop_obj_idx] = keypoints
self.model_radiuses[bop_obj_idx] = torch.norm(
model_coords - keypoints[:, None], dim=-1
).to(device)
rotated_keypoints = torch.einsum(
'ijk,mk->imj', self.prototype_rotations, keypoints[1:]
)
self.keypoints_voxel_offsets[bop_obj_idx] = torch.round(
rotated_keypoints / self.voxel_diameter
).type(torch.int32)
def load_all_key_embeddings(self, surfemb_model: SurfaceEmbeddingModel):
"""Loading the key embeddings for all models points in all objects."""
print('Loading all key embeddings...')
all_key_embeddings = {}
for bop_obj_idx in tqdm(self.bop_obj_indices):
verts_np = trimesh.load(
os.path.join(
self.data_directory,
f'surface_samples/ycbv/obj_{bop_obj_idx:06d}.ply',
)
).vertices
mesh = trimesh.load(
os.path.join(
self.data_directory,
f'bop/ycbv/models/obj_{bop_obj_idx:06d}.ply',
)
)
offset, scale = (
mesh.bounding_sphere.primitive.center,
mesh.bounding_sphere.primitive.radius,
)
verts_norm = (verts_np - offset) / scale
all_key_embeddings[bop_obj_idx] = surfemb_model.infer_mlp(
torch.from_numpy(verts_norm).float().to(surfemb_model.device),
bop_obj_idx,
)
self.all_key_embeddings = all_key_embeddings
def get_max_indices_normalizers_probs(
self,
query_embeddings: torch.Tensor,
bop_obj_indices: Union[np.ndarray, int],
squeeze: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""Function to get max_indices, log_normalizers and probs.
Refer to taichi_compute_max_indices_log_normalizers_probs for details.
Args:
query_embeddings: Array of shape (N, len(bop_obj_indices),
emb_dim)
bop_obj_indices: indices should be in self.bop_obj_indices.
Typically ranges from 1 to len(self.bop_obj_indices) + 1
squeeze: Whether to get rid of redundant size 1 dimensions.
Returns:
max_indices, log_normalizers and probs.
"""
target_shape = query_embeddings.shape[:-2]
if query_embeddings.ndim == 4:
query_embeddings = query_embeddings.reshape((-1,) + query_embeddings.shape[-2:])
if np.isscalar(bop_obj_indices):
bop_obj_indices = jnp.array([bop_obj_indices])
assert query_embeddings.shape[1] == len(bop_obj_indices)
max_indices = []
log_normalizers = []
probs = []
for idx, bop_obj_idx in enumerate(bop_obj_indices):
device = torch.device(self.device)
max_indices_for_obj = torch.empty(
query_embeddings.shape[:1], dtype=torch.int32, device=device
)
log_normalizers_for_obj = torch.empty(
query_embeddings.shape[:1], dtype=torch.float32, device=device
)
probs_for_obj = torch.empty(
query_embeddings.shape[:1], dtype=torch.float32, device=device
)
taichi_compute_max_indices_log_normalizers_probs(
query_embeddings[:, idx].float(),
self.all_key_embeddings[bop_obj_idx].float(),
max_indices_for_obj,
log_normalizers_for_obj,
probs_for_obj,
)
ti.sync()
max_indices.append(max_indices_for_obj)
log_normalizers.append(log_normalizers_for_obj)
probs.append(probs_for_obj)
max_indices = torch.stack(max_indices, dim=1)
max_indices = max_indices.reshape(target_shape + max_indices.shape[-1:])
log_normalizers = torch.stack(log_normalizers, dim=1)
log_normalizers = log_normalizers.reshape(target_shape + log_normalizers.shape[-1:])
probs = torch.stack(probs, dim=1)
probs = probs.reshape(target_shape + probs.shape[-1:])
if squeeze:
max_indices, log_normalizers, probs = (
torch.squeeze(max_indices),
torch.squeeze(log_normalizers),
torch.squeeze(probs),
)
return max_indices, log_normalizers, probs
def get_log_normalizers(
self,
query_embeddings: torch.Tensor,
bop_obj_indices: Union[np.ndarray, int],
squeeze: bool = True,
) -> torch.Tensor:
"""Function to get log_normalizers.
Refer to taichi_compute_max_indices_log_normalizers_probs for details.
Args:
query_embeddings: Array of shape (N, len(bop_obj_indices),
emb_dim)
bop_obj_indices: indices should be in self.bop_obj_indices.
Typically ranges from 1 to len(self.bop_obj_indices) + 1
squeeze: Whether to get rid of redundant size 1 dimensions.
Returns:
log_normalizers.
"""
target_shape = query_embeddings.shape[:-2]
if query_embeddings.ndim == 4:
query_embeddings = query_embeddings.reshape((-1,) + query_embeddings.shape[-2:])
if np.isscalar(bop_obj_indices):
bop_obj_indices = jnp.array([bop_obj_indices])
assert query_embeddings.shape[1] == len(bop_obj_indices)
log_normalizers = []
for idx, bop_obj_idx in enumerate(bop_obj_indices):
device = torch.device(self.device)
log_normalizers_for_obj = torch.empty(
query_embeddings.shape[:1], dtype=torch.float32, device=device
)
taichi_compute_log_normalizers(
query_embeddings[:, idx].float().contiguous(),
self.all_key_embeddings[bop_obj_idx].float(),
log_normalizers_for_obj,
)
ti.sync()
log_normalizers.append(log_normalizers_for_obj)
log_normalizers = torch.stack(log_normalizers, dim=1)
log_normalizers = log_normalizers.reshape(target_shape + log_normalizers.shape[-1:])
if squeeze:
log_normalizers = torch.squeeze(log_normalizers)
return log_normalizers
def get_voting_voxel_grids(
self,
img: RGBDImage,
query_embeddings: torch.Tensor,
mask: Optional[torch.Tensor] = None,
bop_obj_indices: Optional[Union[jnp.ndarray, int]] = None,
max_indices: Optional[torch.Tensor] = None,
probs: Optional[torch.Tensor] = None,
return_log_normalizers: bool = False,
):
"""Function to run voting and get the voting voxel grids.
Args:
img: The input RGBDImage
query_embeddings: Array of shape (H, W, len(img.bop_obj_indices),
        emb_dim) or array of shape (H, W, len(bop_obj_indices), emb_dim)
when bop_obj_indices is not None
mask: Array of shape (H, W)
      bop_obj_indices: Optional array of BOP object indices to vote for.
        Defaults to img.bop_obj_indices when None.
max_indices: Optional precomputed max_indices
probs: Optional precomputed probs
return_log_normalizers: Whether to also return log_normalizers
Returns:
voting_voxel_grids: Array of shape (
len(bop_obj_indices),
self.n_keypoints + 1,
) + self.voxel_grid_shape
and optionally the log_normalizers
"""
device = torch.device(self.device)
if np.isscalar(bop_obj_indices):
bop_obj_indices = np.array([bop_obj_indices])
if mask is None:
mask = (torch.from_numpy(img.depth) > 0).to(device)
assert mask.shape == query_embeddings.shape[:2]
assert mask.shape == img.depth.shape
data_xyz = torch.from_numpy(img.unproject()).to(device)[mask.type(torch.bool)]
centers = torch.broadcast_to(data_xyz, (self.n_keypoints + 1,) + data_xyz.shape)
if bop_obj_indices is None:
bop_obj_indices = img.bop_obj_indices
assert jnp.all(jnp.sum(bop_obj_indices[:, None] == img.bop_obj_indices, axis=1) > 0)
obj_indices_in_img = torch.from_numpy(
np.argmax(bop_obj_indices[:, None] == img.bop_obj_indices, axis=1)
).to(device)
if max_indices is None or probs is None or return_log_normalizers:
if query_embeddings.shape[2] == len(bop_obj_indices):
(
max_indices,
log_normalizers_for_mask,
probs,
) = self.get_max_indices_normalizers_probs(
query_embeddings[mask.type(torch.bool)],
bop_obj_indices,
squeeze=False,
)
else:
assert query_embeddings.shape[2] == len(img.bop_obj_indices)
(
max_indices,
log_normalizers_for_mask,
probs,
) = self.get_max_indices_normalizers_probs(
query_embeddings[mask][:, obj_indices_in_img],
bop_obj_indices,
squeeze=False,
)
log_normalizers = torch.zeros(query_embeddings.shape[:-1], device=device)
log_normalizers[mask.type(torch.bool)] = log_normalizers_for_mask
voting_voxel_grids = torch.empty(
(len(bop_obj_indices), self.n_keypoints + 1) + self.voxel_grid_shape,
dtype=torch.float32,
device=device,
)
multipliers = torch.tensor([1.0, -1.0], dtype=torch.float32, device=device)
for idx, bop_obj_idx in enumerate(bop_obj_indices):
weights = torch.broadcast_to(
probs[:, idx],
(self.n_keypoints + 1,) + probs[:, idx].shape,
)
radiuses = self.model_radiuses[bop_obj_idx][:, max_indices[:, idx]]
taichi_spherical_vote(
centers.float().contiguous(),
radiuses.float().contiguous(),
weights.float().contiguous(),
voting_voxel_grids[idx],
self.voxel_grid_start,
self.voxel_diameter,
multipliers,
)
ti.sync()
if return_log_normalizers:
return voting_voxel_grids, log_normalizers
return voting_voxel_grids
def get_top_pose_hypotheses(
self,
voting_voxel_grids: torch.Tensor,
bop_obj_indices: np.ndarray,
n_top_translations: int = 100,
n_pose_hypotheses: int = 50,
return_scores: bool = False,
):
"""Function to generate top-scoring pose hypotheses given voting results.
Args:
voting_voxel_grids: Voxel grids containing voting results.
bop_obj_indices: Indices of the objects to generate pose hypotheses for.
n_top_translations: Number of top translations we would look at.
n_pose_hypotheses: Number of top poses hypotheses to generate.
return_scores: Whether to additionally return the heuristic scores of the
top pose hypotheses.
Returns:
Top pose hypotheses (in the form of 4x4 transform matrices) and optionally
their heuristic scores.
"""
if np.isscalar(bop_obj_indices):
      bop_obj_indices = np.array([bop_obj_indices])
keypoints_voxel_offsets = torch.stack(
[self.keypoints_voxel_offsets[bop_obj_idx] for bop_obj_idx in bop_obj_indices],
dim=0,
)
transform_matrices, top_scores = _get_top_pose_hypotheses(
jax.dlpack.from_dlpack(torch.to_dlpack(voting_voxel_grids)),
jax.dlpack.from_dlpack(torch.to_dlpack(keypoints_voxel_offsets)),
voxel_grid_start=self.voxel_grid_start,
voxel_diameter=self.voxel_diameter,
prototype_rotations=jax.dlpack.from_dlpack(
torch.to_dlpack((self.prototype_rotations))
),
n_top_translations=n_top_translations,
n_pose_hypotheses=n_pose_hypotheses,
)
if return_scores:
return transform_matrices, top_scores
return transform_matrices
def get_top_pose_hypotheses_non_max_suppression(
self,
voting_voxel_grid: torch.Tensor,
bop_obj_idx: int,
maximum_filter_size: int = 5,
n_top_rotations_per_translation: int = 5,
n_pose_hypotheses: int = 50,
return_scores: bool = False,
):
"""Get top pose hypotheses with additional non-max suppression for translations."""
voting_voxel_grid = voting_voxel_grid.cpu().numpy()
max_filtered_voxel_grid = maximum_filter(
voting_voxel_grid[0], size=maximum_filter_size
)
voting_grid_mask = np.logical_and(
voting_voxel_grid[0] == max_filtered_voxel_grid,
max_filtered_voxel_grid != 0,
)
top_indices = np.argwhere(voting_grid_mask)
voxel_indices = (
top_indices[:, None, None]
+ self.keypoints_voxel_offsets[bop_obj_idx].cpu().numpy()
)
valid_entries = np.logical_and(
np.all(voxel_indices >= 0, axis=(-2, -1)),
np.all(voxel_indices < np.array(voting_voxel_grid.shape[1:]), axis=(-2, -1)),
)
scores = np.zeros(valid_entries.shape)
scores[valid_entries] = np.sum(
voting_voxel_grid[1:][
np.arange(self.n_keypoints)[None],
voxel_indices[valid_entries][..., 0],
voxel_indices[valid_entries][..., 1],
voxel_indices[valid_entries][..., 2],
],
axis=-1,
)
top_rotation_indices = np.argsort(-scores, axis=1)[
:, :n_top_rotations_per_translation
]
top_scores = scores[np.arange(scores.shape[0])[:, None], top_rotation_indices]
scores_top_indices = np.array(
np.unravel_index(np.argsort(-top_scores.ravel()), top_scores.shape)
).T[:n_pose_hypotheses]
top_voxel_indices = scores_top_indices[:, 0]
top_rotation_indices = top_rotation_indices[
scores_top_indices[:, 0], scores_top_indices[:, 1]
]
translations = (
self.voxel_grid_start + top_indices[top_voxel_indices] * self.voxel_diameter
)
rotations = self.prototype_rotations[top_rotation_indices].cpu().numpy()
transform_matrices = np.zeros((min(n_pose_hypotheses, translations.shape[0]), 4, 4))
transform_matrices[:, :3, :3] = rotations
transform_matrices[:, :3, -1] = translations
transform_matrices[:, 3, 3] = 1.0
if return_scores:
top_scores = scores[top_voxel_indices, top_rotation_indices]
return transform_matrices, top_scores
return transform_matrices
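# Illustrative usage sketch (added commentary; the data directory, the SurfEMB
# model and the RGBDImage below are assumptions, not defined in this module):
#
#   voting = BOPVoting(data_directory='/path/to/data')
#   voting.load_all_key_embeddings(surfemb_model)
#   voxel_grids = voting.get_voting_voxel_grids(img, query_embeddings)
#   poses = voting.get_top_pose_hypotheses(voxel_grids, img.bop_obj_indices)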
| threednel-main | threednel/bop/bop_vote.py |
"""Module for converting results into the BOP challenge format."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Mapping, Sequence
import jax
import pandas as pd
def generate_csv(
predictions: Sequence[Mapping[str, Any]], csv_filename: str
) -> pd.DataFrame:
"""Helper function to convert results into the BOP challenge format.
Args:
    predictions: A sequence of mappings. Each mapping should contain scene_id,
im_id, obj_id, score, R, t and time
csv_filename: Name of the csv file containing the pose estimation results
in the BOP challenge format.
Returns:
The pandas dataframe containing the pose estimation results.
"""
for prediction in predictions:
prediction['R'] = ' '.join([str(ele) for ele in prediction['R'].ravel()])
prediction['t'] = ' '.join([str(ele) for ele in prediction['t']])
outer_treedef = jax.tree_util.tree_structure([0] * len(predictions))
inner_treedef = jax.tree_util.tree_structure(predictions[0])
transposed_predictions = jax.tree_util.tree_transpose(
outer_treedef, inner_treedef, predictions
)
columns = ['scene_id', 'im_id', 'obj_id', 'score', 'R', 't', 'time']
df = pd.DataFrame(data=transposed_predictions)[columns]
df.to_csv(csv_filename, index=False, header=False)
return df
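if __name__ == '__main__':
  # Minimal self-contained sketch (not part of the original module): write one
  # dummy prediction to a temporary CSV to show the expected input structure.
  # All values below are placeholders, not real pose estimation results.
  import tempfile
  import numpy as np
  dummy_predictions = [{
      'scene_id': 48,
      'im_id': 1,
      'obj_id': 2,
      'score': 0.5,
      'R': np.eye(3),
      't': np.zeros(3),
      'time': 1.0,
  }]
  with tempfile.NamedTemporaryFile(suffix='.csv') as f:
    print(generate_csv(dummy_predictions, f.name))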
| threednel-main | threednel/bop/results.py |
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| threednel-main | threednel/bop/__init__.py |
"""Module containing the class for pose hypotheses generation."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Sequence, Tuple
import jax
import jax.dlpack
import jax.numpy as jnp
import numpy as np
import torch
from threednel.bop.bop_surfemb import BOPSurfEmb
from threednel.bop.bop_vote import BOPVoting
from threednel.bop.data import RGBDImage
@dataclass
class HypothesesGeneration:
"""Class for pose hypotheses generation.
Attributes:
bop_surfemb: Module for interacting with a pretrained SurfEMB model.
bop_vote: Module implementing the spherical voting process.
mask_threshold: Threshold for mask probabilities. Used to mask out
      irrelevant, noisy query embeddings.
n_top_translations: Number of top translations to look at for pose
hypotheses generation.
n_pose_hypotheses_per_crop: Number of top pose hypotheses to keep for each
2D detector crop.
maximum_filter_size: Maximum filter size for doing non-max suppression.
n_top_rotations_per_translation: Number of top rotations to look at per
translation for the pose hypotheses generation process.
n_pose_hypotheses_per_object: Number of top pose hypotheses to keep for each
object.
default_scale: The scale at which to obtain the query embeddings.
use_crops: Whether to use information from 2D detection to improve pose
hypotheses generation.
"""
bop_surfemb: BOPSurfEmb
bop_vote: BOPVoting
mask_threshold: float = 0.7
n_top_translations: int = 20
n_pose_hypotheses_per_crop: int = 80
maximum_filter_size: int = 10
n_top_rotations_per_translation: int = 10
n_pose_hypotheses_per_object: int = 30
default_scale: float = 1.5
use_crops: bool = True
def _process_crop(self, crop_img: RGBDImage):
"""Process a detector crop from 2D detection.
Args:
crop_img: A detector crop from 2D detection structured as an RGBDImage.
Returns:
Query embeddings, top-scoring pose hypotheses and their scores.
"""
(
query_embeddings,
masks,
) = self.bop_surfemb.get_query_embeddings_masks(crop_img)
masks = jnp.logical_and(crop_img.depth > 0, masks[..., 0] > self.mask_threshold)
query_embeddings = query_embeddings * masks[..., None, None]
voting_voxel_grids = self.bop_vote.get_voting_voxel_grids(
crop_img,
torch.from_dlpack(jax.dlpack.to_dlpack(query_embeddings)),
mask=torch.from_dlpack(jax.dlpack.to_dlpack(masks)),
)
pose_hypotheses, top_scores = self.bop_vote.get_top_pose_hypotheses(
voting_voxel_grids,
crop_img.bop_obj_indices,
n_top_translations=self.n_top_translations,
n_pose_hypotheses=self.n_pose_hypotheses_per_crop,
return_scores=True,
)
return query_embeddings, pose_hypotheses, top_scores
def _combine_query_embeddings_from_crops(
self,
img: RGBDImage,
obj_idx: int,
query_embeddings_list: Sequence[jnp.ndarray],
):
"""Function to combine query embeddings from multiple detector crops for the same object.
We rescale the query embedding maps from each detector crop, and use the
estimated masks to merge the query embedding maps from multiple detector
crops into a single query embedding map.
When the masks from multiple detector crops overlap, we take the query
embedding with the maximum L2 norm.
"""
query_embeddings_for_obj = jnp.zeros(
img.rgb.shape[:2]
+ (
len(query_embeddings_list),
self.bop_surfemb.surfemb_model.emb_dim,
)
)
for crop_idx in range(len(query_embeddings_list)):
left, top, right, bottom = img.annotations[obj_idx]['detector_crops'][crop_idx][
'AABB_crop'
]
top_padding = max(top, 0) - top
left_padding = max(left, 0) - left
bottom_padding = bottom - min(query_embeddings_for_obj.shape[0], bottom)
bottom_idx = -bottom_padding if bottom_padding > 0 else None
right_padding = right - min(query_embeddings_for_obj.shape[1], right)
right_idx = -right_padding if right_padding > 0 else None
query_embeddings_for_obj = query_embeddings_for_obj.at[
top + top_padding : bottom - bottom_padding,
left + left_padding : right - right_padding,
[crop_idx],
].set(
jax.image.resize(
query_embeddings_list[crop_idx],
shape=(bottom - top, right - left)
+ (1, self.bop_surfemb.surfemb_model.emb_dim),
method='nearest',
)[top_padding:bottom_idx, left_padding:right_idx]
)
query_embeddings_for_obj = query_embeddings_for_obj[
jnp.arange(img.rgb.shape[0])[:, None],
jnp.arange(img.rgb.shape[1])[None],
jnp.argmax(jnp.linalg.norm(query_embeddings_for_obj, axis=-1), axis=-1),
][:, :, None]
return query_embeddings_for_obj
def generate_from_crops_for_obj(
self, img: RGBDImage, obj_idx: int
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Generate pose hypotheses from the available detector crops."""
pose_hypotheses_list = []
scores_list = []
bop_obj_idx = img.bop_obj_indices[obj_idx]
assert img.annotations[obj_idx].get('detector_crops', [])
query_embeddings_list = []
for detector_crop in img.annotations[obj_idx]['detector_crops']:
crop_img = RGBDImage(
rgb=detector_crop['rgb_crop'],
depth=detector_crop['depth_crop'],
intrinsics=detector_crop['K_crop'],
bop_obj_indices=np.array([bop_obj_idx]),
fill_in_depth=img.fill_in_depth,
max_depth=img.max_depth,
)
(
query_embeddings_for_crop,
pose_hypotheses,
top_scores,
) = self._process_crop(crop_img)
query_embeddings_list.append(query_embeddings_for_crop)
pose_hypotheses_list.append(pose_hypotheses[0])
scores_list.append(top_scores[0])
query_embeddings_for_obj = self._combine_query_embeddings_from_crops(
img, obj_idx, query_embeddings_list
)
pose_hypotheses_for_obj = jnp.concatenate(pose_hypotheses_list, axis=0)[
jnp.argsort(-jnp.concatenate(scores_list))
]
return query_embeddings_for_obj, pose_hypotheses_for_obj
def generate_from_whole_image_for_obj(self, img: RGBDImage, obj_idx: int):
"""Generate pose hypotheses from the the whole image."""
bop_obj_idx = img.bop_obj_indices[obj_idx]
query_embeddings_for_obj = jax.device_put(
self.bop_surfemb.get_query_embeddings(
img,
scale=self.default_scale,
target_shape=img.rgb.shape[:2],
bop_obj_indices=np.array([bop_obj_idx]),
)
)
query_embeddings_for_obj = (
query_embeddings_for_obj * (img.depth > 0)[..., None, None]
)
voting_voxel_grids = self.bop_vote.get_voting_voxel_grids(
img,
torch.from_dlpack(jax.dlpack.to_dlpack(query_embeddings_for_obj)),
mask=torch.from_dlpack(jax.dlpack.to_dlpack(jax.device_put(img.depth > 0))),
bop_obj_indices=np.array([bop_obj_idx]),
)
pose_hypotheses_for_obj = self.bop_vote.get_top_pose_hypotheses_non_max_suppression(
voting_voxel_grids[0],
bop_obj_idx,
maximum_filter_size=self.maximum_filter_size,
n_top_rotations_per_translation=self.n_top_rotations_per_translation,
n_pose_hypotheses=self.n_pose_hypotheses_per_object,
return_scores=False,
)
return query_embeddings_for_obj, pose_hypotheses_for_obj
def generate(self, img: RGBDImage):
"""Generate pose hypotheses for all objects in the scene.
    Uses detector crops from 2D detection when they are available, and falls back
to using the whole image when 2D detection is not available.
"""
query_embeddings_all = []
pose_hypotheses_all = []
for obj_idx in range(len(img.bop_obj_indices)):
if self.use_crops and img.annotations[obj_idx].get('detector_crops', []):
(
query_embeddings_for_obj,
pose_hypotheses_for_obj,
) = self.generate_from_crops_for_obj(img, obj_idx)
else:
(
query_embeddings_for_obj,
pose_hypotheses_for_obj,
) = self.generate_from_whole_image_for_obj(img, obj_idx)
query_embeddings_all.append(query_embeddings_for_obj)
pose_hypotheses_all.append(pose_hypotheses_for_obj)
query_embeddings = jnp.concatenate(query_embeddings_all, axis=2)
data_mask = jnp.logical_and(
img.depth > 0,
jnp.any(jnp.linalg.norm(query_embeddings, axis=-1) > 0, axis=-1),
)
return query_embeddings, data_mask, pose_hypotheses_all
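# Illustrative usage sketch (added commentary; the construction of bop_surfemb,
# bop_vote and img is assumed and not shown here):
#
#   hypotheses_generation = HypothesesGeneration(bop_surfemb, bop_vote)
#   query_embeddings, data_mask, pose_hypotheses = hypotheses_generation.generate(img)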
| threednel-main | threednel/bop/hypotheses.py |
"""Module containing a wrapper class for a pretrained SurfEMB model."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple
import cv2
import jax.dlpack
import jax.numpy as jnp
from jax.scipy.special import expit
import numpy as np
from threednel.bop.data import RGBDImage
from threednel.third_party.surfemb.surface_embedding import SurfaceEmbeddingModel
import torch
@dataclass
class BOPSurfEmb:
"""Wrapper class around a pretrained SurfEMB model.
Supports applying a pretrained SurfEMB model to a given RGB image to get query
embeddings and mask estimations.
Attributes:
surfemb_model_path: Path to the a SurfEMB model checkpoint trained on YCB
objects.
device: Device to use for running the SurfEMB model. Defaults to 'cuda:0'.
surfemb_model: The loaded SurfEMB model.
bop_obj_indices: Indices of all the relevant objects in the BOP data format.
For the YCB-V dataset this refers to the object indices in
https://bop.felk.cvut.cz/media/data/bop_datasets/ycbv_models.zip
"""
surfemb_model_path: str
device: str = 'cuda:0'
def __post_init__(self):
print(f'Loading surfemb model {self.surfemb_model_path} for dataset ycb.')
device = torch.device(self.device)
surfemb_model = SurfaceEmbeddingModel.load_from_checkpoint(
self.surfemb_model_path
)
surfemb_model.eval()
surfemb_model.freeze()
surfemb_model.to(device)
self.surfemb_model = surfemb_model
self.bop_obj_indices = np.arange(1, 22)
self.surfemb_model.init_bop_obj_indices_to_obj_indices(self.bop_obj_indices)
def get_query_embeddings(
self,
img: RGBDImage,
scale: float = 1.0,
target_shape: Optional[Tuple[int, int]] = None,
bop_obj_indices: Optional[np.ndarray] = None,
):
"""Apply the SurfEMB model on an RGBDImage to get query embeddings.
Args:
img: An RGB-D image
scale: scaling to apply to the RGB image. Defaults to 1.0. The width and
height of the RGB image have to be multiples of 32 in order to be
compatible with the CNNs.
target_shape: (width, height) of the final query embeddings output.
bop_obj_indices: Optional array specifying the indices of the set of
objects for which we want to get query embeddings for. If None, return
the query embeddings for all the objects specified in
img.bop_obj_indices.
Returns:
query_embeddings: Array of shape target_shape + (n_bop_objects, emb_dim)
The query embeddings for the relevant objects as specified in
bop_obj_indices.
"""
rgb = img.rgb.copy()
if target_shape is None:
target_shape = img.rgb.shape[:2]
if bop_obj_indices is None:
bop_obj_indices = img.bop_obj_indices
else:
for bop_obj_idx in bop_obj_indices:
assert bop_obj_idx in img.bop_obj_indices
dsize = (np.round(scale * np.array(rgb.shape[:2]) / 32).astype(int) * 32)[
[1, 0]
]
rgb = cv2.resize(rgb, dsize=dsize, interpolation=1)
surfemb_model = self.surfemb_model
query_embeddings = np.zeros(
target_shape + (len(bop_obj_indices), surfemb_model.emb_dim)
)
for ii, bop_obj_idx in enumerate(bop_obj_indices):
query_embeddings[:, :, ii] = cv2.resize(
surfemb_model.infer_cnn(rgb, bop_obj_idx, False)[1].cpu().numpy(),
dsize=(target_shape[1], target_shape[0]),
interpolation=0,
)
return query_embeddings
def get_query_embeddings_masks(
self, img: RGBDImage, squeeze: bool = False
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Function to get both query embeddings and mask predictions.
Args:
img: The input RGBDImage.
squeeze: Get rid of the dimension corresponding to the objects if there is
only one object.
Returns:
The query embeddings and mask probabilities predictions.
"""
surfemb_model = self.surfemb_model
query_embeddings = []
mask_lgts = []
for bop_obj_idx in img.bop_obj_indices:
mask_lgts_for_obj, query_embeddings_for_obj = surfemb_model.infer_cnn(
img.rgb, bop_obj_idx, False
)
query_embeddings.append(
jax.dlpack.from_dlpack(
torch.to_dlpack(query_embeddings_for_obj.contiguous())
)
)
mask_lgts.append(
jax.dlpack.from_dlpack(
torch.to_dlpack(mask_lgts_for_obj.contiguous())
)
)
query_embeddings = jnp.stack(query_embeddings, axis=2)
mask_lgts = jnp.stack(mask_lgts, axis=2)
masks = expit(mask_lgts)
if squeeze:
query_embeddings = jnp.squeeze(query_embeddings)
masks = jnp.squeeze(masks)
return query_embeddings, masks
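# Illustrative usage sketch (added commentary; the checkpoint path and the
# RGBDImage img are assumptions, not part of this module):
#
#   bop_surfemb = BOPSurfEmb(surfemb_model_path='/path/to/ycbv_checkpoint.ckpt')
#   query_embeddings = bop_surfemb.get_query_embeddings(img, scale=1.5)
#   query_embeddings, masks = bop_surfemb.get_query_embeddings_masks(img)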
| threednel-main | threednel/bop/bop_surfemb.py |
"""Module containing useful classes for interacting with data in BOP format."""
# Copyright 2023 DeepMind Technologies Limited
# Copyright 2023 Massachusetts Institute of Technology (M.I.T.)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import json
import os
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Sequence
import imageio
import jax
import numpy as np
import tensorflow as tf
import trimesh
from threednel.renderer.camera import CameraIntrinsics
from threednel.renderer.parallel import ParallelRenderer
from threednel.third_party.ip_basic import fill_in_multiscale
from threednel.third_party.surfemb.utils import get_bbox_rgb_crop_K_crop
def depth_to_coords_in_camera(
depth: np.ndarray,
intrinsics: np.ndarray,
mask: Optional[np.ndarray] = None,
as_image_shape: bool = False,
) -> tuple[np.ndarray, np.ndarray]:
"""Convert depth image to coords in camera space for points in mask.
Args:
depth: Array of shape (H, W).
intrinsics: Array of shape (3, 3), camera intrinsic matrix.
mask: Array of shape (H, W), with 1s where points are quried.
as_image_shape: If True, return arrays of shape (H, W, 3)
Returns:
np.ndarray: Array of shape (N, 3) or (H, W, 3), coordinates in camera
space.
np.ndarray: Array of shape (N, 2) or (H, W, 2), coordinates on image
plane.
N is the number of 1s in mask.
"""
if as_image_shape:
assert mask is None
vu = np.mgrid[: depth.shape[0], : depth.shape[1]]
else:
if mask is None:
mask = np.ones_like(depth)
else:
assert not as_image_shape
assert mask.shape == depth.shape
vu = np.nonzero(mask)
depth_for_uv = depth[vu[0], vu[1]]
full_vec = np.stack(
[vu[1] * depth_for_uv, vu[0] * depth_for_uv, depth_for_uv], axis=0
)
coords_in_camera = np.moveaxis(
np.einsum('ij,j...->i...', np.linalg.inv(intrinsics), full_vec), 0, -1
)
coords_on_image = np.moveaxis(vu, 0, -1)
return coords_in_camera, coords_on_image
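# Added example (not in the original source): a hypothetical sanity check of
# the pinhole back-projection above. For a pixel (u, v) with depth d, the
# camera-frame coordinate is d * K^{-1} @ [u, v, 1]. The helper below is
# illustrative only and not called anywhere in this module.
def _depth_to_coords_example() -> np.ndarray:
  """Back-projects a single pixel at depth 100mm with the principal point at (0, 0)."""
  intrinsics = np.array([[500.0, 0.0, 0.0], [0.0, 500.0, 0.0], [0.0, 0.0, 1.0]])
  depth = np.full((1, 1), 100.0)
  coords, _ = depth_to_coords_in_camera(depth, intrinsics, as_image_shape=True)
  return coords  # shape (1, 1, 3); equals [0.0, 0.0, 100.0] here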
@dataclass
class RGBDImage:
"""Generic container class for an RGB-D image.
Attributes:
rgb: The RGB image.
depth: The depth map in milimeters.
intrinsics: The 3x3 camera intrinsics matrix.
bop_obj_indices: Array containing the indices of the objects in the scene
fill_in_depth: Whether we fill in missing depth values.
max_depth: Maximum depth used to fill in missing depth values.
annotations: Optional annotations for each object in the scene.
"""
rgb: np.ndarray
depth: np.ndarray
intrinsics: np.ndarray
bop_obj_indices: np.ndarray
fill_in_depth: bool = False
max_depth: float = np.inf
annotations: Optional[Sequence] = None
def __post_init__(self):
self.depth[self.depth > self.max_depth] = 0.0
if self.fill_in_depth:
assert self.max_depth < np.inf
self.depth, _ = fill_in_multiscale(self.depth, max_depth=self.max_depth)
if self.annotations is None:
self.annotations = [{} for _ in self.bop_obj_indices]
def unproject(self) -> np.ndarray:
"""Unproject pixels in the RGB-D image into 3D space (in camera frame)."""
data_xyz, _ = depth_to_coords_in_camera(
self.depth, self.intrinsics, as_image_shape=True
)
return data_xyz
def scale(self, scale_factor: float) -> RGBDImage:
"""Scale the RGB-D image by the given scale factor."""
camera_intrinsics = CameraIntrinsics.from_matrix(
shape=self.depth.shape, intrinsics=self.intrinsics
).scale(scale_factor)
shape = (camera_intrinsics.height, camera_intrinsics.width)
return RGBDImage(
rgb=np.round(
jax.image.resize(self.rgb, shape=shape + (3,), method='bilinear')
).astype(int),
depth=np.array(jax.image.resize(self.depth, shape=shape, method='nearest')),
intrinsics=camera_intrinsics.intrinsics_matrix,
bop_obj_indices=self.bop_obj_indices,
fill_in_depth=self.fill_in_depth,
max_depth=self.max_depth,
)
def get_renderer(
self, data_directory: str, scale_factor: float = 1.0
) -> ParallelRenderer:
"""Construct a renderer that can render a set of 3D scene descriptions in parallel."""
height, width = self.depth.shape
gl_renderer = ParallelRenderer(
height=height,
width=width,
intrinsics=self.intrinsics,
scale_factor=scale_factor,
)
for bop_obj_idx in self.bop_obj_indices:
gl_renderer.add_trimesh(
trimesh.load(
os.path.join(data_directory, f'bop/ycbv/models/obj_{bop_obj_idx:06d}.ply')
),
mesh_name=bop_obj_idx,
)
return gl_renderer
@dataclass
class BOPTestImage:
"""Class for interacting with test images from the BOP dataset.
Attributes:
dataset: Name of the dataset.
scene_id: ID of the scene in the BOP dataset.
img_id: ID of the image for the scene in the BOP dataset.
rgb: Array of shape (H, W, 3).
    depth: Array of shape (H, W). In millimeters.
intrinsics: Array of shape (3, 3). Camera intrinsics matrix.
camera_pose: Array of shape (4, 4). Transform matrix from world frame to
camera frame.
bop_obj_indices: BOP indices of the objects in the image. Ranges from 1 to
21.
annotations: Annotations for each object in the scene, including object
pose information and optionally 2D detection results.
default_scales: default scales at which we obtain query embeddings for
each object.
"""
dataset: str
scene_id: int
img_id: int
rgb: np.ndarray
depth: np.ndarray
intrinsics: np.ndarray
camera_pose: np.ndarray
bop_obj_indices: tuple[int, ...]
annotations: Sequence
default_scales: np.ndarray
def __post_init__(self):
self.obj_id, self.inst_count = np.unique(self.bop_obj_indices, return_counts=True)
assert np.all(np.repeat(self.obj_id, self.inst_count) == self.bop_obj_indices)
def get_gt_poses(self) -> List[np.ndarray]:
"""Function to get ground-truth object poses for the objects in the scene."""
gt_poses = [annotation['model_to_cam'].copy() for annotation in self.annotations]
return gt_poses
@dataclass
class BOPTestScene:
"""Class for interacting with scenes from the BOP dataset."""
scene_path: str
load_detector_crops: bool = False
def __post_init__(self):
self.data_directory = str(Path(self.scene_path).parents[3])
if self.data_directory.startswith('gs:/'):
self.data_directory = self.data_directory[:4] + '/' + self.data_directory[4:]
self.scene_id = int(os.path.basename(self.scene_path))
with open(os.path.join(self.scene_path, 'scene_camera.json'), 'r') as f:
self.scene_camera = json.load(f)
with open(os.path.join(self.scene_path, 'scene_gt.json'), 'r') as f:
self.scene_gt = json.load(f)
with open(os.path.join(self.scene_path, 'scene_gt_info.json'), 'r') as f:
self.scene_gt_info = json.load(f)
self.img_indices = [int(img_id) for img_id in self.scene_camera.keys()]
with open(
os.path.join(
self.data_directory,
'bop',
'ycbv',
'camera_uw.json',
),
'r',
) as f:
self.camera_info = json.load(f)
with open(
os.path.join(
self.data_directory,
'bop',
'ycbv',
'models',
'models_info.json',
),
'r',
) as f:
models_info = json.load(f)
self.models_info = {
int(bop_obj_idx): models_info[bop_obj_idx] for bop_obj_idx in models_info
}
self.default_scales = {
bop_obj_idx: (1.5 if self.models_info[bop_obj_idx]['diameter'] > 70 else 3.0)
for bop_obj_idx in self.models_info
}
if self.load_detector_crops:
detection_folder = os.path.join(self.data_directory, 'detection_results', 'ycbv')
with tf.io.gfile.GFile(os.path.join(detection_folder, 'bboxes.npy'), 'rb') as f:
self.bboxes = np.load(f)
with tf.io.gfile.GFile(os.path.join(detection_folder, 'obj_ids.npy'), 'rb') as f:
self.obj_ids = np.load(f)
with tf.io.gfile.GFile(
os.path.join(detection_folder, 'scene_ids.npy'), 'rb'
) as f:
self.scene_ids = np.load(f)
with tf.io.gfile.GFile(os.path.join(detection_folder, 'view_ids.npy'), 'rb') as f:
self.view_ids = np.load(f)
def __getitem__(self, img_id):
img_id = str(img_id)
img_fname = f'{int(img_id):06d}.png'
rgb = imageio.imread(os.path.join(self.scene_path, 'rgb', img_fname))
depth = (
imageio.imread(os.path.join(self.scene_path, 'depth', img_fname))
* self.camera_info['depth_scale']
)
intrinsics = np.array(self.scene_camera[img_id]['cam_K']).reshape((3, 3))
cam_pose = np.eye(4)
cam_pose[:3, :3] = np.array(
self.scene_camera[img_id].get('cam_R_w2c', np.eye(3).ravel())
).reshape((3, 3))
cam_pose[:3, -1] = np.array(self.scene_camera[img_id].get('cam_t_w2c', np.zeros(3)))
annotations = []
bop_obj_indices = []
for instance_idx, instance in enumerate(self.scene_gt[img_id]):
model_to_cam = np.eye(4)
model_to_cam[:3, :3] = np.array(instance['cam_R_m2c']).reshape((3, 3))
model_to_cam[:3, -1] = np.array(instance['cam_t_m2c'])
mask = imageio.imread(
os.path.join(
self.scene_path,
'mask',
f'{int(img_id):06d}_{instance_idx:06d}.png',
)
)
mask_visible = imageio.imread(
os.path.join(
self.scene_path,
'mask_visib',
f'{int(img_id):06d}_{instance_idx:06d}.png',
)
)
annotation = {
'model_to_cam': model_to_cam,
'mask': mask,
'mask_visible': mask_visible,
}
annotation.update(self.scene_gt_info[img_id][instance_idx])
bop_obj_indices.append(instance['obj_id'])
if self.load_detector_crops:
detection_crop_idx = np.argwhere(
(self.scene_ids == self.scene_id)
* (self.view_ids == int(img_id))
* (self.obj_ids == instance['obj_id'])
)
detector_crops = []
for detection_crop_idx in detection_crop_idx.ravel():
bbox = self.bboxes[detection_crop_idx]
(
crop_bbox_in_original,
rgb_crop,
depth_crop,
K_crop,
) = get_bbox_rgb_crop_K_crop(rgb, depth, bbox, K=intrinsics)
left, top, right, bottom = crop_bbox_in_original
detector_crops.append(
dict(
AABB_crop=crop_bbox_in_original,
rgb_crop=rgb_crop,
depth_crop=depth_crop,
K_crop=K_crop,
)
)
annotation['detector_crops'] = detector_crops
annotations.append(annotation)
return BOPTestImage(
dataset='ycbv',
scene_id=self.scene_id,
img_id=int(img_id),
rgb=rgb,
depth=depth,
intrinsics=intrinsics,
camera_pose=cam_pose,
bop_obj_indices=tuple(bop_obj_indices),
annotations=annotations,
default_scales=np.array(
[self.default_scales[bop_obj_idx] for bop_obj_idx in bop_obj_indices]
),
)
@property
def images(self):
for img_id in self.img_indices:
yield self.__getitem__(img_id)
@dataclass
class BOPTestDataset:
"""Class for interacting with a BOP dataset."""
data_directory: str
load_detector_crops: bool = False
def __post_init__(self):
with open(
os.path.join(self.data_directory, 'bop', 'ycbv', 'test_targets_bop19.json'),
'r',
) as f:
test_targets = json.load(f)
outer_treedef = jax.tree_util.tree_structure([0] * len(test_targets))
inner_treedef = jax.tree_util.tree_structure(test_targets[0])
test_targets = jax.tree_util.tree_transpose(
outer_treedef, inner_treedef, test_targets
)
self.test_targets = {key: np.array(test_targets[key]) for key in test_targets}
with open(
os.path.join(
self.data_directory,
'bop',
'ycbv',
'camera_uw.json',
),
'r',
) as f:
self.camera_info = json.load(f)
def __getitem__(self, scene_id: int):
assert np.sum(self.test_targets['scene_id'] == scene_id) > 0
scene_path = os.path.join(
self.data_directory,
f'bop/ycbv/test/{scene_id:06d}',
)
return BOPTestScene(scene_path, load_detector_crops=self.load_detector_crops)
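# Illustrative usage sketch (added commentary; the data directory is a
# placeholder path, assumed to follow the layout used throughout this module):
#
#   data = BOPTestDataset(data_directory='/path/to/data', load_detector_crops=True)
#   scene = data[48]        # BOPTestScene for scene 48
#   test_img = scene[1]     # BOPTestImage for image id 1 in that scene
#   gt_poses = test_img.get_gt_poses()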
| threednel-main | threednel/bop/data.py |
# MIT License
#
# Copyright (c) 2018 Jason Ku
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Originally https://github.com/kujason/ip_basic/blob/master/ip_basic/depth_map_utils.py
import collections
import cv2
import numpy as np
# Full kernels
FULL_KERNEL_3 = np.ones((3, 3), np.uint8)
FULL_KERNEL_5 = np.ones((5, 5), np.uint8)
FULL_KERNEL_7 = np.ones((7, 7), np.uint8)
FULL_KERNEL_9 = np.ones((9, 9), np.uint8)
FULL_KERNEL_31 = np.ones((31, 31), np.uint8)
# 3x3 cross kernel
CROSS_KERNEL_3 = np.asarray(
[
[0, 1, 0],
[1, 1, 1],
[0, 1, 0],
],
dtype=np.uint8,
)
# 5x5 cross kernel
CROSS_KERNEL_5 = np.asarray(
[
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
],
dtype=np.uint8,
)
# 5x5 diamond kernel
DIAMOND_KERNEL_5 = np.array(
[
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
],
dtype=np.uint8,
)
# 7x7 cross kernel
CROSS_KERNEL_7 = np.asarray(
[
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
],
dtype=np.uint8,
)
# 7x7 diamond kernel
DIAMOND_KERNEL_7 = np.asarray(
[
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
],
dtype=np.uint8,
)
def fill_in_fast(
depth_map,
max_depth=100.0,
custom_kernel=DIAMOND_KERNEL_5,
extrapolate=False,
blur_type="bilateral",
):
"""Fast, in-place depth completion.
Args:
depth_map: projected depths
max_depth: max depth value for inversion
custom_kernel: kernel to apply initial dilation
extrapolate: whether to extrapolate by extending depths to top of the
frame, and applying a 31x31 full kernel dilation
blur_type: 'bilateral' - preserves local structure (recommended)
'gaussian' - provides lower RMSE
Returns:
depth_map: dense depth map
"""
# Invert
valid_pixels = depth_map > 0.1
depth_map[valid_pixels] = max_depth - depth_map[valid_pixels]
# Dilate
depth_map = cv2.dilate(depth_map, custom_kernel)
# Hole closing
depth_map = cv2.morphologyEx(depth_map, cv2.MORPH_CLOSE, FULL_KERNEL_5)
# Fill empty spaces with dilated values
empty_pixels = depth_map < 0.1
dilated = cv2.dilate(depth_map, FULL_KERNEL_7)
depth_map[empty_pixels] = dilated[empty_pixels]
# Extend highest pixel to top of image
if extrapolate:
top_row_pixels = np.argmax(depth_map > 0.1, axis=0)
top_pixel_values = depth_map[top_row_pixels, range(depth_map.shape[1])]
for pixel_col_idx in range(depth_map.shape[1]):
depth_map[0 : top_row_pixels[pixel_col_idx], pixel_col_idx] = (
top_pixel_values[pixel_col_idx]
)
# Large Fill
empty_pixels = depth_map < 0.1
dilated = cv2.dilate(depth_map, FULL_KERNEL_31)
depth_map[empty_pixels] = dilated[empty_pixels]
# Median blur
depth_map = cv2.medianBlur(depth_map, 5)
# Bilateral or Gaussian blur
if blur_type == "bilateral":
# Bilateral blur
depth_map = cv2.bilateralFilter(depth_map, 5, 1.5, 2.0)
elif blur_type == "gaussian":
# Gaussian blur
valid_pixels = depth_map > 0.1
blurred = cv2.GaussianBlur(depth_map, (5, 5), 0)
depth_map[valid_pixels] = blurred[valid_pixels]
# Invert
valid_pixels = depth_map > 0.1
depth_map[valid_pixels] = max_depth - depth_map[valid_pixels]
return depth_map
def fill_in_multiscale(
depth_map,
max_depth=100.0,
dilation_kernel_far=CROSS_KERNEL_3,
dilation_kernel_med=CROSS_KERNEL_5,
dilation_kernel_near=CROSS_KERNEL_7,
extrapolate=False,
blur_type="bilateral",
show_process=False,
):
"""Slower, multi-scale dilation version with additional noise removal that
provides better qualitative results.
Args:
depth_map: projected depths
max_depth: max depth value for inversion
dilation_kernel_far: dilation kernel to use for 30.0 < depths < 80.0 m
dilation_kernel_med: dilation kernel to use for 15.0 < depths < 30.0 m
dilation_kernel_near: dilation kernel to use for 0.1 < depths < 15.0 m
extrapolate:whether to extrapolate by extending depths to top of the
frame, and applying a 31x31 full kernel dilation
blur_type: 'gaussian' - provides lower RMSE 'bilateral' - preserves local
structure (recommended)
show_process: saves process images into an OrderedDict
Returns:
depth_map: dense depth map
process_dict: OrderedDict of process images
"""
# Convert to float32
depths_in = np.float32(depth_map)
# Calculate bin masks before inversion
valid_pixels_near = (depths_in > 0.1) & (depths_in <= 15.0)
valid_pixels_med = (depths_in > 15.0) & (depths_in <= 30.0)
valid_pixels_far = depths_in > 30.0
# Invert (and offset)
s1_inverted_depths = np.copy(depths_in)
valid_pixels = s1_inverted_depths > 0.1
s1_inverted_depths[valid_pixels] = (
max_depth - s1_inverted_depths[valid_pixels]
)
# Multi-scale dilation
dilated_far = cv2.dilate(
np.multiply(s1_inverted_depths, valid_pixels_far), dilation_kernel_far
)
dilated_med = cv2.dilate(
np.multiply(s1_inverted_depths, valid_pixels_med), dilation_kernel_med
)
dilated_near = cv2.dilate(
np.multiply(s1_inverted_depths, valid_pixels_near), dilation_kernel_near
)
# Find valid pixels for each binned dilation
valid_pixels_near = dilated_near > 0.1
valid_pixels_med = dilated_med > 0.1
valid_pixels_far = dilated_far > 0.1
# Combine dilated versions, starting farthest to nearest
s2_dilated_depths = np.copy(s1_inverted_depths)
s2_dilated_depths[valid_pixels_far] = dilated_far[valid_pixels_far]
s2_dilated_depths[valid_pixels_med] = dilated_med[valid_pixels_med]
s2_dilated_depths[valid_pixels_near] = dilated_near[valid_pixels_near]
# Small hole closure
s3_closed_depths = cv2.morphologyEx(
s2_dilated_depths, cv2.MORPH_CLOSE, FULL_KERNEL_5
)
# Median blur to remove outliers
s4_blurred_depths = np.copy(s3_closed_depths)
blurred = cv2.medianBlur(s3_closed_depths, 5)
valid_pixels = s3_closed_depths > 0.1
s4_blurred_depths[valid_pixels] = blurred[valid_pixels]
# Calculate a top mask
top_mask = np.ones(depths_in.shape, dtype=bool)
for pixel_col_idx in range(s4_blurred_depths.shape[1]):
pixel_col = s4_blurred_depths[:, pixel_col_idx]
top_pixel_row = np.argmax(pixel_col > 0.1)
top_mask[0:top_pixel_row, pixel_col_idx] = False
# Get empty mask
valid_pixels = s4_blurred_depths > 0.1
empty_pixels = ~valid_pixels & top_mask
# Hole fill
dilated = cv2.dilate(s4_blurred_depths, FULL_KERNEL_9)
s5_dilated_depths = np.copy(s4_blurred_depths)
s5_dilated_depths[empty_pixels] = dilated[empty_pixels]
# Extend highest pixel to top of image or create top mask
s6_extended_depths = np.copy(s5_dilated_depths)
top_mask = np.ones(s5_dilated_depths.shape, dtype=bool)
top_row_pixels = np.argmax(s5_dilated_depths > 0.1, axis=0)
top_pixel_values = s5_dilated_depths[
top_row_pixels, range(s5_dilated_depths.shape[1])
]
for pixel_col_idx in range(s5_dilated_depths.shape[1]):
if extrapolate:
s6_extended_depths[0 : top_row_pixels[pixel_col_idx], pixel_col_idx] = (
top_pixel_values[pixel_col_idx]
)
else:
# Create top mask
top_mask[0 : top_row_pixels[pixel_col_idx], pixel_col_idx] = False
# Fill large holes with masked dilations
s7_blurred_depths = np.copy(s6_extended_depths)
for i in range(6):
empty_pixels = (s7_blurred_depths < 0.1) & top_mask
dilated = cv2.dilate(s7_blurred_depths, FULL_KERNEL_5)
s7_blurred_depths[empty_pixels] = dilated[empty_pixels]
# Median blur
blurred = cv2.medianBlur(s7_blurred_depths, 5)
valid_pixels = (s7_blurred_depths > 0.1) & top_mask
s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
if blur_type == "gaussian":
# Gaussian blur
blurred = cv2.GaussianBlur(s7_blurred_depths, (5, 5), 0)
valid_pixels = (s7_blurred_depths > 0.1) & top_mask
s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
elif blur_type == "bilateral":
# Bilateral blur
blurred = cv2.bilateralFilter(s7_blurred_depths, 5, 0.5, 2.0)
s7_blurred_depths[valid_pixels] = blurred[valid_pixels]
# Invert (and offset)
s8_inverted_depths = np.copy(s7_blurred_depths)
valid_pixels = np.where(s8_inverted_depths > 0.1)
s8_inverted_depths[valid_pixels] = (
max_depth - s8_inverted_depths[valid_pixels]
)
depths_out = s8_inverted_depths
process_dict = None
if show_process:
process_dict = collections.OrderedDict()
process_dict["s0_depths_in"] = depths_in
process_dict["s1_inverted_depths"] = s1_inverted_depths
process_dict["s2_dilated_depths"] = s2_dilated_depths
process_dict["s3_closed_depths"] = s3_closed_depths
process_dict["s4_blurred_depths"] = s4_blurred_depths
process_dict["s5_combined_depths"] = s5_dilated_depths
process_dict["s6_extended_depths"] = s6_extended_depths
process_dict["s7_blurred_depths"] = s7_blurred_depths
process_dict["s8_inverted_depths"] = s8_inverted_depths
process_dict["s9_depths_out"] = depths_out
return depths_out, process_dict
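if __name__ == "__main__":
  # Minimal self-contained sketch (not part of the original module): fill a
  # synthetic sparse depth map. The random values are placeholders chosen so
  # that the near/medium/far depth bins above are all exercised.
  rng = np.random.default_rng(0)
  sparse_depth = np.zeros((96, 96), dtype=np.float32)
  rows = rng.integers(0, 96, size=500)
  cols = rng.integers(0, 96, size=500)
  sparse_depth[rows, cols] = rng.uniform(1.0, 60.0, size=500).astype(np.float32)
  dense_depth, _ = fill_in_multiscale(sparse_depth, max_depth=100.0)
  print(dense_depth.shape, float(dense_depth.max()))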
| threednel-main | threednel/third_party/ip_basic.py |
# MIT License
#
# Copyright (c) 2018 Naoto Usuyama
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Initially from https://github.com/usuyama/pytorch-unet (MIT License)
# Architecture slightly changed (removed some expensive high-res convolutions)
# and extended to allow multiple decoders
import torch
from torch import nn
import torchvision
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class, feat_preultimate=64, n_decoders=1):
super().__init__()
# shared encoder
self.base_model = torchvision.models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(
*self.base_layers[:3]
) # size=(N, 64, x.H/2, x.W/2)
self.layer1 = nn.Sequential(
*self.base_layers[3:5]
) # size=(N, 64, x.H/4, x.W/4)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
# n_decoders
self.upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=False
)
self.decoders = [
dict(
layer0_1x1=convrelu(64, 64, 1, 0),
layer1_1x1=convrelu(64, 64, 1, 0),
layer2_1x1=convrelu(128, 128, 1, 0),
layer3_1x1=convrelu(256, 256, 1, 0),
layer4_1x1=convrelu(512, 512, 1, 0),
conv_up3=convrelu(256 + 512, 512, 3, 1),
conv_up2=convrelu(128 + 512, 256, 3, 1),
conv_up1=convrelu(64 + 256, 256, 3, 1),
conv_up0=convrelu(64 + 256, 128, 3, 1),
conv_original_size=convrelu(128, feat_preultimate, 3, 1),
conv_last=nn.Conv2d(feat_preultimate, n_class, 1),
)
for _ in range(n_decoders)
]
# register decoder modules
for i, decoder in enumerate(self.decoders):
for key, val in decoder.items():
setattr(self, f'decoder{i}_{key}', val)
def forward(self, input, decoder_idx=None):
if decoder_idx is None:
assert len(self.decoders) == 1
decoder_idx = [0]
else:
assert len(decoder_idx) == 1 or len(decoder_idx) == len(input)
# encoder
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layers = [layer0, layer1, layer2, layer3, layer4]
# decoders
out = []
for i, dec_idx in enumerate(decoder_idx):
decoder = self.decoders[dec_idx]
batch_slice = slice(None) if len(decoder_idx) == 1 else slice(i, i + 1)
x = decoder['layer4_1x1'](layer4[batch_slice])
x = self.upsample(x)
for layer_idx in 3, 2, 1, 0:
layer_slice = layers[layer_idx][batch_slice]
layer_projection = decoder[f'layer{layer_idx}_1x1'](layer_slice)
x = torch.cat([x, layer_projection], dim=1)
x = decoder[f'conv_up{layer_idx}'](x)
x = self.upsample(x)
x = decoder['conv_original_size'](x)
out.append(decoder['conv_last'](x))
if len(decoder_idx) == 1:
# out: 1 x (B, C, H, W)
return out[0]
else:
# out: B x (1, C, H, W)
return torch.stack(out)[:, 0]
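if __name__ == '__main__':
  # Minimal smoke test (not part of the original module): run a forward pass on
  # random input. Note that building the model downloads ImageNet weights for
  # the ResNet18 backbone, and the spatial size must be divisible by 32.
  model = ResNetUNet(n_class=12, n_decoders=2)
  model.eval()
  with torch.no_grad():
    x = torch.randn(2, 3, 224, 224)
    out = model(x, decoder_idx=[0, 1])
  print(out.shape)  # expected: torch.Size([2, 12, 224, 224])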
| threednel-main | threednel/third_party/surfemb/unet.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| threednel-main | threednel/third_party/surfemb/__init__.py |
# Copyright 2023 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import time
from typing import Sequence
import cv2
import numpy as np
from threednel.third_party.surfemb.surface_embedding import SurfaceEmbeddingModel
import torch
@contextmanager
def timer(text='', do=True):
if do:
start = time.time()
try:
yield
finally:
print(f'{text}: {time.time() - start:.4}s')
else:
yield
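# Hedged usage example (illustration only, not part of the original file):
#
#   with timer('surfemb inference'):
#       ...  # any block of work
#
# prints e.g. "surfemb inference: 0.1234s" on exit; with do=False the block
# runs untimed and nothing is printed.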
def get_key_embeddings_visualizations(
key_embeddings: np.ndarray,
obj_ids: np.ndarray,
all_key_embeddings: Sequence[np.ndarray],
model: SurfaceEmbeddingModel,
) -> np.ndarray:
"""get_key_embeddings_visualizations.
Args:
key_embeddings (np.ndarray): Array of shape (H, W, emb_dim) Descriptors
for different model
obj_ids (np.ndarray): Array of shape (H, W) Object ids within the scene.
Ranges from -1 (background) to n_objs - 1
all_key_embeddings (Sequence[np.ndarray]): all_key_embeddings Sequence of
length n_objs. Descriptors associated with the different points sampled
from the object surface.
model (SurfaceEmbeddingModel): model
Returns:
Array of shape (H, W, 3). Visualization of the model descriptors.
"""
key_embeddings_visualizations = np.zeros(key_embeddings.shape[:-1] + (3,))
  # Note: the loop variable must not shadow the `key_embeddings` argument, so
  # that the per-pixel descriptors are indexed by the object mask while the
  # per-object surface descriptors are only used for de-meaning.
  for ii, obj_key_embeddings in enumerate(all_key_embeddings):
    key_embeddings_visualizations[obj_ids == ii] = (
        model.get_emb_vis(
            torch.from_numpy(key_embeddings[obj_ids == ii]),
            demean=torch.from_numpy(obj_key_embeddings).mean(dim=0),
        )
        .cpu()
        .numpy()
    )
return key_embeddings_visualizations
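# Illustrative shape walkthrough (not part of the original file): for a scene
# with two visible objects,
#   key_embeddings: (H, W, emb_dim), obj_ids: (H, W) with values in {-1, 0, 1},
#   all_key_embeddings: [(n_pts_0, emb_dim), (n_pts_1, emb_dim)],
# the result is an (H, W, 3) visualization in [0, 1]; background pixels
# (obj_ids == -1) never match any object index and stay zero.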
def get_bbox_rgb_crop_K_crop(
rgb: np.ndarray,
depth: np.ndarray,
bbox: np.ndarray,
K: np.ndarray,
crop_scale: float = 1.2,
crop_res: int = 224,
):
"""Adapted from https://github.com/rasmushaugaard/surfemb/blob/53e1852433a3b2b84fedc7a3a01674fe1b6189cc/surfemb/data/std_auxs.py#L60
Args:
rgb (np.ndarray): Full RGB image
bbox (np.ndarray): Array of shape (4,). Bounding box of the detector crop.
4 elements are left, top, right, bottom
crop_scale (float): crop_scale
crop_res (int): crop_res
Returns:
crop_bbox_in_original: Tuple of length 4. Bounding box in the original
full RGB image.
4 elements are left, top, right, bottom
rgb_crop: Array of shape (crop_res, crop_res). Cropped RGB image
"""
R = np.eye(2)
left, top, right, bottom = bbox
cy, cx = (top + bottom) / 2, (left + right) / 2
size = crop_res / max(bottom - top, right - left) / crop_scale
r = crop_res
M = np.concatenate((R, [[-cx], [-cy]]), axis=1) * size
M[:, 2] += r / 2
Ms = np.concatenate((M, [[0, 0, 1]]))
# calculate axis aligned bounding box in the original image of the rotated crop
crop_corners = np.array(((0, 0, 1), (0, r, 1), (r, 0, 1), (r, r, 1))) - (
0.5,
0.5,
0,
) # (4, 3)
crop_corners = np.linalg.inv(Ms) @ crop_corners.T # (3, 4)
crop_corners = crop_corners[:2] / crop_corners[2:] # (2, 4)
left, top = np.floor(crop_corners.min(axis=1)).astype(int)
right, bottom = np.ceil(crop_corners.max(axis=1)).astype(int) + 1
crop_bbox_in_original = left, top, right, bottom
  rgb_crop = cv2.warpAffine(rgb, M, (r, r), flags=1)  # flags=1: cv2.INTER_LINEAR
  depth_crop = cv2.warpAffine(depth, M, (r, r), flags=0)  # flags=0: cv2.INTER_NEAREST
K_crop = Ms @ K
return crop_bbox_in_original, rgb_crop, depth_crop, K_crop
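# Hedged usage sketch (not part of the original file); variable names below are
# placeholders. K_crop = Ms @ K expresses the camera intrinsics in crop pixel
# coordinates, so 3D points project consistently into the resampled crop:
#
#   bbox = np.array([x0, y0, x1, y1])  # detector box: left, top, right, bottom
#   crop_bbox, rgb_crop, depth_crop, K_crop = get_bbox_rgb_crop_K_crop(
#       rgb, depth, bbox, K, crop_scale=1.2, crop_res=224)
#   # rgb_crop: (224, 224, 3), depth_crop: (224, 224)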
| threednel-main | threednel/third_party/surfemb/utils.py |
# MIT License
# Copyright (c) 2022 Rasmus Laurvig Haugaard
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Adapted from https://github.com/rasmushaugaard/surfemb/blob/master/surfemb/surface_embedding.py
# Updated object indices to range from 1 to 21 instead of from 0 to 20
# Deleted a few unused functions
from typing import Sequence, Union
import numpy as np
import pytorch_lightning as pl
import torch
from .siren import Siren
from .unet import ResNetUNet
# Note: rotation ensembling in infer_cnn calls utils.rotate_batch /
# utils.rotate_batch_back but this adaptation did not import `utils`; the
# import below is assumed from the upstream surfemb code, which provides
# those helpers in its utils module.
from . import utils
mlp_class_dict = dict(siren=Siren)
imagenet_stats = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
def tfms_normalize(img: np.ndarray): # (h, w, 3) -> (3, h, w)
mu, std = imagenet_stats
if img.dtype == np.uint8:
img = img / 255
img = (img - mu) / std
return img.transpose(2, 0, 1).astype(np.float32)
def tfms_denormalize(img: Union[np.ndarray, torch.Tensor]):
mu, std = imagenet_stats
if isinstance(img, torch.Tensor):
mu, std = [
torch.Tensor(v).type(img.dtype).to(img.device)[:, None, None]
for v in (mu, std)
]
return img * std + mu
class SurfaceEmbeddingModel(pl.LightningModule):
def __init__(
self,
n_objs: int,
emb_dim=12,
n_pos=1024,
n_neg=1024,
lr_cnn=3e-4,
lr_mlp=3e-5,
mlp_name='siren',
mlp_hidden_features=256,
mlp_hidden_layers=2,
key_noise=1e-3,
warmup_steps=2000,
separate_decoders=True,
**kwargs,
):
""":param emb_dim: number of embedding dimensions
:param n_pos: number of positive (q, k) pairs from the object mask
:param n_neg: number of negative keys, k-, from the object surface
"""
super().__init__()
self.save_hyperparameters()
self.n_objs, self.emb_dim = n_objs, emb_dim
self.n_pos, self.n_neg = n_pos, n_neg
self.lr_cnn, self.lr_mlp = lr_cnn, lr_mlp
self.warmup_steps = warmup_steps
self.key_noise = key_noise
self.separate_decoders = separate_decoders
# query model
self.cnn = ResNetUNet(
n_class=(emb_dim + 1) if separate_decoders else n_objs * (emb_dim + 1),
n_decoders=n_objs if separate_decoders else 1,
)
# key models
mlp_class = mlp_class_dict[mlp_name]
mlp_args = dict(
in_features=3,
out_features=emb_dim,
hidden_features=mlp_hidden_features,
hidden_layers=mlp_hidden_layers,
)
self.mlps = torch.nn.Sequential(
*[mlp_class(**mlp_args) for _ in range(n_objs)]
)
self.bop_obj_indices_to_obj_indices = None
def init_bop_obj_indices_to_obj_indices(self, bop_obj_indices: Sequence[int]):
assert self.bop_obj_indices_to_obj_indices is None
self.bop_obj_indices_to_obj_indices = {}
for obj_idx, bop_obj_idx in enumerate(sorted(bop_obj_indices)):
self.bop_obj_indices_to_obj_indices[bop_obj_idx] = obj_idx
@torch.no_grad()
def infer_cnn(
self,
img: Union[np.ndarray, torch.Tensor],
bop_obj_idx: int,
rotation_ensemble=True,
):
assert not self.training
assert self.bop_obj_indices_to_obj_indices is not None
obj_idx = self.bop_obj_indices_to_obj_indices[bop_obj_idx]
if isinstance(img, np.ndarray):
if img.dtype == np.uint8:
img = tfms_normalize(img)
img = torch.from_numpy(img).to(self.device)
_, h, w = img.shape
if rotation_ensemble:
img = utils.rotate_batch(img) # (4, 3, h, h)
else:
img = img[None] # (1, 3, h, w)
cnn_out = self.cnn(
img, [obj_idx] * len(img) if self.separate_decoders else None
)
if not self.separate_decoders:
channel_idxs = [obj_idx] + list(
self.n_objs + obj_idx * self.emb_dim + np.arange(self.emb_dim)
)
cnn_out = cnn_out[:, channel_idxs]
# cnn_out: (B, 1+emb_dim, h, w)
if rotation_ensemble:
cnn_out = utils.rotate_batch_back(cnn_out).mean(dim=0)
else:
cnn_out = cnn_out[0]
mask_lgts, query_img = cnn_out[0], cnn_out[1:]
query_img = query_img.permute(1, 2, 0) # (h, w, emb_dim)
return mask_lgts, query_img
@torch.no_grad()
def infer_mlp(
self, pts_norm: Union[np.ndarray, torch.Tensor], bop_obj_idx: int
):
assert not self.training
assert self.bop_obj_indices_to_obj_indices is not None
obj_idx = self.bop_obj_indices_to_obj_indices[bop_obj_idx]
if isinstance(pts_norm, np.ndarray):
pts_norm = torch.from_numpy(pts_norm).to(self.device).float()
return self.mlps[obj_idx](pts_norm) # (..., emb_dim)
def get_emb_vis(
self,
emb_img: torch.Tensor,
mask: torch.Tensor = None,
      demean: Union[torch.Tensor, bool] = False,
):
if demean is True:
demean = emb_img[mask].view(-1, self.emb_dim).mean(dim=0)
if demean is not False:
emb_img = emb_img - demean
shape = emb_img.shape[:-1]
emb_img = emb_img.view(*shape, 3, -1).mean(dim=-1)
if mask is not None:
emb_img[~mask] = 0.0
emb_img /= torch.abs(emb_img).max() + 1e-9
emb_img.mul_(0.5).add_(0.5)
return emb_img
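# --- Hedged inference sketch (not part of the original file) ---
# Typical query/key inference with a trained checkpoint might look like the
# following; the checkpoint path, object indices and inputs are placeholders.
#
#   model = SurfaceEmbeddingModel.load_from_checkpoint('path/to/model.ckpt').eval()
#   model.init_bop_obj_indices_to_obj_indices([1, 5, 6])        # BOP object ids in use
#   mask_lgts, query_img = model.infer_cnn(
#       rgb_crop, bop_obj_idx=1, rotation_ensemble=False)       # (h, w), (h, w, emb_dim)
#   keys = model.infer_mlp(pts_norm, bop_obj_idx=1)             # (n_pts, emb_dim)
#   vis = model.get_emb_vis(query_img, mask=mask_lgts > 0)      # (h, w, 3) in [0, 1]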
| threednel-main | threednel/third_party/surfemb/surface_embedding.py |
# MIT License
#
# Copyright (c) 2020 Vincent Sitzmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# From https://vsitzmann.github.io/siren/ (MIT License)
import numpy as np
import torch
from torch import nn
class SineLayer(nn.Module):
def __init__(
self, in_features, out_features, bias=True, is_first=False, omega_0=30.0
):
super().__init__()
self.omega_0 = omega_0
self.is_first = is_first
self.in_features = in_features
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.init_weights()
def init_weights(self):
with torch.no_grad():
if self.is_first:
self.linear.weight.uniform_(-1 / self.in_features, 1 / self.in_features)
else:
self.linear.weight.uniform_(
-np.sqrt(6 / self.in_features) / self.omega_0,
np.sqrt(6 / self.in_features) / self.omega_0,
)
def forward(self, input):
return torch.sin(self.omega_0 * self.linear(input))
class Siren(nn.Module):
def __init__(
self,
in_features,
hidden_features,
hidden_layers,
out_features,
outermost_linear=True,
first_omega_0=30.0,
hidden_omega_0=30.0,
):
super().__init__()
self.net = []
self.net.append(
SineLayer(
in_features, hidden_features, is_first=True, omega_0=first_omega_0
)
)
for i in range(hidden_layers):
self.net.append(
SineLayer(
hidden_features,
hidden_features,
is_first=False,
omega_0=hidden_omega_0,
)
)
if outermost_linear:
final_linear = nn.Linear(hidden_features, out_features)
with torch.no_grad():
final_linear.weight.uniform_(
-np.sqrt(6 / hidden_features) / hidden_omega_0,
np.sqrt(6 / hidden_features) / hidden_omega_0,
)
self.net.append(final_linear)
else:
self.net.append(
SineLayer(
hidden_features,
out_features,
is_first=False,
omega_0=hidden_omega_0,
)
)
self.net = nn.Sequential(*self.net)
def forward(self, coords):
return self.net(coords)
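# Hedged usage sketch (not part of the original file): a SIREN used as a key
# model maps normalised 3D surface points to embedding vectors.
if __name__ == '__main__':
  mlp = Siren(in_features=3, hidden_features=256, hidden_layers=2, out_features=12)
  pts = torch.rand(1024, 3) * 2 - 1  # stand-in points in [-1, 1]^3
  emb = mlp(pts)
  print(emb.shape)  # expected: torch.Size([1024, 12])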
| threednel-main | threednel/third_party/surfemb/siren.py |
# Copyright 2021 The dm_env Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pytest configuration for dm_env."""
collect_ignore = [
'conftest.py',
'setup.py',
]
| dm_env-master | conftest.py |