python_code (string, 0-780k chars) | repo_name (string, 7-38 chars) | file_path (string, 5-103 chars)
---|---|---|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
from dmvr import utils
import tensorflow as tf
class UtilsTest(tf.test.TestCase):
def test_combine_datasets(self):
ds_0 = tf.data.Dataset.from_tensor_slices({
'feature_0': [[[0] * 10] * 10] * 5,
'feature_1': [[0] * 10] * 5,
})
ds_1 = tf.data.Dataset.from_tensor_slices({
'feature_0': [[[1] * 10] * 10] * 5,
'feature_1': [[1] * 10] * 5,
})
ds_2 = tf.data.Dataset.from_tensor_slices({
'feature_0': [[[2] * 10] * 10] * 5,
'feature_1': [[2] * 10] * 5,
})
# Dataset uniformly sampling from all 3 datasets.
ds_uniform = utils.combine_datasets([ds_0, ds_1, ds_2], 7)
data_uniform = next(iter(ds_uniform))
# Dataset sampling only from ds_0 and ds_2 (ds_1 has zero weight).
ds_no_1 = utils.combine_datasets([ds_0, ds_1, ds_2], 7, [0.5, 0, 0.5])
data_no_1 = next(iter(ds_no_1))
self.assertSetEqual(set(data_uniform.keys()),
set(['feature_0', 'feature_1']))
self.assertAllEqual(data_uniform['feature_0'].shape, (7, 10))
self.assertAllEqual(data_uniform['feature_1'].shape, (7,))
self.assertSetEqual(set(data_no_1.keys()),
set(['feature_0', 'feature_1']))
self.assertAllEqual(data_no_1['feature_0'].shape, (7, 10))
self.assertAllEqual(data_no_1['feature_1'].shape, (7,))
self.assertAllInSet(data_uniform['feature_0'], (0, 1, 2))
self.assertAllInSet(data_uniform['feature_1'], (0, 1, 2))
self.assertAllInSet(data_no_1['feature_0'], (0, 2))
self.assertAllInSet(data_no_1['feature_1'], (0, 2))
if __name__ == '__main__':
tf.test.main()
| dmvr-master | dmvr/utils_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for modalities."""
import os
from dmvr import builders
from dmvr import modalities
from dmvr import tokenizers
import numpy as np
from parameterized import parameterized
import tensorflow as tf
# Removed: Internal pyglib dependencies
_TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
_SAMPLE_IMAGE_PATH = os.path.join(_TESTDATA_DIR, 'sample.jpeg')
_VOCAB_PATH = os.path.join(_TESTDATA_DIR, 'tokenizers', 'word_vocab.txt')
class ModalitiesTest(tf.test.TestCase):
def setUp(self):
super().setUp()
seq_example = tf.train.SequenceExample()
# Create stub frames and inject them in the SequenceExample.
with open(_SAMPLE_IMAGE_PATH, 'rb') as f: raw_image_bytes = f.read()
for _ in range(10 * 5):
seq_example.feature_lists.feature_list.get_or_create(
'image/encoded').feature.add().bytes_list.value[:] = [raw_image_bytes]
# Create stub flow and inject it in the SequenceExample.
for _ in range(10 * 5):
seq_example.feature_lists.feature_list.get_or_create(
'flow/encoded').feature.add().bytes_list.value[:] = [raw_image_bytes]
# Create stub label and inject it in the SequenceExample.
raw_label_index = 42
raw_label_name = b'label'
seq_example.context.feature.get_or_create(
'clip/label/index').int64_list.value[:] = [raw_label_index]
seq_example.context.feature.get_or_create(
'clip/label/string').bytes_list.value[:] = [raw_label_name]
# Create stub raw text and inject it in SequenceExample.
raw_text = b'hello world'
seq_example.context.feature.get_or_create(
'caption/string').bytes_list.value[:] = [raw_text, raw_text]
# Create stub audio and inject it in SequenceExample.
raw_audio = np.linspace(-1, 1, 48000 * 5)
seq_example.feature_lists.feature_list.get_or_create(
'WAVEFORM/feature/floats').feature.add().float_list.value[:] = raw_audio
serialized_seq_example = seq_example.SerializeToString()
self._seq_examples = [serialized_seq_example] * 8 # Batch size is 8.
# Create builders.
self._parser_builder = builders.SequenceExampleParserBuilder()
self._sampler_builder = builders.SamplerBuilder()
self._decoder_builder = builders.DecoderBuilder()
self._preprocessor_builder = builders.PreprocessorBuilder()
self._postprocessor_builder = builders.PostprocessorBuilder()
def _process_examples(self):
"""Process input examples simulating dataset object creation."""
def pre_batch_process(raw_seq_example):
output = self._parser_builder.build()(raw_seq_example)
output = self._sampler_builder.build()(output)
output = self._decoder_builder.build()(output)
output = self._preprocessor_builder.build()(output)
return output
# Batch and postprocess.
output = [pre_batch_process(rse) for rse in self._seq_examples]
batched_output = {}
for k in output[0].keys():
batched_output[k] = tf.stack([out[k] for out in output])
output = batched_output
output = self._postprocessor_builder.build()(output)
return output
@parameterized.expand((
(True, 1, False, True, ['image_random_sample'], [
'image_resize_smallest', 'image_random_crop', 'image_random_flip',
'image_normalize'
], []),
(True, 1, False, False, ['image_random_sample'],
['image_resize_smallest', 'image_random_crop', 'image_normalize'], []),
(False, 1, False, True, ['image_middle_sample'],
['image_resize_smallest', 'image_central_crop', 'image_normalize'], []),
(False, 2, False, True, ['image_linspace_sample'],
['image_resize_smallest', 'image_central_crop',
'image_normalize'], ['image_reshape']),
(True, 1, True, True, ['image_random_sample'], [
'image_normalize', 'image_resize_smallest', 'image_random_crop',
'image_random_flip', 'image_extract_flow_channels', 'image_clip_flow'
], []),
))
def test_add_image(self, is_training, num_test_clips, is_flow, random_flip,
sample_ops, preprocess_ops, postprocess_ops):
is_rgb = None if is_flow else True
zero_centering_image = is_flow
modalities.add_image(
self._parser_builder, # `parser_builder`
self._sampler_builder, # `sampler_builder`
self._decoder_builder, # `decoder_builder`
self._preprocessor_builder, # `preprocessor_builder`
self._postprocessor_builder, # `postprocessor_builder`
'image/encoded', # `input_feature_name`
'image', # `output_feature_name`
is_training, # `is_training`
32, # `num_frames`
1, # `stride`
num_test_clips, # `num_test_clips`
224, # `min_resize`
200, # `crop_size`
zero_centering_image, # `zero_centering_image`
True, # `sync_random_state`
is_rgb, # `is_rgb`
is_flow, # `is_flow`
random_flip) # `random_flip`
output = self._process_examples()
self.assertAllEqual(
[fd.fn_name for fd in self._sampler_builder.get_summary()], sample_ops)
self.assertAllEqual(
[fd.fn_name for fd in self._decoder_builder.get_summary()],
['image_decode_jpeg'])
self.assertAllEqual(
[fd.fn_name for fd in self._preprocessor_builder.get_summary()],
preprocess_ops)
self.assertAllEqual(
[fd.fn_name for fd in self._postprocessor_builder.get_summary()],
postprocess_ops)
# Assert static shape.
self.assertNotIn(None, output['image'].shape.as_list())
self.assertSetEqual(set(output.keys()), set(['image']))
num_output_channels = 2 if is_flow else 3
self.assertAllEqual(output['image'].shape,
(8 * num_test_clips, 32, 200, 200, num_output_channels))
@parameterized.expand(((False, False), (False, True), (True, True)))
def test_add_label(self, one_hot_label, add_label_name):
modalities.add_label(
self._parser_builder, # `parser_builder`
self._decoder_builder, # `decoder_builder`
self._preprocessor_builder, # `preprocessor_builder`
'clip/label/index', # `input_label_index_feature_name`
'label', # `output_label_index_feature_name`
'clip/label/string', # `input_label_name_feature_name`
'label_name', # `output_label_name_feature_name`
False, # `is_multi_label`
one_hot_label, # `one_hot_label`
50, # `num_classes`
add_label_name) # `add_label_name`
output = self._process_examples()
decoder_ops = ['label_sparse_to_dense']
if add_label_name:
decoder_ops.append('label_name_sparse_to_dense')
self.assertAllEqual(
[fd.fn_name for fd in self._decoder_builder.get_summary()],
decoder_ops)
if one_hot_label:
preprocess_ops = ['label_one_hot']
else:
preprocess_ops = ['label_set_shape']
if add_label_name:
preprocess_ops.append('label_name_set_shape')
self.assertAllEqual(
[fd.fn_name for fd in self._preprocessor_builder.get_summary()],
preprocess_ops)
# Assert static shape.
self.assertNotIn(None, output['label'].shape.as_list())
keys = set(['label'])
if add_label_name:
keys.add('label_name')
self.assertSetEqual(set(output.keys()), keys)
if one_hot_label:
self.assertAllEqual(output['label'], [[0] * 42 + [1] + [0] * 7] * 8)
else:
self.assertAllEqual(output['label'], [[42]] * 8)
if add_label_name:
self.assertAllEqual(output['label_name'], [[b'label']] * 8)
@parameterized.expand(((16,), (1,)))
def test_add_text(self, max_num_words):
tokenizer_model = tokenizers.WordTokenizer(
_VOCAB_PATH) # OSS: removed internal filename loading.
tokenizer_model.initialize()
modalities.add_text(
self._parser_builder, # `parser_builder`
self._decoder_builder, # `decoder_builder`
self._preprocessor_builder, # `preprocessor_builder`
tokenizer_model, # `tokenizer`
True, # `is_training`
'caption/string', # `input_feature_name`
builders.TEXT_FEATURE_NAME, # `output_raw_string_name`
builders.TEXT_INDICES_FEATURE_NAME, # `output_feature_name`
False, # `prepend_bos`
False, # `append_eos`
True, # `keep_raw_string`
2, # `max_num_captions`
max_num_words, # `max_num_words`
True) # `sync_random_state`
output = self._process_examples()
self.assertAllEqual(
[fd.fn_name for fd in self._decoder_builder.get_summary()],
['text_indices_sparse_to_dense'])
self.assertAllEqual(
[fd.fn_name for fd in self._preprocessor_builder.get_summary()],
['text_indices_sample_captions', 'text_indices_tokenization',
'text_indices_set_shape'])
# Assert static shape.
self.assertNotIn(
None, output[builders.TEXT_INDICES_FEATURE_NAME].shape.as_list())
self.assertSetEqual(set(output.keys()),
set([builders.TEXT_INDICES_FEATURE_NAME,
builders.TEXT_FEATURE_NAME]))
words = [4, 5][:min(2, max_num_words)]
padding = [0] * max(0, max_num_words - 2)
self.assertAllEqual(
output[builders.TEXT_INDICES_FEATURE_NAME],
[[words + padding, words + padding]] * 8)
@parameterized.expand((
(True, 1, ['audio_sparse_to_dense', 'audio_random_sample'], []),
(False, 1, ['audio_sparse_to_dense', 'audio_middle_sample'], []),
(False, 2, ['audio_sparse_to_dense', 'audio_linspace_sample'],
['audio_reshape'])))
def test_add_audio(self, is_training, num_test_clips, sample_ops,
postprocess_ops):
modalities.add_audio(
self._parser_builder, # `parser_builder`
self._sampler_builder, # `sampler_builder`
self._postprocessor_builder, # `postprocessor_builder`
'WAVEFORM/feature/floats', # `input_feature_name`
builders.AUDIO_FEATURE_NAME, # `output_feature_name`
is_training, # `is_training`
30720, # `num_samples`
1, # `stride`
num_test_clips) # `num_test_clips`
output = self._process_examples()
self.assertAllEqual(
[fd.fn_name for fd in self._sampler_builder.get_summary()],
sample_ops)
self.assertAllEqual(
[fd.fn_name for fd in self._postprocessor_builder.get_summary()],
postprocess_ops)
# Assert static shape.
self.assertNotIn(
None, output[builders.AUDIO_FEATURE_NAME].shape.as_list())
self.assertSetEqual(set(output.keys()),
set([builders.AUDIO_FEATURE_NAME]))
self.assertAllEqual(output[builders.AUDIO_FEATURE_NAME].shape,
(8 * num_test_clips, 30720))
def test_all_modalities(self):
# Add RGB image.
modalities.add_image(self._parser_builder, self._sampler_builder,
self._decoder_builder, self._preprocessor_builder,
self._postprocessor_builder)
# Add flow image. Note that in this test this will read from an RGB
# flow/encoded feature, since we store flow on disk as RGB images where only
# the first two channels (RG) correspond to the relevant horizontal and
# vertical displacement vectors.
modalities.add_image(
self._parser_builder,
self._sampler_builder,
self._decoder_builder,
self._preprocessor_builder,
self._postprocessor_builder,
input_feature_name='flow/encoded',
output_feature_name=builders.FLOW_FEATURE_NAME,
is_rgb=None,
zero_centering_image=True,
is_flow=True)
modalities.add_label(
self._parser_builder,
self._decoder_builder,
self._preprocessor_builder,
num_classes=50)
tokenizer = tokenizers.WordTokenizer(
_VOCAB_PATH) # OSS: removed internal filename loading.
tokenizer.initialize()
modalities.add_text(
self._parser_builder,
self._decoder_builder,
self._preprocessor_builder,
tokenizer=tokenizer)
modalities.add_audio(self._parser_builder, self._sampler_builder,
self._postprocessor_builder)
output = self._process_examples()
self.assertSetEqual(
set(output.keys()),
set([
builders.IMAGE_FEATURE_NAME, builders.FLOW_FEATURE_NAME,
builders.LABEL_INDEX_FEATURE_NAME,
builders.TEXT_INDICES_FEATURE_NAME, builders.AUDIO_FEATURE_NAME
]))
self.assertAllEqual(output[builders.IMAGE_FEATURE_NAME].shape,
(8, 32, 200, 200, 3))
self.assertAllEqual(output[builders.FLOW_FEATURE_NAME].shape,
(8, 32, 200, 200, 2))
self.assertAllEqual(output[builders.LABEL_INDEX_FEATURE_NAME],
[[0] * 42 + [1] + [0] * 7] * 8)
self.assertAllEqual(output[builders.TEXT_INDICES_FEATURE_NAME],
[[[4, 5] + [0] * 14]] * 8)
self.assertAllEqual(output[builders.AUDIO_FEATURE_NAME].shape, (8, 30720))
if __name__ == '__main__':
tf.test.main()
| dmvr-master | dmvr/modalities_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for processors."""
import itertools
import os
from absl.testing import parameterized
from dmvr import processors
from dmvr import tokenizers
import numpy as np
import tensorflow as tf
# Removed: Internal pyglib dependencies
_TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
_SAMPLE_IMAGE_PATH = os.path.join(_TESTDATA_DIR, 'sample.jpeg')
_VOCAB_PATH = os.path.join(_TESTDATA_DIR, 'tokenizers', 'word_vocab.txt')
class SampleTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
self._sequence = tf.range(100)
def test_sample_linspace_sequence(self):
sampled_seq_1 = processors.sample_linspace_sequence(self._sequence, 10, 10)
sampled_seq_2 = processors.sample_linspace_sequence(self._sequence, 7, 10)
sampled_seq_3 = processors.sample_linspace_sequence(self._sequence, 7, 5, 2)
sampled_seq_4 = processors.sample_linspace_sequence(self._sequence, 101, 1)
self.assertAllEqual(sampled_seq_1, range(100))
# [0, 1, 2, 3, 4, ..., 8, 9, 15, 16, ..., 97, 98, 99]
self.assertAllEqual(
sampled_seq_2,
[15 * i + j for i, j in itertools.product(range(7), range(10))])
# [0, 2, 4, 6, 8, 15, 17, 19, ..., 96, 98]
self.assertAllEqual(
sampled_seq_3,
[15 * i + 2 * j for i, j in itertools.product(range(7), range(5))])
self.assertAllEqual(sampled_seq_4, [0] + list(range(100)))
def test_sample_sequence(self):
sampled_seq_1 = processors.sample_sequence(self._sequence, 10, False)
sampled_seq_2 = processors.sample_sequence(self._sequence, 10, False, 2)
sampled_seq_3 = processors.sample_sequence(self._sequence, 10, True)
self.assertAllEqual(sampled_seq_1, range(45, 55))
self.assertAllEqual(sampled_seq_2, range(40, 60, 2))
offset_3 = sampled_seq_3[0]
self.assertBetween(offset_3, 0, 99)
self.assertAllEqual(sampled_seq_3, range(offset_3, offset_3 + 10))
def test_sample_sequence_with_state(self):
state = {}
sampled_seq_1 = processors.sample_sequence(
self._sequence, 10, True, state=state)
sampled_seq_2 = processors.sample_sequence(
self._sequence, 10, True, state=state)
self.assertAllEqual(sampled_seq_1, sampled_seq_2)
def test_sample_or_pad_non_sorted_sequence(self):
sampled_seq_1 = processors.sample_or_pad_non_sorted_sequence(
self._sequence, 10, 0, False)
sampled_seq_2 = processors.sample_or_pad_non_sorted_sequence(
self._sequence, 110, 0, False)
self.assertAllEqual(sampled_seq_1, range(10))
self.assertAllEqual(sampled_seq_2, list(range(100)) + [0] * 10)
def test_sample_or_pad_non_sorted_sequence_with_state(self):
state = {}
sampled_seq_1 = processors.sample_or_pad_non_sorted_sequence(
self._sequence, 10, 0, True, state=state)
sampled_seq_2 = processors.sample_or_pad_non_sorted_sequence(
self._sequence, 10, 0, True, state=state)
self.assertAllEqual(sampled_seq_1, sampled_seq_2)
self.assertRaises(
tf.errors.InvalidArgumentError,
processors.sample_or_pad_non_sorted_sequence,
self._sequence[:10], 10, 0, True, state=state)
def test_sample_or_pad_non_sorted_sequence_multidim_with_state(self):
state = {}
sampled_seq_1 = processors.sample_or_pad_non_sorted_sequence(
self._sequence, 10, 0, True, state=state)
multi_dim_sequence = tf.tile(self._sequence[:, None], (1, 10))
sampled_seq_2 = processors.sample_or_pad_non_sorted_sequence(
multi_dim_sequence, 10, 0, True, state=state)
self.assertAllEqual(sampled_seq_1, sampled_seq_2[:, 0])
@parameterized.named_parameters(
{
'testcase_name': 'len(seq) < num_steps',
'sequence': np.array([1, 2, 3]),
'num_steps': 5,
'expected_sequence': np.array([1, 2, 3, 1, 2])
},
{
'testcase_name': 'len(seq) == num_steps',
'sequence': np.array([1, 2, 3]),
'num_steps': 3,
'expected_sequence': np.array([1, 2, 3])
},
{
'testcase_name': 'len(seq) < num_steps with stride',
'sequence': np.array([1, 2, 3]),
'num_steps': 5,
'expected_sequence': np.array([1, 3, 2, 1, 3]),
'stride': 2
},
{
'testcase_name': 'len(seq) == num_steps with stride',
'sequence': np.array([1, 2, 3]),
'num_steps': 3,
'expected_sequence': np.array([1, 1, 1]),
'stride': 3
},
)
def test_sample_sequence_fixed_offset(self,
sequence: np.ndarray,
num_steps: int,
expected_sequence: np.ndarray,
stride: int = 1):
"""Tests that offset is always 0."""
for seed in range(5):
actual_sequence = processors.sample_sequence(
sequence, num_steps=num_steps, random=True, stride=stride, seed=seed)
np.testing.assert_array_equal(actual_sequence, expected_sequence)
class DecodeTest(tf.test.TestCase):
def test_decode_jpeg(self):
with open(_SAMPLE_IMAGE_PATH, 'rb') as f: raw_image_bytes = f.read()
raw_image = tf.constant([raw_image_bytes, raw_image_bytes])
decoded_image = processors.decode_jpeg(raw_image)
decoded_image_with_static_channels = processors.decode_jpeg(raw_image, 3)
self.assertEqual(decoded_image_with_static_channels.shape.as_list()[3], 3)
self.assertAllEqual(decoded_image.shape, (2, 263, 320, 3))
self.assertAllEqual(decoded_image_with_static_channels.shape,
(2, 263, 320, 3))
class PreprocessTest(tf.test.TestCase):
def setUp(self):
super().setUp()
# [[0, 1, ..., 119], [1, 2, ..., 120], ..., [89, 90, ..., 208]].
self._frames = tf.stack([tf.range(i, i + 120) for i in range(90)])
self._frames = tf.cast(self._frames, tf.uint8)
self._frames = self._frames[tf.newaxis, :, :, tf.newaxis]
self._frames = tf.broadcast_to(self._frames, (6, 90, 120, 3))
# Create an equivalent numpy array for assertions.
self._np_frames = np.array([range(i, i + 120) for i in range(90)])
self._np_frames = self._np_frames[np.newaxis, :, :, np.newaxis]
self._np_frames = np.broadcast_to(self._np_frames, (6, 90, 120, 3))
def test_set_shape(self):
with open(_SAMPLE_IMAGE_PATH, 'rb') as f: raw_image = f.read()
raw_image = tf.constant([raw_image])
decoded_image = processors.decode_jpeg(raw_image)
decoded_image = processors.set_shape(decoded_image, (1, 263, 320, 3))
self.assertAllEqual(decoded_image.shape.as_list(), (1, 263, 320, 3))
def test_crop_image(self):
cropped_image_1 = processors.crop_image(self._frames, 50, 70)
cropped_image_2 = processors.crop_image(self._frames, 200, 200)
cropped_image_3 = processors.crop_image(self._frames, 50, 70, True)
self.assertAllEqual(cropped_image_1.shape, (6, 50, 70, 3))
self.assertAllEqual(cropped_image_1, self._np_frames[:, 20:70, 25:95, :])
self.assertAllEqual(cropped_image_2.shape, (6, 200, 200, 3))
expected = np.pad(
self._np_frames, ((0, 0), (55, 55), (40, 40), (0, 0)), 'constant')
self.assertAllEqual(cropped_image_2, expected)
self.assertAllEqual(cropped_image_3.shape, (6, 50, 70, 3))
offset = cropped_image_3[0, 0, 0, 0]
expected = np.array([range(i, i + 70) for i in range(offset, offset + 50)])
expected = expected[np.newaxis, :, :, np.newaxis]
expected = np.broadcast_to(expected, (6, 50, 70, 3))
self.assertAllEqual(cropped_image_3, expected)
def test_crop_image_with_state(self):
state = {}
cropped_image_1 = processors.crop_image(self._frames, 50, 70, state=state)
cropped_image_2 = processors.crop_image(self._frames, 50, 70, state=state)
self.assertAllEqual(cropped_image_1, cropped_image_2)
def test_resize_smallest(self):
resized_frames_1 = processors.resize_smallest(self._frames, 180)
resized_frames_2 = processors.resize_smallest(self._frames, 45)
resized_frames_3 = processors.resize_smallest(self._frames, 90)
resized_frames_4 = processors.resize_smallest(
tf.transpose(a=self._frames, perm=(0, 2, 1, 3)), 45)
self.assertAllEqual(resized_frames_1.shape, (6, 180, 240, 3))
self.assertAllEqual(resized_frames_2.shape, (6, 45, 60, 3))
self.assertAllEqual(resized_frames_3.shape, (6, 90, 120, 3))
self.assertAllEqual(resized_frames_4.shape, (6, 60, 45, 3))
def test_resize_smallest_with_flow(self):
flows = tf.cast(self._frames, tf.float32)
resized_flows = processors.resize_smallest(flows, 180, True)
resized_flows_expected = 2.0 * processors.resize_smallest(flows, 180, False)
self.assertAllEqual(resized_flows, resized_flows_expected)
def test_random_flip_left_right(self):
flipped_frames = processors.random_flip_left_right(self._frames)
flipped = np.fliplr(self._np_frames[0, :, :, 0])
flipped = flipped[np.newaxis, :, :, np.newaxis]
flipped = np.broadcast_to(flipped, (6, 90, 120, 3))
self.assertTrue((flipped_frames == self._np_frames).numpy().all() or (
flipped_frames == flipped).numpy().all())
def test_random_flip_left_right_with_flow(self):
flows = tf.cast(self._frames, tf.float32)
flipped_flows = processors.random_flip_left_right(flows, is_flow=True)
flipped = np.fliplr(self._np_frames[0, :, :, 0])
flipped = flipped[np.newaxis, :, :, np.newaxis]
flipped = np.broadcast_to(flipped, (6, 90, 120, 3))
flipped_flow = flipped.astype(np.float32)
flipped_flow[:, :, :, 0] *= -1.0
self.assertTrue(
(flipped_flows == self._np_frames.astype(np.float32)).numpy().all() or (
flipped_flows == flipped_flow).numpy().all())
def test_random_flip_left_right_with_state(self):
state = {}
flipped_frames_1 = processors.random_flip_left_right(
self._frames, state=state)
flipped_frames_2 = processors.random_flip_left_right(
self._frames, state=state)
self.assertAllEqual(flipped_frames_1, flipped_frames_2)
def test_normalize_image(self):
normalized_images_1 = processors.normalize_image(
self._frames, False, tf.float32)
normalized_images_2 = processors.normalize_image(
self._frames, True, tf.float32)
self.assertAllClose(normalized_images_1, self._np_frames / 255)
self.assertAllClose(normalized_images_2, self._np_frames * 2 / 255 - 1.0)
def test_scale_jitter_augm(self):
no_jitter_images = processors.scale_jitter_augm(self._frames, 0.8, 1.0, 0.0)
jitter_images = processors.scale_jitter_augm(
self._frames, 2.0, 2.00001, 1.0)
self.assertAllEqual(no_jitter_images.shape, (6, 90, 120, 3))
self.assertAllEqual(jitter_images.shape, (6, 180, 240, 3))
def test_scale_jitter_augm_with_state(self):
state = {}
jitter_image_1 = processors.scale_jitter_augm(
self._frames, 0.8, 1.2, 1.0, state=state)
jitter_image_2 = processors.scale_jitter_augm(
self._frames, 0.8, 1.2, 1.0, state=state)
self.assertAllEqual(jitter_image_1, jitter_image_2)
def test_scale_jitter_augm_with_flow(self):
state = {}
flows = tf.cast(self._frames, tf.float32)
jitter_flows = processors.scale_jitter_augm(
flows, 0.8, 1.2, 1.0, state=state, is_flow=True)
jitter_flows_expected = processors.scale_jitter_augm(
flows, 0.8, 1.2, 1.0, state=state)
h_s, w_s, _ = state['scale_jitter_augm_info']
jitter_flows_expected *= tf.stack([h_s, w_s, 1.0])[None, None, None, :]
self.assertAllClose(jitter_flows, jitter_flows_expected)
def test_color_default_augment(self):
normalized_images = processors.normalize_image(
self._frames, False, tf.float32)
no_augmented_images = processors.color_default_augm(
normalized_images, False, 0.0, 0.0)
color_augmented_images = processors.color_default_augm(
normalized_images, False, 1.0, 0.0)
color_dropped_images = processors.color_default_augm(
normalized_images, False, 0.0, 1.0)
self.assertAllEqual(no_augmented_images.shape, normalized_images.shape)
self.assertAllEqual(color_augmented_images.shape, normalized_images.shape)
self.assertAllEqual(color_dropped_images.shape, normalized_images.shape)
self.assertAllEqual(normalized_images, no_augmented_images)
self.assertNotAllEqual(normalized_images, color_augmented_images)
self.assertNotAllEqual(normalized_images, color_dropped_images)
self.assertAllEqual(color_dropped_images[:, :, :, 0],
color_dropped_images[:, :, :, 1])
self.assertAllEqual(color_dropped_images[:, :, :, 0],
color_dropped_images[:, :, :, 2])
def test_space_to_depth(self):
output_frames_1 = processors.space_to_depth(self._frames, 2, 3)
output_frames_2 = processors.space_to_depth(self._frames, 3, 2)
output_frames_3 = processors.space_to_depth(
self._frames, spatial_block_size=2)
self.assertAllEqual(output_frames_1.shape, (3, 30, 40, 54))
self.assertAllEqual(output_frames_2.shape, (2, 45, 60, 36))
self.assertAllEqual(output_frames_3.shape, (6, 45, 60, 12))
def test_crop_or_pad_words(self):
input_words_indices = tf.expand_dims(tf.range(10, dtype=tf.int32), axis=0)
output_words_indices_1 = processors.crop_or_pad_words(
input_words_indices, 5)
output_words_indices_2 = processors.crop_or_pad_words(
input_words_indices, 15)
self.assertAllEqual(output_words_indices_1, [list(range(5))])
self.assertAllEqual(output_words_indices_2,
[[i for i in range(10)] + [0] * 5])
def test_tokenize(self):
tokenizer = tokenizers.WordTokenizer(
_VOCAB_PATH) # OSS: removed internal filename loading.
tokenizer.initialize()
input_features = {'text': tf.constant(['hello world', 'hello', 'world'])}
output_features = processors.tokenize(input_features, tokenizer, 'text',
'indices', False, False, 4, True)
self.assertAllEqual(output_features['text'],
['hello world', 'hello', 'world'])
self.assertAllEqual(output_features['indices'],
[[4, 5, 0, 0], [4, 0, 0, 0], [5, 0, 0, 0]])
class PostprocessTest(tf.test.TestCase):
def test_batched_video_transpose(self):
input_tensor = tf.constant([[[1, 2], [3, 4], [5, 6]]])
output_tensor = processors.batched_video_transpose(input_tensor, (0, 2, 1))
self.assertAllEqual(output_tensor, [[[1, 3, 5], [2, 4, 6]]])
def test_batched_space_to_depth(self):
input_frames = tf.zeros((8, 30, 150, 210, 3))
output_frames_1 = processors.batched_space_to_depth(input_frames, 2, 3)
output_frames_2 = processors.batched_space_to_depth(input_frames, 3, 2)
output_frames_3 = processors.batched_space_to_depth(
input_frames, spatial_block_size=2)
self.assertAllEqual(output_frames_1.shape, (8, 15, 50, 70, 54))
self.assertAllEqual(output_frames_2.shape, (8, 10, 75, 105, 36))
self.assertAllEqual(output_frames_3.shape, (8, 30, 75, 105, 12))
if __name__ == '__main__':
tf.test.main()
| dmvr-master | dmvr/processors_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tokenizers."""
from __future__ import annotations
from collections.abc import Sequence
import os
from typing import Type, TypeVar
import clip.simple_tokenizer
from dmvr import tokenizers
from parameterized import parameterized
import tensorflow as tf
# Removed: Internal pyglib dependencies
_TESTDATA_DIR = os.path.join(os.path.dirname(__file__), 'testdata')
_MOCK_DATA = os.path.join(_TESTDATA_DIR, 'tokenizers')
_FILENAMES = {
tokenizers.SentencePieceTokenizer: 'spiece.model.1000.model',
tokenizers.WordTokenizer: 'word_vocab.txt',
tokenizers.BertTokenizer: 'bert_word_vocab.txt',
tokenizers.ClipTokenizer: clip.simple_tokenizer.default_bpe(),
}
T = TypeVar('T', bound=tokenizers.TextTokenizer)
def _get_tokenizer(cls: Type[T]) -> T:
filename = _FILENAMES[cls]
path = os.path.join(_MOCK_DATA, filename) # OSS: removed internal filename loading.
return cls(path)
def _tokenize_with_original_clip(
texts: str | Sequence[str],
context_length: int = 77) -> Sequence[Sequence[int]]:
# Code adapted from `clip.tokenize` because it's not importable (only
# `clip.simple_tokenizer` is).
if isinstance(texts, str):
texts = [texts]
tokenizer = clip.simple_tokenizer.SimpleTokenizer()
sot_token = tokenizer.encoder['<|startoftext|>']
eot_token = tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + tokenizer.encode(text) + [eot_token]
for text in texts]
result = []
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(f'Input {texts[i]} is too long for context length'
f' {context_length}')
result.append(tokens + [0] * (context_length - len(tokens)))
return result
def _decode_with_original_clip(tokens_ids: Sequence[int]) -> str:
tokenizer = clip.simple_tokenizer.SimpleTokenizer()
text = tokenizer.decode(tokens_ids)
eos = '<|endoftext|>'
return text[:text.index(eos) + len(eos)]
class TokenizerTest(tf.test.TestCase):
@parameterized.expand(
((tokenizers.WordTokenizer,), (tokenizers.SentencePieceTokenizer,),
(tokenizers.BertTokenizer,), (tokenizers.ClipTokenizer,)))
def test_tokenizer(self, cls):
tokenizer = _get_tokenizer(cls)
tokenizer.initialize()
input_string = ['hello world']
tokenized = tokenizer.string_tensor_to_indices(
input_string, max_num_tokens=42)
self.assertEqual(tokenized.dtype, tf.int32)
tokenized = tokenized.numpy().tolist()[0]
self.assertLen(tokenized, 42)
self.assertEqual(tokenized[-1], tokenizer.pad_token)
detokenized = tokenizer.indices_to_string(tokenized)
self.assertEqual(detokenized, 'hello world')
@parameterized.expand(
((tokenizers.WordTokenizer,), (tokenizers.SentencePieceTokenizer,),
(tokenizers.BertTokenizer,), (tokenizers.ClipTokenizer,)))
def test_bos_eos(self, cls):
tokenizer = _get_tokenizer(cls)
tokenizer.initialize()
input_string = ['hello world']
tokenized = tokenizer.string_tensor_to_indices(
input_string, prepend_bos=True, append_eos=True)
tokenized = tokenized.numpy().tolist()[0]
self.assertEqual(tokenized[0], tokenizer.bos_token)
if tokenizer.pad_token != tokenizer.eos_token:
tokenized = [t for t in tokenized if t != tokenizer.pad_token]
self.assertEqual(tokenized[-1], tokenizer.eos_token)
@parameterized.expand(
((tokenizers.WordTokenizer,), (tokenizers.SentencePieceTokenizer,),
(tokenizers.BertTokenizer,), (tokenizers.ClipTokenizer,)))
def test_not_initialized(self, cls):
tokenizer = _get_tokenizer(cls)
input_string = ['hello world']
with self.assertRaises(RuntimeError):
tokenizer.string_tensor_to_indices(input_string)
@parameterized.expand((
(tokenizers.WordTokenizer,),
(tokenizers.SentencePieceTokenizer,),
))
def test_string_to_indices(self, cls):
tokenizer = _get_tokenizer(cls)
tokenizer.initialize()
input_string = 'hello world'
tokenized = tokenizer.string_to_indices(
input_string, prepend_bos=True, append_eos=True, max_num_tokens=42)
self.assertEqual(type(tokenized), list)
self.assertEqual(tokenized[0], tokenizer.bos_token)
tokenized = [t for t in tokenized if t != tokenizer.pad_token]
self.assertEqual(tokenized[-1], tokenizer.eos_token)
detokenized = tokenizer.indices_to_string(tokenized[1:-1])
self.assertEqual(detokenized, 'hello world')
def test_clip_tokenizer(self):
tokenizer = _get_tokenizer(tokenizers.ClipTokenizer)
tokenizer.initialize()
input_string = ['This is a test.', 'pushups']
actual_tokenized_tf = tokenizer.string_tensor_to_indices(
input_string, prepend_bos=True, append_eos=True, max_num_tokens=77)
expected_tokenized = _tokenize_with_original_clip(input_string)
actual_tokenized1 = actual_tokenized_tf.numpy().tolist()[0]
expected_tokenized1 = expected_tokenized[0]
self.assertEqual(actual_tokenized1, expected_tokenized1)
actual_decoded = tokenizer.indices_to_string(actual_tokenized1)
self.assertEqual(actual_decoded, 'this is a test .')
actual_tokenized2 = actual_tokenized_tf.numpy().tolist()[1]
expected_tokenized2 = expected_tokenized[1]
self.assertEqual(actual_tokenized2, expected_tokenized2)
actual_decoded = tokenizer.indices_to_string(actual_tokenized2)
self.assertEqual(actual_decoded, input_string[1])
if __name__ == '__main__':
tf.test.main()
| dmvr-master | dmvr/tokenizers_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for adding modalities."""
import functools
from typing import Optional
from typing import Union
from absl import logging
from dmvr import builders
from dmvr import processors
from dmvr import tokenizers
import tensorflow as tf
# ----------------------------------------------------------------------
# -------- Methods aggregating functions for a given modality. ---------
# ----------------------------------------------------------------------
def add_image(
parser_builder: builders.BaseParserBuilder,
sampler_builder: builders.SamplerBuilder,
decoder_builder: builders.DecoderBuilder,
preprocessor_builder: builders.PreprocessorBuilder,
postprocessor_builder: builders.PostprocessorBuilder,
input_feature_name: str = 'image/encoded',
output_feature_name: str = builders.IMAGE_FEATURE_NAME,
is_training: bool = True,
# Video related parameters.
num_frames: int = 32,
stride: int = 1,
num_test_clips: int = 1,
min_resize: int = 224,
resize_method: str = tf.image.ResizeMethod.BILINEAR,
crop_size: int = 200,
zero_centering_image: bool = False,
sync_random_state: bool = True,
is_rgb: Optional[bool] = True,
is_flow: bool = False,
random_flip: bool = True,
normalization_mean: Union[tf.Tensor, float] = 0,
normalization_std: Union[tf.Tensor, float] = 1,
) -> None:
"""Adds functions to process image feature to builders.
This function expects the input to be either a `tf.train.SequenceExample` (for
videos) with the following structure:
```
feature_lists {
feature_list {
key: input_feature_name
value {
feature {
bytes_list {
value: jpeg_bytes
}
}
}
}
}
```
Or a `tf.train.Example` (for image only) with the following structure:
```
features {
feature {
key: input_feature_name
value {
bytes_list {
value: "JPEG"
}
}
}
}
```
The corresponding `builders.ExampleParserBuilder` or
`builders.SequenceExampleParserBuilder` has to be given as parameter.
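A minimal usage sketch (the builder instances are created by the caller, as in
a dataset factory; the argument values below are only illustrative defaults):
```
parser_builder = builders.SequenceExampleParserBuilder()
sampler_builder = builders.SamplerBuilder()
decoder_builder = builders.DecoderBuilder()
preprocessor_builder = builders.PreprocessorBuilder()
postprocessor_builder = builders.PostprocessorBuilder()
add_image(parser_builder, sampler_builder, decoder_builder,
          preprocessor_builder, postprocessor_builder,
          is_training=True, num_frames=32, stride=1,
          min_resize=224, crop_size=200)
```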
Args:
parser_builder: An instance of a `builders.BaseParserBuilder`.
sampler_builder: An instance of a `builders.SamplerBuilder`.
decoder_builder: An instance of a `builders.DecoderBuilder`.
preprocessor_builder: An instance of a `builders.PreprocessorBuilder`.
postprocessor_builder: An instance of a `builders.PostprocessorBuilder`.
input_feature_name: Name of the feature in the input `tf.train.Example` or
`tf.train.SequenceExample`. Exposing this as an argument allows using this
function for different image features within a single dataset.
output_feature_name: Name of the feature in the output features dictionary.
Exposing this as an argument allows using this function for different
image features within a single dataset.
is_training: Whether in training mode. If `True`, random sampling, random
cropping and random left-right flipping are used.
num_frames: Number of frames per subclip. For single images, use 1.
stride: Temporal stride to sample frames.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each video at test time.
If 1, then a single clip in the middle of the video is sampled. The clips
are aggregated in the batch dimension.
min_resize: Frames are resized so that `min(height, width)` is `min_resize`.
resize_method: A resizing method.
crop_size: Final size of the frame after cropping the resized frames. Both
height and width are the same.
zero_centering_image: If `True`, frames are normalized to values in [-1, 1].
If `False`, values in [0, 1].
sync_random_state: Whether to use stateful option to keep random operations
in sync between different modalities. All modalities having this option
`True` will use the same outcome in random operations such as sampling and
cropping.
is_rgb: If `True`, the number of channels in the JPEG is 3, if False, 1. If
is_flow is `True`, `is_rgb` should be set to `None` (see below).
is_flow: If `True`, the image is assumed to contain flow and will be
processed as such. Note that the number of channels in the JPEG for flow
is 3, but only two channels will be output corresponding to the valid
horizontal and vertical displacement.
random_flip: If `True`, a random horizontal flip is applied to the input
image. This augmentation may not be used if the label set contains
direction related classes, such as `pointing left`, `pointing right`, etc.
normalization_mean: Value to subtract from the input image to normalize it.
normalization_std: Value by which to divide the input image to normalize it.
"""
# Validate parameters.
if is_flow and is_rgb is not None:
raise ValueError('`is_rgb` should be `None` when requesting flow.')
if is_flow and not zero_centering_image:
raise ValueError('Flow contains displacement values that can be negative, '
'but `zero_centering_image` was set to `False`.')
if is_training and num_test_clips != 1:
logging.info('`num_test_clips` %d is ignored since `is_training` is true.',
num_test_clips)
# Parse frames or single image.
if isinstance(parser_builder, builders.SequenceExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_feature_name,
feature_type=tf.io.FixedLenSequenceFeature((), dtype=tf.string),
output_name=output_feature_name)
elif isinstance(parser_builder, builders.ExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_feature_name,
feature_type=tf.io.FixedLenFeature((), dtype=tf.string),
output_name=output_feature_name)
# Expand dimensions so single images have the same structure as videos.
sampler_builder.add_fn(
fn=lambda x: tf.expand_dims(x, axis=0),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_expand_dims')
else:
raise ValueError('`parser_builder` has an unexpected type.')
# Temporal sampler.
if is_training:
# Sample random clip.
sampler_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x, s=None: processors.sample_sequence(
x, num_frames, True, stride, state=s),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_random_sample',
# Use state to keep coherence between modalities if requested.
stateful=sync_random_state)
else:
if num_test_clips > 1:
# Sample linspace clips.
sampler_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x: processors.sample_linspace_sequence(
x, num_test_clips, num_frames, stride),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_linspace_sample')
else:
# Sample middle clip.
sampler_builder.add_fn(
fn=lambda x: processors.sample_sequence(x, num_frames, False, stride),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_middle_sample')
# Decode JPEG string to `tf.uint8`.
# Note that for flow, 3 channels are stored in the JPEG: the first two
# corresponds to horizontal and vertical displacement, respectively.
# The last channel contains zeros and is dropped later in the preprocessing.
# Hence, the output number of channels for flow is 2.
num_raw_channels = 3 if (is_rgb or is_flow) else 1
decoder_builder.add_fn(
fn=lambda x: processors.decode_jpeg(x, channels=num_raw_channels),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_decode_jpeg')
if is_flow:
# Cast the flow to `tf.float32`, normalizing between [-1.0, 1.0].
preprocessor_builder.add_fn(
fn=lambda x: processors.normalize_image(x, zero_centering_image=True),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_normalize')
# Resize images (resize happens only if necessary to save compute).
preprocessor_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x: processors.resize_smallest(
x, min_resize, is_flow=is_flow, method=resize_method),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_resize_smallest')
if is_training:
# Standard image data augmentation: random crop and random flip.
preprocessor_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x, s=None: processors.crop_image(
x, crop_size, crop_size, True, state=s),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_random_crop',
# Use state to keep coherence between modalities if requested.
stateful=sync_random_state)
if random_flip:
preprocessor_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x, s=None: processors.random_flip_left_right(
x, state=s, is_flow=is_flow),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_random_flip',
# Use state to keep coherence between modalities if requested.
stateful=sync_random_state)
else:
# Central crop of the frames.
preprocessor_builder.add_fn(
fn=lambda x: processors.crop_image(x, crop_size, crop_size, False),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_central_crop')
if is_flow:
# Keep only two channels for the flow: horizontal and vertical displacement.
preprocessor_builder.add_fn(
fn=lambda x: x[:, :, :, :2],
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_extract_flow_channels')
# Clip the flow to stay between [-1.0 and 1.0]
preprocessor_builder.add_fn(
fn=lambda x: tf.clip_by_value(x, -1.0, 1.0),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_clip_flow')
else:
# Cast the frames to `tf.float32`, normalizing according to
# `zero_centering_image`.
preprocessor_builder.add_fn(
fn=lambda x: processors.normalize_image(x, zero_centering_image),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_normalize')
preprocessor_builder.add_fn(
fn=lambda x: x - normalization_mean,
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_subtract_given_mean')
preprocessor_builder.add_fn(
fn=lambda x: x / normalization_std,
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_divide_by_given_std')
if num_test_clips > 1 and not is_training:
# In this case, multiple clips are merged together in batch dimension which
# will be `B * num_test_clips`.
postprocessor_builder.add_fn(
fn=lambda x: tf.reshape( # pylint: disable=g-long-lambda
x, (-1, num_frames, x.shape[2], x.shape[3], x.shape[4])),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_reshape')
def add_label(
parser_builder: builders.BaseParserBuilder,
decoder_builder: builders.DecoderBuilder,
preprocessor_builder: builders.PreprocessorBuilder,
input_label_index_feature_name: str = 'clip/label/index',
output_label_index_feature_name: str = builders.LABEL_INDEX_FEATURE_NAME,
input_label_name_feature_name: Optional[str] = 'clip/label/text',
output_label_name_feature_name: Optional[str] = builders
.LABEL_NAME_FEATURE_NAME,
# Label related parameters.
is_multi_label: bool = False,
one_hot_label: bool = True,
num_classes: Optional[int] = None,
add_label_name: bool = False):
"""Adds functions to process label feature to builders.
This function expects the input to be either a `tf.train.SequenceExample`
(with the features in the context) or a `tf.train.Example`. The expected
structure is (or equivalent for `tf.train.Example`):
```
context {
feature {
key: input_label_index_feature_name
value {
int64_list {
value: 42
...
}
}
}
feature {
key: input_label_name_feature_name
value {
bytes_list {
value: "label_42"
...
}
}
}
}
```
The corresponding `builders.ExampleParserBuilder` or
`builders.SequenceExampleParserBuilder` has to be given as parameter.
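A minimal usage sketch, assuming one-hot labels over an illustrative set of 50
classes and builders created by the caller:
```
add_label(parser_builder, decoder_builder, preprocessor_builder,
          one_hot_label=True, num_classes=50)
```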
Args:
parser_builder: An instance of a `builders.BaseParserBuilder`.
decoder_builder: An instance of a `builders.DecoderBuilder`.
preprocessor_builder: An instance of a `builders.PreprocessorBuilder`.
input_label_index_feature_name: Name of the label index feature in the input
`tf.train.Example` or `tf.train.SequenceExample`. Exposing this as an
argument allows using this function for different label features within a
single dataset.
output_label_index_feature_name: Name of the label index feature in the
output features dictionary. Exposing this as an argument allows using this
function for different label features within a single dataset.
input_label_name_feature_name: Name of the label name feature in the input
`tf.train.Example` or `tf.train.SequenceExample`. If `add_label_name` is
false, this option is ignored. Exposing this as an argument allows using
this function for different label features within a single dataset.
output_label_name_feature_name: Name of the label name feature in the output
features dictionary. If `add_label_name` is false, this option is ignored.
Exposing this as an argument allows using this function for different
label features within a single dataset.
is_multi_label: Whether raw data contains multiple labels per example.
one_hot_label: Return labels as one hot tensors. If `is_multi_label` is
`True`, one hot tensor might have multiple ones.
num_classes: Total number of classes in the dataset. It has to be provided
if `one_hot_label` is `True`.
add_label_name: Also return the name of the label. Not yet supported for
multi label.
"""
# Validate parameters.
if one_hot_label and not num_classes:
raise ValueError(
'`num_classes` must be given when requesting one hot label.')
if is_multi_label and not one_hot_label:
logging.warning(
'Multi label indices will be returned in a non fixed size dimension.')
if add_label_name and (input_label_name_feature_name is None or
output_label_name_feature_name is None):
raise ValueError(
'`input_label_name_feature_name` and `output_label_name_feature_name` '
'must be given when `add_label_name` is true.')
# Parse label.
if isinstance(parser_builder, builders.SequenceExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_label_index_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.int64),
output_name=output_label_index_feature_name,
is_context=True)
if add_label_name:
parser_builder.parse_feature(
feature_name=input_label_name_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.string),
output_name=output_label_name_feature_name,
is_context=True)
elif isinstance(parser_builder, builders.ExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_label_index_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.int64),
output_name=output_label_index_feature_name)
if add_label_name:
parser_builder.parse_feature(
feature_name=input_label_name_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.string),
output_name=output_label_name_feature_name)
else:
raise ValueError('`parser_builder` has an unexpected type.')
# Densify labels tensor in order to support multi label case.
decoder_builder.add_fn(
fn=tf.sparse.to_dense,
feature_name=output_label_index_feature_name,
fn_name=f'{output_label_index_feature_name}_sparse_to_dense')
if add_label_name:
decoder_builder.add_fn(
fn=tf.sparse.to_dense,
feature_name=output_label_name_feature_name,
fn_name=f'{output_label_name_feature_name}_sparse_to_dense')
if one_hot_label:
# Replace label index by one hot representation.
preprocessor_builder.add_fn(
fn=lambda x: tf.reduce_sum( # pylint: disable=g-long-lambda
input_tensor=tf.one_hot(x, num_classes),
axis=0),
feature_name=output_label_index_feature_name,
fn_name=f'{output_label_index_feature_name}_one_hot')
elif not is_multi_label:
preprocessor_builder.add_fn(
fn=lambda x: processors.set_shape(x, (1,)),
feature_name=output_label_index_feature_name,
fn_name=f'{output_label_index_feature_name}_set_shape')
if add_label_name and not is_multi_label:
preprocessor_builder.add_fn(
fn=lambda x: processors.set_shape(x, (1,)),
feature_name=output_label_name_feature_name,
fn_name=f'{output_label_name_feature_name}_set_shape')
def add_text(
parser_builder: builders.BaseParserBuilder,
decoder_builder: builders.DecoderBuilder,
preprocessor_builder: builders.PreprocessorBuilder,
tokenizer: tokenizers.TextTokenizer,
is_training: bool = True,
input_feature_name: str = 'caption/string',
output_raw_string_name: str = builders.TEXT_FEATURE_NAME,
output_feature_name: str = builders.TEXT_INDICES_FEATURE_NAME,
# Text related parameters.
prepend_bos: bool = False,
append_eos: bool = False,
keep_raw_string: bool = False,
max_num_captions: int = 1,
max_num_tokens: Optional[int] = 16,
sync_random_state: bool = False):
"""Adds functions to process text feature to builders.
This function expects the input to be either a `tf.train.SequenceExample`
(with the features in the context) or a `tf.train.Example`. The expected
structure is (or equivalent for `tf.train.Example`):
```
context {
feature {
key: input_feature_name
value {
bytes_list {
value: "Hello world!"
value: "This is a caption."
...
}
}
}
}
```
The corresponding `builders.ExampleParserBuilder` or
`builders.SequenceExampleParserBuilder` has to be given as parameter.
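A minimal usage sketch, assuming a word-level tokenizer; the vocabulary path is
illustrative and the builders are created by the caller:
```
tokenizer = tokenizers.WordTokenizer('/path/to/word_vocab.txt')  # Illustrative path.
tokenizer.initialize()
add_text(parser_builder, decoder_builder, preprocessor_builder,
         tokenizer=tokenizer, is_training=True,
         max_num_captions=1, max_num_tokens=16)
```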
Args:
parser_builder: An instance of a `builders.BaseParserBuilder`.
decoder_builder: An instance of a `builders.DecoderBuilder`.
preprocessor_builder: An instance of a `builders.PreprocessorBuilder`.
tokenizer: An instance of a tokenizer.
is_training: Whether in training mode. This will be used to randomly sample
the captions.
input_feature_name: Name of the feature in the input `tf.train.Example` or
`tf.train.SequenceExample`. Exposing this as an argument allows using this
function for different text features within a single dataset.
output_raw_string_name: Name of the raw string in the output features
dictionary. Exposing this as an argument allows using this function for
different text features within a single dataset.
output_feature_name: Name of the feature in the output features dictionary.
Exposing this as an argument allows using this function for different text
features.
prepend_bos: Whether to prepend BOS token.
append_eos: Whether to append EOS token.
keep_raw_string: Whether to keep raw string.
max_num_captions: Maximum number of captions to keep. If there are more
captions in the proto, only the first `max_num_captions` will be returned
if `is_training` is set to `False`. If `is_training` is `True`, then
`max_num_captions` will be randomly sampled. Finally, if the proto
contains fewer than `max_num_captions`, we pad with empty strings to make
sure there are `max_num_captions` in total.
max_num_tokens: Maximum number of tokens to keep from the text for each
caption. If there are more tokens, sequence is cropped, if less, the
caption is padded using the tokenizer pad id. The sequence is unmodified
if max_num_tokens is None.
sync_random_state: Whether to use stateful option to keep random operations
in sync between different modalities. All modalities having this option
`True` will use the same outcome in random operations used for sampling
the captions.
"""
# Parse text indices.
if isinstance(parser_builder, builders.SequenceExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.string),
output_name=output_raw_string_name,
is_context=True)
elif isinstance(parser_builder, builders.ExampleParserBuilder):
parser_builder.parse_feature(
feature_name=input_feature_name,
feature_type=tf.io.VarLenFeature(dtype=tf.string),
output_name=output_raw_string_name)
# Densify text tensor.
decoder_builder.add_fn(
fn=tf.sparse.to_dense,
feature_name=output_raw_string_name,
fn_name=f'{output_feature_name}_sparse_to_dense')
preprocessor_builder.add_fn(
# pylint: disable=g-long-lambda
lambda x, s=None: processors.sample_or_pad_non_sorted_sequence(
x, max_num_captions, b'', random=is_training, state=s),
# pylint: enable=g-long-lambda
feature_name=output_raw_string_name,
fn_name=f'{output_feature_name}_sample_captions',
# Use state to keep coherence between modalities if requested.
stateful=sync_random_state)
# Tokenize the sentence.
preprocessor_builder.add_fn(
fn=lambda x: processors.tokenize( # pylint: disable=g-long-lambda
x, tokenizer, output_raw_string_name, output_feature_name,
prepend_bos, append_eos, max_num_tokens, keep_raw_string),
fn_name=f'{output_feature_name}_tokenization')
if max_num_tokens is not None:
# Set text shape.
shape = (max_num_captions, max_num_tokens)
preprocessor_builder.add_fn(
fn=lambda x: processors.set_shape(x, shape),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_set_shape')
def add_audio(
parser_builder: builders.BaseParserBuilder,
sampler_builder: builders.SamplerBuilder,
postprocessor_builder: builders.PostprocessorBuilder,
preprocessor_builder: Optional[builders.PreprocessorBuilder] = None,
input_feature_name: str = 'WAVEFORM/feature/floats',
output_feature_name: str = builders.AUDIO_FEATURE_NAME,
is_training: bool = True,
# Audio related parameters.
num_samples: int = 30720,
stride: int = 1,
sample_rate: Optional[int] = 48000,
target_sample_rate: Optional[int] = None,
num_test_clips: int = 1,
sync_random_state: bool = True):
"""Adds functions to process audio feature to builders.
This function expects the input to be either a `tf.train.SequenceExample` (for
videos) with the following structure:
```
feature_lists {
feature_list {
key: input_feature_name
value {
feature {
float_list {
value: 0.0
value: 0.1
value: 0.2
...
}
}
}
}
}
```
Or a `tf.train.Example` (for a single audio example) with the following structure:
```
features {
feature {
key: input_feature_name
value {
float_list {
value: 0.0
value: 0.1
value: 0.2
...
}
}
}
}
```
The corresponding `builders.ExampleParserBuilder` or
`builders.SequenceExampleParserBuilder` has to be given as parameter.
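A minimal usage sketch, assuming raw waveforms are stored under the default
feature name and the builders are created by the caller:
```
add_audio(parser_builder, sampler_builder, postprocessor_builder,
          is_training=True, num_samples=30720, stride=1)
```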
Args:
parser_builder: An instance of a `builders.BaseParserBuilder`.
sampler_builder: An instance of a `builders.SamplerBuilder`.
postprocessor_builder: An instance of a `builders.PostprocessorBuilder`.
preprocessor_builder: An instance of a `builders.PreprocessorBuilder`.
input_feature_name: Name of the feature in the input `tf.train.Example` or
`tf.train.SequenceExample`. Exposing this as an argument allows using this
function for different audio features within a single dataset.
output_feature_name: Name of the feature in the output features dictionary.
Exposing this as an argument allows using this function for different
audio features within a single dataset.
is_training: Whether in training mode. If `True`, random sample is used.
num_samples: Number of samples per subclip.
stride: Temporal stride to sample audio signal.
sample_rate: The original sample rate of the input audio stored in sstables.
target_sample_rate: If not None, the waveforms are resampled to this sample
rate (the resampling is implemented with Fast Fourier Transforms).
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each audio at test time.
If 1, then a single clip in the middle of the audio is sampled. The clips
are aggregated in the batch dimension.
sync_random_state: Whether to use stateful option to keep random operations
in sync between different modalities. All modalities having this option
`True` will use the same outcome in random operations such as sampling and
cropping.
"""
# Validate parameters.
if is_training and num_test_clips != 1:
logging.info('`num_test_clips` %d is ignored since `is_training` is true.',
num_test_clips)
# Keep audio signal.
parser_builder.parse_feature(
feature_name=input_feature_name,
# Entire signal stored in one Feature.
feature_type=tf.io.VarLenFeature(dtype=tf.float32),
output_name=output_feature_name)
# Densify.
sampler_builder.add_fn(
fn=lambda x: tf.sparse.to_dense(x)[0],
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_sparse_to_dense')
# Temporal sampler.
if is_training:
# Sample random clip.
sampler_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x, s=None: processors.sample_sequence(
x, num_samples, True, stride, state=s),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_random_sample',
# Use state to keep coherence between modalities if requested.
stateful=sync_random_state)
else:
if num_test_clips > 1:
# Sample linspace clips.
sampler_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x: processors.sample_linspace_sequence(
x, num_test_clips, num_samples, stride),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_linspace_sample')
else:
# Sample middle clip.
sampler_builder.add_fn(
# pylint: disable=g-long-lambda
fn=lambda x: processors.sample_sequence(
x, num_samples, False, stride),
# pylint: enable=g-long-lambda
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_middle_sample')
# Apply FFTs to change the sample rate of the waveforms.
if preprocessor_builder is not None and target_sample_rate is not None:
preprocessor_builder.add_fn(
functools.partial(
processors.resample_audio,
num_subclips=num_test_clips,
in_sample_rate=sample_rate,
out_sample_rate=target_sample_rate,
is_training=is_training),
feature_name=builders.AUDIO_FEATURE_NAME)
if num_test_clips > 1 and not is_training:
# In this case, multiple clips are merged together in batch dimension which
# will be `B * num_test_clips`.
postprocessor_builder.add_fn(
fn=lambda x: tf.reshape(x, (-1, x.shape[-1])),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_reshape')
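# A minimal usage sketch added for illustration (not part of the original
# module): wires `add_audio` into the builders of a dataset factory. The
# `factory` argument is assumed to be a `BaseVideoDatasetFactory` subclass
# exposing the standard DMVR builders; all parameter values are examples only.
def _example_add_audio(factory):
  add_audio(
      parser_builder=factory.parser_builder,
      sampler_builder=factory.sampler_builder,
      postprocessor_builder=factory.postprocessor_builder,
      preprocessor_builder=factory.preprocessor_builder,
      is_training=True,
      num_samples=48000,  # One second of audio at the stored 48 kHz rate.
      sample_rate=48000,
      target_sample_rate=16000)  # Triggers FFT-based resampling.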
def add_spectrogram(
preprocessor_builder: builders.PreprocessorBuilder,
postprocessor_builder: builders.PostprocessorBuilder,
input_feature_name: str = builders.AUDIO_FEATURE_NAME,
output_feature_name: str = builders.AUDIO_MEL_FEATURE_NAME,
is_training: bool = True,
sample_rate: int = 48000,
spectrogram_type: str = 'logmf',
frame_length: int = 2048,
frame_step: int = 1024,
num_features: int = 80,
lower_edge_hertz: float = 80.0,
upper_edge_hertz: float = 7600.0,
preemphasis: Optional[float] = None,
normalize_audio: bool = False,
num_test_clips: int = 1):
"""Adds functions to process audio spectrogram feature to builders.
Note that this function does not extract and parse audio feature. Instead, it
should be used after a `add_audio` function. The output spectrogram is of the
shape [batch_size, num_frames, num_features].
Args:
preprocessor_builder: An instance of a `builders.PreprocessorBuilder`.
postprocessor_builder: An instance of a `builders.PostprocessorBuilder`.
input_feature_name: Name of the feature in the input features dictionary.
Exposing this as an argument allows using this function for different
audio features.
output_feature_name: Name of the feature in the output features dictionary.
Exposing this as an argument allows using this function for different
audio features.
is_training: If the current mode is training or not.
sample_rate: The sample rate of the input audio.
spectrogram_type: The type of the spectrogram to be extracted from the
waveform. Can be either `spectrogram`, `logmf`, and `mfcc`.
frame_length: The length of each spectrogram frame.
frame_step: The stride of spectrogram frames.
num_features: The number of spectrogram features.
lower_edge_hertz: Lowest frequency to consider.
upper_edge_hertz: Highest frequency to consider.
preemphasis: The strength of pre-emphasis on the waveform. If None, no
pre-emphasis will be applied.
normalize_audio: Whether to normalize the waveform or not.
num_test_clips: Number of test clips (1 by default). If more than 1, this
will sample multiple linearly spaced clips within each audio at test time.
If 1, then a single clip in the middle of the audio is sampled. The clips
are aggregated in the batch dimension.
"""
# Validate parameters.
if is_training and num_test_clips != 1:
logging.info('`num_test_clips` %d is ignored since `is_training` is true.',
num_test_clips)
# Extract audio spectrograms.
preprocessor_builder.add_fn(
functools.partial(
processors.compute_audio_spectrogram,
num_subclips=num_test_clips,
sample_rate=sample_rate,
spectrogram_type=spectrogram_type,
frame_length=frame_length,
frame_step=frame_step,
num_features=num_features,
lower_edge_hertz=lower_edge_hertz,
upper_edge_hertz=upper_edge_hertz,
normalize=normalize_audio,
preemphasis=preemphasis,
audio_feature_name=input_feature_name,
spectrogram_feature_name=output_feature_name))
if num_test_clips > 1 and not is_training:
# In this case, multiple clips are merged together in batch dimension which
# will be `B * num_test_clips`.
postprocessor_builder.add_fn(
fn=lambda x: tf.reshape(x, (-1, x.shape[-2], x.shape[-1])),
feature_name=output_feature_name,
fn_name=f'{output_feature_name}_reshape')
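# A minimal usage sketch added for illustration (not part of the original
# module): `add_spectrogram` does not parse audio itself, so it is assumed that
# `add_audio` has already been applied to the same factory (see the sketch
# above). Parameter values are illustrative only.
def _example_add_spectrogram(factory, is_training=True):
  add_spectrogram(
      preprocessor_builder=factory.preprocessor_builder,
      postprocessor_builder=factory.postprocessor_builder,
      is_training=is_training,
      sample_rate=48000,
      spectrogram_type='logmf',  # Log mel-filterbank features.
      num_features=80)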
| dmvr-master | dmvr/modalities.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| dmvr-master | dmvr/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sources for reading and decoding raw binary data files."""
import abc
from typing import Optional, Union
import tensorflow as tf
class Source(abc.ABC):
"""Base class for sources.
Sources are objects reading from binary files and generating an initial
`tf.data.Dataset` with the serialized examples. Deserializing the examples is
not responsibility of the `Source` (it should be done by the parser).
For each different type of storage (e.g. TFRecords, image files, text files),
a subclass can be implemented.
"""
@abc.abstractmethod
def load_and_decode_shard(
self,
shard: Union[str, tf.Tensor] # Shape () and type `tf.string`.
) -> tf.data.Dataset:
"""Decodes a single raw input file into a `tf.data.Dataset`.
Args:
shard: Path to a single file with encoded data.
Returns:
A `tf.data.Dataset` object containing a key (this can be a file name,
index, empty or any other useful bits) and a raw example (both encoded as
bytes). Current supported types of examples are `tf.train.Example` and
`tf.train.SequenceExample` (see `builders.BaseParserBuilder`).
"""
class TFRecordsSource(Source):
"""Source for TFRecords data format."""
def __init__(self, compression_type: Optional[str] = None):
self._compression_type = compression_type
def load_and_decode_shard(
self,
shard: Union[str, tf.Tensor] # Shape () and type `tf.string`.
) -> tf.data.Dataset:
ds = tf.data.TFRecordDataset(shard, compression_type=self._compression_type)
# TFRecords do not provide an index or key per example. Use shard path as
# key, since it can be useful later for retrieval.
key = shard.encode('utf-8') if isinstance(shard, str) else shard
ds = ds.map(lambda example: (key, example))
return ds
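# A minimal usage sketch added for illustration (not part of the original
# module): reads a single TFRecord shard and yields (key, serialized example)
# pairs. The shard path below is a placeholder.
def _example_tfrecords_source(shard_path: str = '/path/to/shard.tfrecord'):
  source = TFRecordsSource()
  ds = source.load_and_decode_shard(shard_path)
  for key, raw_example in ds.take(1):
    # `key` is the shard path as bytes and `raw_example` is a serialized proto
    # (e.g. a `tf.train.SequenceExample`) to be decoded by a parser builder.
    return key, raw_example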
| dmvr-master | dmvr/sources.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils."""
from typing import Optional, Sequence
import tensorflow as tf
# ----------------------------------------------------------------------
# ------------------------ Experimental utils. -------------------------
# ----------------------------------------------------------------------
def combine_datasets(datasets: Sequence[tf.data.Dataset],
batch_size: int = 1,
weights: Optional[Sequence[float]] = None,
seed: Optional[int] = None) -> tf.data.Dataset:
"""Combines multiple datasets into a single one.
THIS IS AN EXPERIMENTAL FEATURE AND MIGHT BE REMOVED AT ANY TIME.
This function combines multiple datasets into a single one by sampling
elements from each one with the given probabilities. All input datasets must
have the same structure and Tensor shapes.
Args:
datasets: A list of batched datasets. All datasets should have the same
structure and Tensor shapes.
batch_size: Batch size of the resulting dataset.
weights: A list of the same length as datasets of floats where `weights[i]`
represents the probability with which an element should be sampled from
`datasets[i]`. If `None`, defaults to a uniform distribution across
datasets.
seed: A deterministic seed to use when sampling.
Returns:
A dataset that interleaves elements from datasets at random, according to
weights if provided, otherwise with uniform probability. The resulting
dataset is batched.
"""
datasets = [ds.unbatch() for ds in datasets]
combined_ds = tf.data.experimental.sample_from_datasets(
datasets, weights, seed)
return combined_ds.batch(batch_size, drop_remainder=True)
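# A minimal usage sketch added for illustration (not part of the original
# module): combines two toy batched datasets, drawing roughly 80% of the
# elements from the first and 20% from the second.
def _example_combine_datasets():
  ds_a = tf.data.Dataset.from_tensor_slices({'x': tf.zeros(100)}).batch(4)
  ds_b = tf.data.Dataset.from_tensor_slices({'x': tf.ones(100)}).batch(4)
  combined = combine_datasets([ds_a, ds_b], batch_size=8, weights=[0.8, 0.2])
  return next(iter(combined))  # A batch of 8 elements drawn from both sources.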
| dmvr-master | dmvr/utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for processing datasets features."""
from typing import Any, Optional, Sequence, Union
from dmvr import builders
from dmvr import tokenizers
import tensorflow as tf
# ----------------------------------------------------------------------
# ----------------------------- Utilities. -----------------------------
# ----------------------------------------------------------------------
def _get_random_sampling_offset(sequence: tf.Tensor,
num_steps: int,
stride: int,
seed: Optional[int] = None) -> tf.Tensor:
"""Calculates the initial offset for a sequence where all steps will fit.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_steps: Number of steps (e.g. frames) to take.
stride: Distance to sample between timesteps.
seed: A deterministic seed to use when sampling.
Returns:
The first index to begin sampling from. A best effort is made to provide a
starting index such that all requested steps fit within the sequence (i.e.
    `offset + 1 + (num_steps - 1) * stride < len(sequence)`). If this is not
satisfied, the starting index is always 0.
"""
sequence_length = tf.shape(input=sequence)[0]
max_offset = tf.maximum(sequence_length - (num_steps - 1) * stride, 1)
return tf.random.uniform((),
maxval=tf.cast(max_offset, dtype=tf.int32),
dtype=tf.int32,
seed=seed)
def sample_or_pad_sequence_indices(sequence: tf.Tensor, num_steps: int,
repeat_sequence: bool, stride: int,
offset: int) -> tf.Tensor:
"""Returns indices to take for sampling or padding a sequence to fixed size.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_steps: Number of steps (e.g. frames) to take.
    repeat_sequence: A boolean indicating whether the sequence will be repeated
      to have enough steps for sampling. If `False`, a runtime error is thrown
      if `num_steps` * `stride` is longer than the sequence length.
stride: Distance to sample between timesteps.
offset: Offset(s) to be used for sampling.
Returns:
Indices to gather from the sequence tensor to get a fixed size sequence.
"""
sequence_length = tf.shape(input=sequence)[0]
sel_idx = tf.range(sequence_length)
if repeat_sequence:
# Repeats sequence until `num_steps` are available in total.
num_repeats = tf.cast(
tf.math.ceil(
tf.divide(
tf.cast(num_steps * stride + offset, dtype=tf.float32),
tf.cast(sequence_length, dtype=tf.float32))), dtype=tf.int32)
sel_idx = tf.tile(sel_idx, [num_repeats])
steps = tf.range(offset, offset + num_steps * stride, stride)
return tf.gather(sel_idx, steps)
# ----------------------------------------------------------------------
# ----------------- Methods used in sample functions. ------------------
# ----------------------------------------------------------------------
def sample_linspace_sequence(sequence: tf.Tensor,
num_windows: int,
num_steps: int,
stride: int = 1) -> tf.Tensor:
"""Samples `num_windows` segments from sequence with linearly spaced offsets.
The samples are concatenated in a single Tensor in order to have the same
format structure per timestep (e.g. a single frame). If `num_steps` * `stride`
is bigger than the number of timesteps, the sequence is repeated. This
function can be used in evaluation to extract enough segments in order to span
the entire sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_windows: Number of windows to be retrieved from the sequence.
num_steps: Number of steps (e.g. frames) to take in each window.
stride: Distance to sample between timesteps.
Returns:
A single tensor with first dimension `num_windows` * `num_steps`. The tensor
contains the concatenated list of `num_windows` tensors which offsets have
been linearly spaced from input.
"""
sequence_length = tf.shape(input=sequence)[0]
max_offset = tf.maximum(0, sequence_length - num_steps * stride)
offsets = tf.linspace(0.0, tf.cast(max_offset, tf.float32), num_windows)
offsets = tf.cast(offsets, tf.int32)
all_indices = []
for i in range(num_windows):
all_indices.append(
sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
repeat_sequence=True, # Will repeat the sequence if request more.
stride=stride,
offset=offsets[i]))
indices = tf.concat(all_indices, axis=0)
indices.set_shape((num_windows * num_steps,))
output = tf.gather(sequence, indices)
return output
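# A minimal worked example added for illustration (not part of the original
# module): with 10 timesteps, 3 windows of 4 steps and stride 1, the window
# offsets are linearly spaced as [0, 3, 6], so the output concatenates steps
# [0..3], [3..6] and [6..9] into a single tensor of 12 elements.
def _example_sample_linspace_sequence():
  sequence = tf.range(10)
  # Result: [0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9].
  return sample_linspace_sequence(sequence, num_windows=3, num_steps=4)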
def sample_sequence(
sequence: tf.Tensor,
num_steps: int,
random: bool,
stride: int = 1,
seed: Optional[int] = None,
state: Optional[builders.ProcessorState] = None) -> tf.Tensor:
"""Samples a single segment of size `num_steps` from a given sequence.
  If `random` is not `True`, this function will simply sample the central window
  of the sequence. Otherwise, a random offset will be chosen such that the
  desired `num_steps` can be extracted from the sequence.
  In order to keep coherence among different sequences sampled with
  `random=True` (e.g. image and audio), an optional state is accepted as a
  parameter and used to keep track of the first offset, using a proportional
  offset to sample from the second sequence.
Args:
sequence: Any tensor where the first dimension is timesteps.
num_steps: Number of steps (e.g. frames) to take.
random: A boolean indicating whether to random sample the single window. If
`True`, the offset is randomized. If `False`, the middle frame minus half
of `num_steps` is the first frame.
stride: Distance to sample between timesteps.
seed: A deterministic seed to use when sampling.
state: A mutable dictionary where keys are strings. The dictionary might
contain 'sample_offset_proportion' as key with metadata useful for
sampling. It will be modified with added metadata if needed. This can be
used to keep consistency between sampling of different sequences.
Returns:
A single tensor with first dimension `num_steps` with the sampled segment.
"""
sequence_length = tf.shape(input=sequence)[0]
sequence_length = tf.cast(sequence_length, tf.float32)
if random:
if state and 'sample_offset_proportion' in state:
# Read offset from state to ensure consistent offsets for different
# modalities.
offset = state['sample_offset_proportion'] * sequence_length
offset = tf.cast(tf.math.round(offset), tf.int32)
else:
offset = _get_random_sampling_offset(
sequence=sequence,
num_steps=num_steps,
stride=stride,
seed=seed)
if state is not None:
# Update state.
sample_offset_proportion = tf.cast(offset, tf.float32) / sequence_length
state['sample_offset_proportion'] = sample_offset_proportion
else:
offset = tf.maximum(
0, tf.cast((sequence_length - num_steps * stride) // 2, tf.int32))
indices = sample_or_pad_sequence_indices(
sequence=sequence,
num_steps=num_steps,
repeat_sequence=True, # Will repeat the sequence if request more.
stride=stride,
offset=offset)
indices.set_shape((num_steps,))
output = tf.gather(sequence, indices)
return output
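# A minimal worked example added for illustration (not part of the original
# module): deterministic sampling takes the middle window, while `random=True`
# with a shared `state` keeps two sequences of the same length (e.g. RGB and
# flow) aligned on the same offset.
def _example_sample_sequence():
  sequence = tf.range(10)
  middle = sample_sequence(sequence, num_steps=4, random=False)  # [3, 4, 5, 6].
  state = {}
  rgb = sample_sequence(sequence, num_steps=4, random=True, state=state)
  flow = sample_sequence(sequence, num_steps=4, random=True, state=state)
  # `rgb` and `flow` use the same random offset thanks to the shared state.
  return middle, rgb, flow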
def sample_or_pad_non_sorted_sequence(
sequence: tf.Tensor,
max_num_steps: int,
pad_value: Any,
random: bool,
seed: Optional[int] = None,
state: Optional[builders.ProcessorState] = None,
state_key: str = 'sample_sequence_random_perm') -> tf.Tensor:
"""Samples or pads (with `pad_value`) elements from the input sequence.
The input sequence can be multidimensional, but the sampling or pads will
only happen in the first dimension.
Args:
sequence: Any tensor where the first dimension is timesteps.
    max_num_steps: Maximum number of steps to be kept from the input. If the
      input contains more, it is sampled; if fewer, it is padded.
pad_value: Value to be used when padding. Same type as `sequence`.
random: A boolean indicating whether to random sample from the input. If
`True`, a random permutation is taken. If `False`, the first
      `min(max_num_steps, sequence_length)` elements are taken.
seed: A deterministic seed to use when sampling.
state: A mutable dictionary where keys are strings. The dictionary might
contain an entry with `state_key` as key with metadata useful for
sampling. It will be modified with added metadata if needed. This can be
used to keep consistency between sampling of different sequences. Note
that a runtime error will be raised in case state is provided but the
      sequences that one tries to sync are of different lengths.
state_key: Name of the state entry that controls the random sampling.
Returns:
A single tensor with first dimension `max_num_steps` with the sampled
elements.
Raises:
tf.errors.InvalidArgumentError: if state is provided but the sequences that
one tries to sync are of different lengths.
"""
sequence_length = tf.shape(input=sequence)[0]
if random:
if state and state_key in state:
# Read offset from state to ensure consistent offsets for different
# modalities.
random_perm = state[state_key]
tf.debugging.assert_equal(
sequence_length, tf.shape(input=random_perm)[0],
('Trying to sync the sampling of two sequences that do not have the '
'same number of elements!'))
else:
random_perm = tf.argsort(tf.random.uniform((sequence_length,), seed=seed))
if state is not None:
state[state_key] = random_perm
sequence = tf.gather(sequence, random_perm)
padding_pattern = [[0, tf.maximum(0, max_num_steps - sequence_length)],]
num_dim = len(tf.shape(input=sequence))
if num_dim > 1:
padding_pattern.extend([[0, 0]] * (num_dim - 1))
return tf.pad(
tensor=sequence[:max_num_steps],
paddings=padding_pattern,
constant_values=pad_value)
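# A minimal worked example added for illustration (not part of the original
# module): pads two captions up to 4 entries with an empty-string pad value.
def _example_sample_or_pad_non_sorted_sequence():
  captions = tf.constant([b'a dog', b'a cat'])
  # Result: [b'a dog', b'a cat', b'', b''].
  return sample_or_pad_non_sorted_sequence(
      captions, max_num_steps=4, pad_value=b'', random=False)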
# ----------------------------------------------------------------------
# ----------------- Methods used in decode functions. ------------------
# ----------------------------------------------------------------------
def decode_jpeg(image_string: tf.Tensor, channels: int = 0) -> tf.Tensor:
"""Decodes JPEG raw bytes string into a RGB uint8 tensor.
Args:
image_string: A tensor of type strings with the raw JPEG bytes where the
first dimension is timesteps.
channels: Number of channels of the JPEG image. Allowed values are 0, 1 and
3. If 0, the number of channels will be calculated at runtime and no
static shape is set.
Returns:
A `tf.Tensor` of shape [T, H, W, C] of type `tf.uint8` with the decoded
images.
"""
return tf.map_fn(
lambda x: tf.image.decode_jpeg(x, channels=channels),
image_string, back_prop=False, dtype=tf.uint8)
# ----------------------------------------------------------------------
# --------------- Methods used in preprocess functions. ----------------
# ----------------------------------------------------------------------
def set_shape(
inputs: tf.Tensor,
shape: Union[tf.TensorShape, Sequence[Optional[int]]]) -> tf.Tensor:
"""Sets the shape of the given tensor and returns it."""
inputs.set_shape(shape)
return inputs
def crop_image(frames: tf.Tensor,
height: int,
width: int,
random: bool = False,
seed: Optional[int] = None,
state: Optional[builders.ProcessorState] = None) -> tf.Tensor:
"""Crops the images in the given sequence of images.
If requested size is bigger than image size, image is padded with 0. If not
random cropping, a central crop is performed.
Args:
frames: A tensor of dimension [timesteps, input_h, input_w, channels].
height: Cropped image height.
width: Cropped image width.
random: A boolean indicating if crop should be randomized.
seed: A deterministic seed to use when random cropping.
state: A mutable dictionary where keys are strings. The dictionary might
contain 'crop_offset_proportion' as key with metadata useful for cropping.
It will be modified with added metadata if needed. This can be used to
keep consistency between cropping of different sequences of images.
Returns:
A tensor of shape [timesteps, output_h, output_w, channels] of same type as
input with the cropped images.
"""
if random:
# Random spatial crop. tf.image.random_crop is not used since the offset is
# needed to ensure consistency between crops on different modalities.
shape = tf.shape(input=frames)
# If a static_shape is available (e.g. when using this method from add_image
# method), it will be used to have an output tensor with static shape.
static_shape = frames.shape.as_list()
seq_len = shape[0] if static_shape[0] is None else static_shape[0]
channels = shape[3] if static_shape[3] is None else static_shape[3]
size = tf.convert_to_tensor(value=(seq_len, height, width, channels))
if state and 'crop_offset_proportion' in state:
# Use offset set by a previous cropping: [0, offset_h, offset_w, 0].
offset = state['crop_offset_proportion'] * tf.cast(shape, tf.float32)
offset = tf.cast(tf.math.round(offset), tf.int32)
else:
# Limit of possible offset in order to fit the entire crop:
# [1, input_h - target_h + 1, input_w - target_w + 1, 1].
limit = shape - size + 1
offset = tf.random.uniform(
shape=(4,),
dtype=tf.int32,
maxval=tf.int32.max,
seed=seed) % limit # [0, offset_h, offset_w, 0]
if state is not None:
# Update state.
offset_proportion = tf.cast(offset, tf.float32) / tf.cast(
shape, tf.float32)
state['crop_offset_proportion'] = offset_proportion
frames = tf.slice(frames, offset, size)
else:
# Central crop or pad.
frames = tf.image.resize_with_crop_or_pad(frames, height, width)
return frames
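# A minimal usage sketch added for illustration (not part of the original
# module): a central 224x224 crop at eval time versus random crops at training
# time, where a shared `state` keeps RGB and flow frames (assumed to have the
# same spatial size) cropped at the same offset.
def _example_crop_image(rgb, flow):
  central = crop_image(rgb, height=224, width=224, random=False)
  state = {}
  rgb_crop = crop_image(rgb, height=224, width=224, random=True, state=state)
  flow_crop = crop_image(flow, height=224, width=224, random=True, state=state)
  return central, rgb_crop, flow_crop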
def resize_smallest(frames: tf.Tensor,
min_resize: int,
is_flow: bool = False,
method: str = tf.image.ResizeMethod.BILINEAR) -> tf.Tensor:
"""Resizes frames so that `min(height, width)` is equal to `min_resize`.
  This function will do nothing if `min(height, width)` is already equal to
  `min_resize`, which saves compute time.
Args:
frames: A tensor of dimension [timesteps, input_h, input_w, channels].
min_resize: Minimum size of the final image dimensions.
is_flow: If is flow, will modify the raw values to account for the resize.
For example, if the flow image is resized by a factor k, we need to
multiply the flow values by the same factor k since one pixel displacement
in the resized image corresponds to only 1/k pixel displacement in the
original image.
method: A resizing method.
Returns:
A tensor of shape [timesteps, output_h, output_w, channels] of same type as
input, where `min(output_h, output_w)` is `min_resize`.
"""
if is_flow and frames.dtype != tf.float32:
raise ValueError('If `is_flow`, frames should be given in `tf.float32`.')
shape = tf.shape(input=frames)
input_h = shape[1]
input_w = shape[2]
output_h = tf.maximum(min_resize, (input_h * min_resize) // input_w)
output_w = tf.maximum(min_resize, (input_w * min_resize) // input_h)
def resize_fn():
frames_resized = tf.image.resize(
frames, (output_h, output_w), method=method)
return tf.cast(frames_resized, frames.dtype)
should_resize = tf.math.logical_or(tf.not_equal(input_w, output_w),
tf.not_equal(input_h, output_h))
frames = tf.cond(
pred=should_resize, true_fn=resize_fn, false_fn=lambda: frames)
if is_flow:
# Apply a multiplier to keep the right magnitude in the flow.
frames = frames * tf.cast(output_h / input_h, tf.float32)
return frames
def random_flip_left_right(frames: tf.Tensor,
seed: Optional[int] = None,
state: Optional[builders.ProcessorState] = None,
is_flow: bool = False) -> tf.Tensor:
"""Flips all the frames (consistently) with a probability of 50%.
Args:
frames: A tensor of dimension [timesteps, input_h, input_w, channels].
seed: A seed to use for the random sampling.
state: A mutable dictionary where keys are strings. The dictionary might
contain 'flip_left_right_is_flipped' as key with metadata useful for
flipping. It will be modified with added metadata if needed. This can be
used to keep consistency between flipping of different sequences of
images.
is_flow: If is flow and the image is flipped, the horizontal component
of the flow will be multiplied by -1 to account for the symmetry.
Returns:
    A tensor of shape [timesteps, output_h, output_w, channels], possibly
    flipped left to right.
"""
if state and 'flip_left_right_is_flipped' in state:
is_flipped = state['flip_left_right_is_flipped']
else:
is_flipped = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32,
seed=seed)
if state is not None:
# Update state.
state['flip_left_right_is_flipped'] = is_flipped
frames = tf.cond(pred=tf.equal(is_flipped, 1),
true_fn=lambda: tf.image.flip_left_right(frames),
false_fn=lambda: frames)
if is_flow:
# Multiply horizontal component by -1.0 if `is_flipped`.
channel_mult = tf.constant([-1.0, 1.0, 1.0])[None, None, None, :]
frames = tf.cond(pred=tf.equal(is_flipped, 1),
true_fn=lambda: channel_mult * frames,
false_fn=lambda: frames)
return frames
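# A minimal usage sketch added for illustration (not part of the original
# module): flips RGB and flow together. Sharing `state` guarantees that both
# are flipped (or not) consistently, and `is_flow=True` negates the horizontal
# flow component whenever a flip happens.
def _example_random_flip_left_right(rgb, flow):
  state = {}
  rgb = random_flip_left_right(rgb, state=state)
  flow = random_flip_left_right(flow, state=state, is_flow=True)
  return rgb, flow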
def normalize_image(frames: tf.Tensor,
zero_centering_image: bool,
dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
"""Normalizes images.
Args:
frames: A tensor of numbers.
zero_centering_image: If `True`, results are in [-1, 1], if `False`, results
are in [0, 1].
dtype: Type of output tensor.
Returns:
A Tensor of same shape as the input and of the given type.
"""
frames = tf.cast(frames, dtype)
if zero_centering_image:
frames = frames * (2.0 / 255.0) - 1.0
else:
frames /= 255.0
return frames
def scale_jitter_augm(
frames: tf.Tensor,
min_scale_factor: float = 0.8,
max_scale_factor: float = 1.2,
prob: float = 0.8,
seed: Optional[int] = None,
state: Optional[builders.ProcessorState] = None,
is_flow: bool = False,
method: str = tf.image.ResizeMethod.BILINEAR,
) -> tf.Tensor:
"""Applies scale jitter to videos with probability `prob`.
  In detail, this will independently sample a scale factor for the height and
  the width of the frames and rescale the video accordingly.
Args:
frames: A tensor of dimension [timesteps, input_h, input_w, channels].
min_scale_factor: Minimum scale factor to sample.
max_scale_factor: Maximum scale factor to sample.
prob: The probability that the scale of the video is going to be jittered.
seed: A seed to use for the random sampling.
state: A mutable dictionary where keys are strings. The dictionary might
contain 'scale_jitter_augm_info' as key with metadata useful for
jittering. It will be modified with added metadata if needed. This can be
used to keep consistency between jittering of different sequences of
images.
is_flow: If is flow, will modify the raw values to account for the resize.
For example, if the flow image is resized by a factor k, we need to
multiply the flow values by the same factor k since one pixel displacement
in the resized image corresponds to only 1/k pixel displacement in the
original image.
method: A resizing method.
Returns:
    A tensor of shape [timesteps, output_h, output_w, channels], of the same
    type as the input, whose spatial dimensions may have been modified.
"""
if not 0. <= prob <= 1.0:
raise ValueError(f'`prob` should be in [0, 1] but {prob} was given.')
def scale_augment(frames: tf.Tensor,
h_scale: tf.float32,
w_scale: tf.float32) -> tf.Tensor:
"""Do scale jitter."""
_, input_height, input_width, _ = tf.unstack(tf.shape(input=frames))
rdm_resize_height = tf.cast(
h_scale * tf.cast(input_height, tf.float32), tf.int32)
rdm_resize_width = tf.cast(
w_scale * tf.cast(input_width, tf.float32), tf.int32)
resize_shape = tf.stack([rdm_resize_height, rdm_resize_width])
frames = tf.cast(
tf.image.resize(frames, resize_shape, method=method),
frames.dtype)
if is_flow:
channel_mult = tf.stack([h_scale, w_scale, 1.0])[None, None, None, :]
# Apply a multiplier to keep the right magnitude in the flow.
frames = frames * channel_mult
return frames
if state and 'scale_jitter_augm_info' in state:
h_scale, w_scale, coin_toss = state['scale_jitter_augm_info']
else:
h_scale = tf.random.uniform(
[], minval=min_scale_factor, maxval=max_scale_factor, dtype=tf.float32,
seed=seed)
w_scale = tf.random.uniform(
[], minval=min_scale_factor, maxval=max_scale_factor, dtype=tf.float32,
seed=seed)
coin_toss = tf.random.uniform(
[], minval=0, maxval=1, dtype=tf.float32, seed=seed)
if state is not None:
# Update state.
state['scale_jitter_augm_info'] = (h_scale, w_scale, coin_toss)
frames = tf.cond(
pred=tf.less(coin_toss, tf.cast(prob, tf.float32)),
true_fn=lambda: scale_augment(frames, h_scale=h_scale, w_scale=w_scale),
false_fn=lambda: frames)
return frames
def color_default_augm(frames: tf.Tensor,
zero_centering_image: bool = False,
prob_color_augment: float = 0.8,
prob_color_drop: float = 0.0,
seed: Optional[int] = None):
"""Standard color augmentation for videos.
Args:
frames: A float32 tensor of shape [timesteps, input_h, input_w, channels].
zero_centering_image: If `True`, results are in [-1, 1], if `False`, results
are in [0, 1].
prob_color_augment: Probability of applying color augmentation.
    prob_color_drop: Probability of dropping the colors to grayscale.
seed: A seed to use for the random sampling.
Returns:
    A tensor of the same shape as the input with colors possibly altered.
"""
if frames.dtype != tf.float32:
raise ValueError(f'`frames` should be in float32 (but was {frames.dtype}).')
if not 0. <= prob_color_augment <= 1.0:
raise ValueError(
f'`prob_color_augment` ({prob_color_augment} given) should be in '
'[0, 1].')
if not 0. <= prob_color_drop <= 1.0:
raise ValueError(
f'`prob_color_drop` ({prob_color_drop} given) should be in [0, 1].')
def color_augment(video: tf.Tensor) -> tf.Tensor:
"""Do standard color augmentations."""
# Note the same augmentation will be applied to all frames of the video.
if zero_centering_image:
video = 0.5 * (video + 1.0)
video = tf.image.random_brightness(video, max_delta=32. / 255.)
video = tf.image.random_saturation(video, lower=0.6, upper=1.4)
video = tf.image.random_contrast(video, lower=0.6, upper=1.4)
video = tf.image.random_hue(video, max_delta=0.2)
video = tf.clip_by_value(video, 0.0, 1.0)
if zero_centering_image:
video = 2 * (video-0.5)
return video
def color_drop(video: tf.Tensor) -> tf.Tensor:
"""Do color drop."""
video = tf.image.rgb_to_grayscale(video)
video = tf.tile(video, [1, 1, 1, 3])
return video
  # Possibly apply color augmentation.
coin_toss_color_augment = tf.random.uniform(
[], minval=0, maxval=1, dtype=tf.float32, seed=seed)
frames = tf.cond(
pred=tf.less(coin_toss_color_augment,
tf.cast(prob_color_augment, tf.float32)),
true_fn=lambda: color_augment(frames),
false_fn=lambda: frames)
  # Possibly apply color drop.
coin_toss_color_drop = tf.random.uniform(
[], minval=0, maxval=1, dtype=tf.float32, seed=seed)
frames = tf.cond(
pred=tf.less(coin_toss_color_drop, tf.cast(prob_color_drop, tf.float32)),
true_fn=lambda: color_drop(frames),
false_fn=lambda: frames)
return frames
def space_to_depth(frames: tf.Tensor,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> tf.Tensor:
"""Performs per frame space to depth.
Args:
frames: A tensor of dimension [T, H, W, C].
temporal_block_size: Size of the block for temporal dimension.
spatial_block_size: Size of the block for spatial dimensions.
Returns:
A tensor of shape [T / t_b, H / s_b, W / s_b, t_b * s_b * s_b * C] with the
same type as the input, where t_b is the `temporal_block_size` and s_b is
the `spatial_block_size`.
"""
t, h, w, c = frames.shape.as_list()
frames = tf.reshape(frames, (
t // temporal_block_size, temporal_block_size, h // spatial_block_size,
spatial_block_size, w // spatial_block_size, spatial_block_size, c))
frames = tf.transpose(a=frames, perm=(0, 2, 4, 1, 3, 5, 6))
frames = tf.reshape(frames, (
t // temporal_block_size, h // spatial_block_size,
w // spatial_block_size,
temporal_block_size * (spatial_block_size ** 2) * c))
return frames
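# A minimal worked example added for illustration (not part of the original
# module): with a temporal block of 2 and a spatial block of 2, a video of
# shape [8, 224, 224, 3] becomes [4, 112, 112, 24], since the new channel size
# is 2 * 2 * 2 * 3 = 24.
def _example_space_to_depth():
  frames = tf.zeros((8, 224, 224, 3))
  return space_to_depth(frames, temporal_block_size=2, spatial_block_size=2)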
def crop_or_pad_words(words: tf.Tensor,
max_num_words: int,
pad_value: int = 0) -> tf.Tensor:
"""Crop or pad given sequence of word indices.
Args:
words: Tensor of shape [T, sentence_length] of word indices.
max_num_words: Maximum number of words in final result.
pad_value: Value to be used in paddings.
Returns:
A Tensor of shape [T, max_num_words].
"""
num_words = tf.shape(input=words)[1]
words = tf.pad(
tensor=words[:, :max_num_words],
paddings=((0, 0), (0, tf.maximum(0, max_num_words - num_words))),
constant_values=pad_value)
words.set_shape((None, max_num_words))
return words
def tokenize(features: builders.FeaturesDict,
tokenizer: tokenizers.TextTokenizer,
raw_string_name: str,
tokenized_name: str,
prepend_bos: bool,
append_eos: bool,
max_num_tokens: int,
keep_raw_string: bool) -> builders.FeaturesDict:
"""Tokenize raw string with tokenizer.
Args:
features: A dictionary of features.
tokenizer: An instance of a text tokenizer.
raw_string_name: The name of the raw string feature in features.
tokenized_name: The name of the desired tokenized feature in the output.
prepend_bos: Whether to prepend BOS in the tokenizer.
append_eos: Whether to append EOS in the tokenizer.
max_num_tokens: Number of tokens in final result. The tokenized sentence
      will be either cropped or padded using the tokenizer pad token ID.
keep_raw_string: Whether to keep the raw string in the output.
Returns:
A FeaturesDict containing the tokenized string.
"""
raw_caption = features[raw_string_name]
tokenized = tokenizer.string_tensor_to_indices(
raw_caption, prepend_bos=prepend_bos, append_eos=append_eos,
max_num_tokens=max_num_tokens)
if not keep_raw_string:
del features[raw_string_name]
features[tokenized_name] = tokenized
return features
def _preemphasis(audio: tf.Tensor, coef: float = 0.97) -> tf.Tensor:
"""Scale up the high frequency components in the waveform.
Args:
audio: Input waveform.
coef: Pre-emphasis coefficient.
Returns:
Pre-emphasized audio.
"""
return tf.concat([audio[:1], audio[1:] - coef * audio[:-1]], axis=0)
def compute_audio_spectrogram(
features: builders.FeaturesDict,
num_subclips: int = 1,
sample_rate: int = 48000,
spectrogram_type: str = 'logmf',
frame_length: int = 2048,
frame_step: int = 1024,
num_features: int = 80,
lower_edge_hertz: float = 80.0,
upper_edge_hertz: float = 7600.0,
preemphasis: Optional[float] = None,
normalize: bool = False,
    audio_feature_name: str = builders.AUDIO_FEATURE_NAME,
spectrogram_feature_name: str = builders.AUDIO_MEL_FEATURE_NAME,
fft_output_conversion: str = 'magnitude',
) -> builders.FeaturesDict:
"""Computes audio spectrograms.
Args:
features: A dictionary of features.
num_subclips: Number of test clips (1 by default). If more than 1, this will
sample multiple linearly spaced clips within each audio at test time.
If 1, then a single clip in the middle of the audio is sampled. The clips
      are aggregated in the batch dimension.
sample_rate: The sample rate of the input audio.
spectrogram_type: The type of the spectrogram to be extracted from the
waveform. Can be either `spectrogram`, `logmf`, and `mfcc`.
    frame_length: The length of each spectrogram frame.
frame_step: The stride of spectrogram frames.
num_features: The number of spectrogram features.
lower_edge_hertz: Lowest frequency to consider.
upper_edge_hertz: Highest frequency to consider.
preemphasis: The strength of pre-emphasis on the waveform. If None, no
pre-emphasis will be applied.
normalize: Whether to normalize the waveform or not.
audio_feature_name: The name of the raw audio feature in features.
spectrogram_feature_name: The name of the spectrogram feature in features.
fft_output_conversion: The string indicating the output conversion function.
Currently, only `magnitude` and `magnitude_squared` are supported.
Returns:
A FeaturesDict containing the extracted spectrograms.
Raises:
    ValueError: if `spectrogram_type` is not one of `spectrogram`, `logmf`, or
      `mfcc`.
"""
if spectrogram_type not in ['spectrogram', 'logmf', 'mfcc']:
raise ValueError('Spectrogram type should be one of `spectrogram`, '
'`logmf`, or `mfcc`, got {}'.format(spectrogram_type))
if fft_output_conversion not in ['magnitude', 'magnitude_squared']:
raise ValueError(
'FFT output conversion should be one of `magnitude` or '
        '`magnitude_squared`, got {}'.format(fft_output_conversion))
raw_audio = features[audio_feature_name]
if normalize:
raw_audio /= (
tf.reduce_max(tf.abs(raw_audio), axis=-1, keepdims=True) + 1e-8)
features[audio_feature_name] = raw_audio
if num_subclips > 1:
raw_audio = tf.reshape(raw_audio, [num_subclips, -1])
if preemphasis is not None:
raw_audio = _preemphasis(raw_audio, preemphasis)
def _extract_spectrogram(
waveform: tf.Tensor,
spectrogram_type: str) -> tf.Tensor:
stfts = tf.signal.stft(waveform,
frame_length=frame_length,
frame_step=frame_step,
fft_length=frame_length,
window_fn=tf.signal.hann_window,
pad_end=True)
if fft_output_conversion == 'magnitude_squared':
stfts = tf.square(stfts)
spectrograms = tf.abs(stfts)
if spectrogram_type == 'spectrogram':
return spectrograms[..., :num_features]
# Warp the linear scale spectrograms into the mel-scale.
num_spectrogram_bins = stfts.shape[-1]
linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
num_features, num_spectrogram_bins, sample_rate, lower_edge_hertz,
upper_edge_hertz)
mel_spectrograms = tf.tensordot(
spectrograms, linear_to_mel_weight_matrix, 1)
mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(
linear_to_mel_weight_matrix.shape[-1:]))
# Compute a stabilized log to get log-magnitude mel-scale spectrograms.
log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)
if spectrogram_type == 'logmf':
return log_mel_spectrograms
# Compute MFCCs from log_mel_spectrograms and take the first 13.
mfccs = tf.signal.mfccs_from_log_mel_spectrograms(
log_mel_spectrograms)[..., :13]
return mfccs
spectrogram = _extract_spectrogram(raw_audio, spectrogram_type)
features[spectrogram_feature_name] = spectrogram
return features
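# A minimal worked example added for illustration (not part of the original
# module): computes 80-bin log mel-filterbank features from one second of a
# 440 Hz sine wave sampled at 48 kHz. With `frame_step=1024` and `pad_end=True`
# in the underlying STFT, the output has ceil(48000 / 1024) = 47 frames, i.e. a
# [47, 80] spectrogram.
def _example_compute_audio_spectrogram():
  t = tf.range(48000, dtype=tf.float32) / 48000.0
  waveform = tf.sin(2.0 * 3.14159265 * 440.0 * t)
  features = {builders.AUDIO_FEATURE_NAME: waveform}
  features = compute_audio_spectrogram(
      features,
      sample_rate=48000,
      spectrogram_type='logmf',
      audio_feature_name=builders.AUDIO_FEATURE_NAME,
      spectrogram_feature_name=builders.AUDIO_MEL_FEATURE_NAME)
  return features[builders.AUDIO_MEL_FEATURE_NAME]  # Shape [47, 80].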
def _resample_audio_fft(
x: tf.Tensor,
in_sample_rate: int,
out_sample_rate: int,
resolution_bits: Optional[float] = None) -> tf.Tensor:
"""Resample audio using FFTs.
Args:
x: Input audio signal.
in_sample_rate: The original sample rate of the input audio.
out_sample_rate: The target sample rate.
resolution_bits: Resolution bits used to scale the FFTs. If None no scaling
is used.
Returns:
The resampled audio signal.
"""
axis = -1 # tf.signal.fft operates on the innermost dimension of x
if in_sample_rate == out_sample_rate:
return x
scale = 2**(resolution_bits - 1) if resolution_bits else None
if scale:
x /= scale
factor = float(out_sample_rate) / in_sample_rate
original_size = tf.shape(x)[axis]
resampled_size = tf.cast(
tf.cast(original_size, dtype=tf.float32) * factor, dtype=tf.int32)
x_ = tf.signal.fft(tf.cast(x, dtype=tf.complex64))
shape = x.get_shape().as_list()
rank = len(shape)
sl_beg = [slice(None)] * rank
sl_end = [slice(None)] * rank
min_size = tf.minimum(resampled_size, original_size)
sl_beg[axis] = slice(0, (min_size + 1) // 2)
sl_end[axis] = slice(-(min_size - 1) // 2, None)
# Compute padding: empty unless upsampling (resampled_size > original_size).
pad_shape = list(shape)
pad_shape[axis] = tf.maximum(0, resampled_size - original_size)
padding = tf.zeros(pad_shape, dtype=x_.dtype)
y_ = tf.concat([x_[sl_beg], padding, x_[sl_end]], axis=axis)
y = tf.signal.ifft(y_)
y = tf.math.real(y) * factor
# Deliberately subtract 1 to prevent clipped values from going out of range.
y = tf.clip_by_value(y, -1, 1)
if scale:
y *= scale - 1
if shape[axis] is not None:
shape[axis] = int(shape[axis] * factor)
y.set_shape(shape)
return y
def resample_audio(
audio: tf.Tensor,
in_sample_rate: int,
out_sample_rate: int,
is_training: bool = True,
num_subclips: int = 1,
) -> tf.Tensor:
"""Resamples raw audio.
Args:
audio: Input audio signal.
in_sample_rate: The original sample rate of the input audio.
out_sample_rate: The target sample rate.
is_training: If the current stage is training.
num_subclips: Number of test clips (1 by default). If more than 1, this will
sample multiple linearly spaced clips within each audio at test time.
If 1, then a single clip in the middle of the audio is sampled. The clips
      are aggregated in the batch dimension.
Returns:
The resampled audio signal.
"""
if num_subclips > 1 and not is_training:
audio = tf.reshape(audio, [num_subclips, -1])
return _resample_audio_fft(audio, in_sample_rate, out_sample_rate)
# ----------------------------------------------------------------------
# --------------- Methods used in postprocess functions. ---------------
# ----------------------------------------------------------------------
def batched_video_transpose(batched_img: tf.Tensor,
perm: Sequence[int]) -> tf.Tensor:
"""Transposes the given Tensor (used to transpose on host instead of TPU)."""
return tf.transpose(a=batched_img, perm=perm)
def batched_space_to_depth(frames: tf.Tensor,
temporal_block_size: int = 1,
spatial_block_size: int = 1) -> tf.Tensor:
"""Performs per batch space to depth.
Args:
frames: A tensor of dimension [B, T, H, W, C].
temporal_block_size: Size of the block for temporal dimension.
spatial_block_size: Size of the block for spatial dimensions.
Returns:
A tensor of shape [B, T / t_b, H / s_b, W / s_b, t_b * s_b * s_b * C] with
the same type as the input, where t_b is the `temporal_block_size` and s_b
is the `spatial_block_size`.
"""
_, t, h, w, c = frames.shape.as_list()
frames = tf.reshape(frames, (
-1, t // temporal_block_size, temporal_block_size,
h // spatial_block_size, spatial_block_size, w // spatial_block_size,
spatial_block_size, c))
frames = tf.transpose(a=frames, perm=(0, 1, 3, 5, 2, 4, 6, 7))
frames = tf.reshape(frames, (
-1, t // temporal_block_size, h // spatial_block_size,
w // spatial_block_size,
temporal_block_size * (spatial_block_size ** 2) * c))
return frames
| dmvr-master | dmvr/processors.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for video_dataset."""
import os
from typing import List, Union
from dmvr import builders
from dmvr import sources
from dmvr import video_dataset
from parameterized import parameterized
import tensorflow as tf
class _TestTFRecordsSource(sources.Source):
def load_and_decode_shard(self,
shard: Union[str, tf.Tensor]) -> tf.data.Dataset:
ds = tf.data.TFRecordDataset(shard)
ds = ds.map(lambda example: (b'test_key', example))
return ds
class _TestVideoDatasetFactory(
video_dataset.BaseVideoDatasetFactory):
def __init__(self, shards: List[str]):
super().__init__(shards, builders.SequenceExampleParserBuilder,
_TestTFRecordsSource())
def _build(self,
sample_offset: int = 0,
multiply_by_2: bool = False,
reduce_max: bool = False,
keep_idx: bool = False):
self.parser_builder.parse_feature(
'sequence', tf.io.FixedLenSequenceFeature((), dtype=tf.int64))
if keep_idx:
self.parser_builder.parse_feature(
'idx', tf.io.FixedLenFeature((), dtype=tf.int64), is_context=True)
self.sampler_builder.add_fn(
lambda x: x[sample_offset:(sample_offset + 50)], 'sequence')
self.decoder_builder.add_fn(
lambda x: tf.cast(x, tf.uint8), 'sequence')
if multiply_by_2:
self.preprocessor_builder.add_fn(lambda x: 2 * x, 'sequence')
if reduce_max:
self.postprocessor_builder.add_fn(
lambda x: tf.reduce_max(input_tensor=x, axis=1), 'sequence')
class BaseVideoDatasetFactoryTest(tf.test.TestCase):
def setUp(self):
super().setUp()
shards = []
tmp_dir = self.get_temp_dir()
# Generate TFRecords of 5 shards with serialized SequenceExamples in the
# format ('sequence', [[0], [1], ..., [99]]) plus the shard and element
# indices.
for shard_idx in range(5):
shard = os.path.join(tmp_dir,
'example-{:05}-of-00005.tfrecord'.format(shard_idx))
shards.append(shard)
# Create fake `tf.train.SequenceExample`.
seq_example = tf.train.SequenceExample()
for i in range(100):
seq_example.feature_lists.feature_list.get_or_create(
'sequence').feature.add().int64_list.value[:] = [i]
with tf.io.TFRecordWriter(shard) as builder:
for idx in range(10):
seq_example.context.feature.get_or_create(
'idx').int64_list.value[:] = [shard_idx * 10 + idx]
builder.write(seq_example.SerializeToString())
self._factory = _TestVideoDatasetFactory(shards)
def test_basic(self):
ds = self._factory.configure().make_dataset(batch_size=2)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence']))
self.assertAllEqual(data['sequence'], [list(range(50))] * 2)
def test_configure(self):
ds = self._factory.configure(10, True, True).make_dataset(batch_size=2)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence']))
self.assertAllEqual(data['sequence'], [59 * 2] * 2)
def test_configure_exception(self):
with self.assertRaises(ValueError) as _:
self._factory.make_dataset(batch_size=2)
with self.assertRaises(ValueError) as _:
self._factory.configure().configure()
def test_keep_key(self):
ds = self._factory.configure().make_dataset(batch_size=2, keep_key=True)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()),
set(['sequence', builders.KEY_FEATURE_NAME]))
self.assertAllEqual(data[builders.KEY_FEATURE_NAME].shape, (2,))
self.assertEqual(data[builders.KEY_FEATURE_NAME][0].numpy(), b'test_key')
self.assertEqual(data[builders.KEY_FEATURE_NAME][1].numpy(), b'test_key')
def test_override_preprocess_fn(self):
# Data shouldn't be multiplied by 2.
ds = self._factory.configure(multiply_by_2=True).make_dataset(
batch_size=2, override_preprocess_fn=lambda x: x)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence']))
self.assertAllEqual(data['sequence'], [list(range(50))] * 2)
def test_no_shuffle(self):
# Set block_length to guarantee reading all examples from the first shard.
ds = self._factory.configure(keep_idx=True).tune(
block_length=5).make_dataset(shuffle=False, batch_size=5)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence', 'idx']))
self.assertAllEqual(data['idx'], [0, 1, 2, 3, 4])
def test_filter_read(self):
self._factory.filter_builder.add_filter_fn(
lambda fd: tf.not_equal(fd[builders.KEY_FEATURE_NAME], 'test_key'),
builders.Phase.READ)
ds = self._factory.configure().make_dataset(batch_size=10, keep_key=True)
with self.assertRaises(StopIteration) as _:
next(iter(ds))
@parameterized.expand(
((builders.Phase.PARSE,), (builders.Phase.SAMPLE,),
(builders.Phase.DECODE,), (builders.Phase.PREPROCESS,)))
def test_filter(self, phase):
def keep_even_idx(features_dict):
idx = features_dict['idx']
return tf.equal(idx % 2, 0)
self._factory.filter_builder.add_filter_fn(keep_even_idx, phase)
# Set block_length to guarantee reading examples in key order.
ds = self._factory.configure(keep_idx=True).tune(
block_length=10).make_dataset(shuffle=False, batch_size=10)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence', 'idx']))
self.assertAllEqual(data['idx'], range(0, 20, 2))
def test_filter_postprocess(self):
self._factory.filter_builder.add_filter_fn(
lambda fd: tf.not_equal(fd['idx'][0], 0), # Filter first batch.
builders.Phase.POSTPROCESS)
# Set block_length to guarantee reading examples in key order.
ds = self._factory.configure(keep_idx=True).tune(
block_length=10).make_dataset(shuffle=False, batch_size=10)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence', 'idx']))
self.assertAllEqual(data['idx'], range(10, 20))
def test_ignore_processing_errors(self):
def fail_decode(idx):
# Fail for all odd indices.
error = tf.assert_equal(idx % 2, tf.zeros((), dtype=tf.int64))
with tf.control_dependencies([error]):
return idx
self._factory.decoder_builder.add_fn(fail_decode, 'idx')
# Set block_length to guarantee reading examples in key order.
ds = self._factory.configure(keep_idx=True).tune(
block_length=10).make_dataset(
shuffle=False, batch_size=10, ignore_processing_errors=True)
data = next(iter(ds))
self.assertSetEqual(set(data.keys()), set(['sequence', 'idx']))
self.assertAllEqual(data['idx'], range(0, 20, 2))
if __name__ == '__main__':
tf.test.main()
| dmvr-master | dmvr/video_dataset_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic constructors for video datasets."""
import abc
from typing import Any, List, Optional, Type, TypeVar
from absl import logging
from dmvr import builders
from dmvr import sources
import tensorflow as tf
# Types.
T = TypeVar('T', bound=builders.BaseParserBuilder)
NestedStructure = Any
class BaseVideoDatasetFactory(abc.ABC):
"""Base class to build final `tf.data.Dataset` objects from files.
Glossary:
- A source is an object reading binary files in disk (e.g. TFRecords, image
files) and outputting serialized examples (e.g. `tf.train.SequenceExample`).
- A parser is an object reading serialized examples (e.g.
`tf.train.SequenceExample`) and outputting a `builders.FeaturesDict`.
- A processor is an object transforming features dictionary.
  - The data processing pipeline is organised in phases. A phase is a unit of
the data processing graph and will have one parser or processor.
- Builders are helpers designed to allow the user to easily customize the data
processing graph by adding functions to each phase.
Principle:
All datasets created with this factory follow the same abstraction:
a `parse_fn`, a `sample_fn`, a `decode_fn`, a `preprocess_fn` and a
`postprocess_fn` are used to control the flow of dataset creation besides
normal dataset operations. These functions are created from builders, allowing
the user to build a graph of data processing operations. In details, the
following steps are followed when creating a dataset:
- Read shards from file system using the given `source.Source`.
- Apply `parse_fn` to output values of the `source` (as bytes) to build a
dictionary of raw features. The parse function should only parse the useful
bytes of the serialized input example (e.g. `tf.train.SequenceExample`) and
put the features in a `builders.FeaturesDict` format. `parser_builder` can
be used to easily add more features / modalities.
- Apply `sample_fn` to sequence features contained in the dictionary in
order to select the desired elements of the sequence, e.g. sample a subset
of frames from the entire stored video. `sampler_builder` can be used to
modify or add sampling options.
- Apply `decode_fn` to convert raw formats to the final format. E.g. decode
JPEG string `tf.Tensor` to a `tf.Tensor` of `uint8`. `decoder_builder` can
be used.
- Apply `preprocess_fn`. E.g. crop images, process audio and text.
`preprocessor_builder` can be used.
- Batch, shuffle, prefetch and do other basic operations with the dataset.
- Apply `postprocess_fn` to batched examples. E.g. transpose batches.
`postprocessor_builder` can be used.
After each one of the data processing functions, a filter is applied in order
to keep only desirable elements in the dataset. These filters can be
customized by using the `filter_builder`.
A conventional use of this factory consists of implementing a subclass for a
specific dataset, overriding the `_build` method where all common processing
of the specific dataset can be added using the builders.
The client of the dataset is able to create a factory, configure it, possibly
add custom extra processing steps and use it to make a dataset.
Usage:
```python
class KineticsFactory(BaseVideoDatasetFactory):
def __init__(self, subset: str):
shards = ['path/to/kinetics/tfrecords/records-00001-of-00500.tfrecord',
...]
shards = filter_by_subset(shards, subset)
super().__init__(shards)
def _build(self, frame_height: int, frame_width: int, frame_count: int):
self.parser_builder.parse_feature(
image_seq_example_feature_name,
tf.io.FixedLenSequenceFeature((), dtype=tf.string),
builders.IMAGE_FEATURE_NAME)
self.sampler_builder.add_fn(
lambda x: sample_sequence_fn(x, frame_count),
builders.IMAGE_FEATURE_NAME)
self.decoder_builder.add_fn(decode_frames_fn, builders.IMAGE_FEATURE_NAME)
self.preprocessor_builder.add_fn(
lambda x: resize_frames(x, frame_height, frame_width),
builders.IMAGE_FEATURE_NAME)
# Other processing functions adding text and label.
# Dataset client code:
factory = KineticsFactory(subset='test').configure(
frame_height=224, frame_width=224, frame_count=8)
# Add extra custom preprocess functions:
def my_custom_text_tokenizer(text: tf.Tensor) -> tf.Tensor:
# Tokenize text string.
return tokenized_tensor
def my_custom_add_word_indices(
features_dict: builders.FeaturesDict) -> builders.FeaturesDict:
tokenized_text = features_dict[builders.TEXT_FEATURE_NAME]
features_dict[builders.TEXT_INDICES_FEATURE_NAME] = text_to_indices(
tokenized_text)
return features_dict
  (factory.preprocessor_builder.add_fn(my_custom_text_tokenizer,
                                       builders.TEXT_FEATURE_NAME)
.add_fn(my_custom_add_word_indices))
# Add filter:
  def keep_only_label_zero(features_dict: builders.FeaturesDict) -> tf.Tensor:
return tf.equal(features_dict[builders.LABEL_INDEX_FEATURE_NAME], 0)
factory.filter_builder.add_filter_fn(
keep_only_label_zero, builders.Phase.PARSE)
# Create dataset:
ds = factory.make_dataset(batch_size=16)
```
The factory exposes the process functions builders to the client, allowing
simple modifications to the functions. Common process functions, as crop,
resize, etc. should be implemented in common modules.
See builders documentation for more details.
"""
def __init__(self,
shards: List[str],
parser_builder_class: Type[T] = builders
.SequenceExampleParserBuilder,
source: sources.Source = sources.TFRecordsSource()):
"""Initializes the `BaseVideoDatasetFactory`.
Args:
      shards: List of paths to shards containing the data files. Each one of the
        paths will be passed to the `source`, which will read the data and
        output examples (these will be fed into the parse function generated by
        the `parser_builder_class`). Therefore, `shards`, `parser_builder_class`
        and `source` have to be consistent.
      parser_builder_class: A parser builder class able to parse examples of the
        types contained in `shards` files.
      source: Source to be used to load raw binary files and decode them into
        examples (encoded as bytes).
"""
self._shards = shards
self._source = source
# Initialize all function builders.
self.parser_builder = parser_builder_class()
self.sampler_builder = builders.SamplerBuilder()
self.decoder_builder = builders.DecoderBuilder()
self.preprocessor_builder = builders.PreprocessorBuilder()
self.postprocessor_builder = builders.PostprocessorBuilder()
# Initialize filters.
self.filter_builder = builders.FilterBuilder()
# Default tune parameters.
self._shuffle_buffer = 256
self._num_parser_threads = 16
self._num_process_threads = tf.data.experimental.AUTOTUNE
self._num_postprocess_threads = 4
self._parser_buffer_size = 64
self._postprocess_buffer_size = 1
self._prefetch_buffer_size = 8
self._cycle_length = None
self._num_parallel_calls_interleave = tf.data.experimental.AUTOTUNE
self._block_length = None
self._seed = None
self._duplicate_proto = None
self._is_configured = False
def configure(self, *args, **kwargs) -> 'BaseVideoDatasetFactory':
"""Configures all parse and process functions of this factory.
    This method should be called exactly once per factory instance and will
    delegate builder configuration to the `_build` method.
    Args:
      *args: Positional arguments passed to the `_build` method.
      **kwargs: Keyword arguments passed to the `_build` method.
Returns:
This instance of the factory.
Raises:
ValueError: Method has already been called.
"""
if self._is_configured:
raise ValueError(
'`configure` has already been called. The method should be called '
'only once to avoid duplicated process functions.')
self._is_configured = True
self._build(*args, **kwargs)
return self
def tune(self,
shuffle_buffer: Optional[int] = None,
num_parser_threads: Optional[int] = None,
num_process_threads: Optional[int] = None,
num_postprocess_threads: Optional[int] = None,
parser_buffer_size: Optional[int] = None,
postprocess_buffer_size: Optional[int] = None,
prefetch_buffer_size: Optional[int] = None,
cycle_length: Optional[int] = None,
num_parallel_calls_interleave: Optional[int] = None,
block_length: Optional[int] = None,
seed: Optional[int] = None,
duplicate_proto: Optional[int] = None):
"""Changes the dataset creation parameters.
    This method should be used to change the default parameters used to create
    the dataset, for example to improve speed or memory usage. Only the given
    parameters will be changed; the others will keep their current values.
Args:
shuffle_buffer: The buffer size for shuffle operation. This affects the
randomness of the output. It must be specified if `shuffle` is `True`.
num_parser_threads: Number of threads to use for the parsing operation.
`tf.data.experimental.AUTOTUNE` can be used to auto-tune.
num_process_threads: Number of threads to use for map operations in
sample, decode and preprocess. `tf.data.experimental.AUTOTUNE` can be
used to auto-tune.
num_postprocess_threads: Number of threads to use for map operations in
postprocess. `tf.data.experimental.AUTOTUNE` can be used to auto-tune.
parser_buffer_size: Buffer size of the sample, decode and preprocess
operation.
postprocess_buffer_size: Buffer size of the postprocess operation.
prefetch_buffer_size: Size of the final prefetch buffer.
cycle_length: The number of shards that will be processed concurrently.
`tf.data.experimental.AUTOTUNE` can be used to auto-tune.
num_parallel_calls_interleave: The number of parallel calls to the
interleave method. `tf.data.experimental.AUTOTUNE` can be used to
auto-tune.
block_length: The number of consecutive elements to produce from each
shard.
seed: Random seed of the shuffle operations.
duplicate_proto: Number of duplicates to make for each loaded proto.
Typically different augmentations will be applied for each copy, so
this can reduce disk reads without harming training performance.
This is applied after the post read function, but before the shuffle
buffer.
Returns:
This instance of the factory.
"""
self._shuffle_buffer = shuffle_buffer or self._shuffle_buffer
self._num_parser_threads = num_parser_threads or self._num_parser_threads
self._num_process_threads = num_process_threads or self._num_process_threads
self._num_postprocess_threads = (
num_postprocess_threads or self._num_postprocess_threads)
self._parser_buffer_size = parser_buffer_size or self._parser_buffer_size
self._postprocess_buffer_size = (
postprocess_buffer_size or self._postprocess_buffer_size)
self._prefetch_buffer_size = (
prefetch_buffer_size or self._prefetch_buffer_size)
self._cycle_length = cycle_length or self._cycle_length
self._num_parallel_calls_interleave = (
num_parallel_calls_interleave or self._num_parallel_calls_interleave)
self._block_length = block_length or self._block_length
self._seed = seed or self._seed
self._duplicate_proto = duplicate_proto or self._duplicate_proto
return self
# ----------------------------------------------------------------------
# ---------- Methods that must be implemented by child class. ----------
# ----------------------------------------------------------------------
@abc.abstractmethod
def _build(self, *args, **kwargs) -> None:
"""Builds the data processing graph."""
# ----------------------------------------------------------------------
# -------- Methods that should only be overridden if necessary. --------
# ----------------------------------------------------------------------
def make_dataset(
self,
shuffle: bool = True,
num_epochs: Optional[int] = None,
batch_size: Optional[int] = 16,
padded_batch: bool = False,
padded_batch_shapes: NestedStructure = None,
drop_remainder: bool = True,
keep_key: bool = False,
cache: bool = False,
override_preprocess_fn: Optional[builders.Processor] = None,
**experimental_kwargs
) -> tf.data.Dataset:
"""Creates a `tf.data.Dataset` instance of the given dataset.
Args:
shuffle: Whether output data is shuffled.
num_epochs: Number of epochs to cycle through before stopping. If `None`,
this will read samples indefinitely.
batch_size: If an int, an extra leading batch dimension will be present
for all features. If `None`, then no batching is done and no extra batch
dimension is added.
padded_batch: Whether to use `padded_batch` instead of `batch` method.
Padded batch pads a batch of examples to a given output shape. It pads
all examples to the longest one in that batch. This could be used for
sequence data.
padded_batch_shapes: `padded_shapes` to be passed to `padded_batch`.
drop_remainder: Whether to drop any remainder after the last full-size
batch. If `True`, the batch dimension of the resulting op is known;
otherwise, the batch dimension may be `None` in cases where `num_epochs`
is finite and `batch_size` > 1, since the final remainder batch may be
smaller than the usual batch size.
keep_key: Whether to keep the `builders.Source` key as a feature in the
final dictionary. The key for the key in the dictionary is
`builders.KEY_FEATURE_NAME`.
      cache: Whether to cache the dataset in RAM. Note that this should only
        be used if the dataset fits in RAM, as otherwise it will lead to an
        out-of-memory error.
override_preprocess_fn: Function to use instead of built preprocess_fn.
**experimental_kwargs: Other arguments used for experimental features.
These can be removed at any time without prior notice.
Returns:
An instance of the dataset.
Raises:
ValueError: Factory has not been configured.
ValueError: `shuffle_buffer` is `None` when dataset is shuffled.
ValueError: `batch_size` is not `None`, `padded_batch` is `False` and
`padded_batch_shapes` is not `None`.
"""
if not self._is_configured:
raise ValueError('Factory has not been configured. Call `configure` '
'method before `make_dataset`.')
    # Build the process functions or use their overrides.
parse_fn = self.parser_builder.build()
sample_fn = self.sampler_builder.build()
decode_fn = self.decoder_builder.build()
preprocess_fn = override_preprocess_fn or self.preprocessor_builder.build()
postprocess_fn = self.postprocessor_builder.build()
# Filter functions.
filter_fn_post_read = self.filter_builder.build(builders.Phase.READ)
filter_fn_post_parse = self.filter_builder.build(builders.Phase.PARSE)
filter_fn_post_sample = self.filter_builder.build(builders.Phase.SAMPLE)
filter_fn_post_decode = self.filter_builder.build(builders.Phase.DECODE)
filter_fn_post_preprocess = self.filter_builder.build(
builders.Phase.PREPROCESS)
filter_fn_post_postprocess = self.filter_builder.build(
builders.Phase.POSTPROCESS)
if shuffle and self._shuffle_buffer is None:
raise ValueError(
'`shuffle_buffer` cannot be `None` if dataset is shuffled.')
def parse_example(key: tf.Tensor,
raw_example: tf.Tensor) -> builders.FeaturesDict:
"""Decodes bytes of example and parse it into a features dictionary."""
output = parse_fn(raw_example)
# Potentially parse the key.
if keep_key:
output[builders.KEY_FEATURE_NAME] = key
return output
ds = tf.data.Dataset.from_tensor_slices(self._shards)
if shuffle:
# Shuffling the shards and not only the examples later is important.
ds = ds.shuffle(len(self._shards), seed=self._seed)
ds = ds.interleave(
self._source.load_and_decode_shard,
cycle_length=self._cycle_length,
block_length=self._block_length,
num_parallel_calls=self._num_parallel_calls_interleave,
deterministic=not shuffle)
# At this point, the features dictionary is not yet created. We artificially
# create one with the key only to make the interface uniform.
ds = ds.filter(
lambda key, _: filter_fn_post_read({builders.KEY_FEATURE_NAME: key}))
if self._duplicate_proto is not None:
def duplicate_fn(x, y):
return (tf.stack([x] * self._duplicate_proto),
tf.stack([y] * self._duplicate_proto))
ds = ds.map(duplicate_fn)
ds = ds.unbatch()
if not cache:
ds = ds.repeat(num_epochs)
if shuffle:
ds = ds.shuffle(self._shuffle_buffer, seed=self._seed)
# Parse.
ds = ds.map(
parse_example,
num_parallel_calls=self._num_parser_threads,
deterministic=not shuffle)
ds = ds.filter(filter_fn_post_parse)
if cache:
      # We cache the dataset after the parsing operation. This means that we
      # cache the raw protos before any random operations happen. This can avoid
      # IO issues when the dataset fits in RAM. Note that this is the optimal
      # place to cache the data: caching earlier would have no effect, as that
      # would only cache a list of files, and caching later is not possible
      # because the random operations that happen after `ds.repeat` make the
      # dataset unbounded.
ds = ds.cache()
ds = ds.repeat(num_epochs)
if shuffle:
ds = ds.shuffle(self._shuffle_buffer, seed=self._seed)
else:
ds = ds.prefetch(self._parser_buffer_size)
# Sample.
ds = ds.map(
sample_fn,
num_parallel_calls=self._num_process_threads,
deterministic=not shuffle)
ds = ds.filter(filter_fn_post_sample)
# Decode.
ds = ds.map(
decode_fn,
num_parallel_calls=self._num_process_threads,
deterministic=not shuffle)
ds = ds.filter(filter_fn_post_decode)
# Preprocess.
ds = ds.map(
preprocess_fn,
num_parallel_calls=self._num_process_threads,
deterministic=not shuffle)
ds = ds.filter(filter_fn_post_preprocess)
if experimental_kwargs.get('unbatch_after_preprocessing', False):
ds = ds.unbatch()
if experimental_kwargs.get('ignore_processing_errors', False):
ds = ds.apply(tf.data.experimental.ignore_errors())
if batch_size is not None:
if padded_batch:
ds = ds.padded_batch(
batch_size=batch_size,
padded_shapes=padded_batch_shapes,
drop_remainder=drop_remainder)
else:
if padded_batch_shapes is not None:
raise ValueError(
              '`padded_batch` is `False`, so `padded_batch_shapes` must be '
              f'`None`, but it is {padded_batch_shapes}.')
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
# Postprocess.
ds = ds.prefetch(self._postprocess_buffer_size)
ds = ds.map(
postprocess_fn,
num_parallel_calls=self._num_postprocess_threads,
deterministic=not shuffle)
ds = ds.filter(filter_fn_post_postprocess)
ds = ds.prefetch(self._prefetch_buffer_size)
logging.info('Dataset created successfully')
return ds
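# Illustrative usage sketch (an editorial addition, not part of the original
# module). It assumes `factory` is a concrete subclass of
# `BaseVideoDatasetFactory` that has already been `configure`d; the tuning
# values are arbitrary examples rather than recommended defaults.
def _tune_and_make_dataset_sketch(
    factory: BaseVideoDatasetFactory) -> tf.data.Dataset:
  """Shows how `tune` and `make_dataset` are typically chained."""
  factory.tune(
      shuffle_buffer=1024,     # Larger buffer: more randomness, more memory.
      num_parser_threads=8,    # Parallelism of the parse `map`.
      prefetch_buffer_size=4)  # Depth of the final prefetch.
  return factory.make_dataset(shuffle=True, batch_size=32, drop_remainder=True)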
| dmvr-master | dmvr/video_dataset.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple tokenizer interface with basic implementations."""
import abc
from typing import Optional, Sequence, Union
import clip.simple_tokenizer
import tensorflow as tf
import tensorflow_text
import sentencepiece as spm
class TextTokenizer(abc.ABC):
"""Base class for text tokenizers."""
def initialize(self):
"""Initializes tensorflow tables and models."""
return
@abc.abstractmethod
def string_tensor_to_indices(self,
string_tensor: Union[tf.Tensor, Sequence[str]],
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> tf.Tensor:
"""Tokenizes input text, mapping a tensor of strings to a tensor of ints.
Args:
string_tensor: Input string tensor of shape [num_texts].
prepend_bos: Whether to prepend the BOS (beginning of sentence) token to
the output tokens.
append_eos: Whether to append the EOS (end of sentence) token to the
output tokens.
max_num_tokens: Maximum number of tokens to return per caption. If
provided, the tokens will be padded / cut at the given size. If not, a
tensor of unknown size will be returned.
Returns:
A `tf.int32` tensor of shape [num_texts, `max_num_tokens`] if
`max_num_tokens` is provided or [num_texts, max_num_tokens_in_batch]
otherwise.
"""
@abc.abstractmethod
def indices_to_string(self, indices: Sequence[int]) -> str:
"""Detokenizes, mapping a python sequence of indices to a string."""
@property
@abc.abstractmethod
def vocab_size(self) -> int:
"""Returns the vocabulary size."""
@property
@abc.abstractmethod
def pad_token(self) -> int:
"""Returns index of the PAD token."""
@property
@abc.abstractmethod
def bos_token(self) -> int:
"""Returns index of the BOS token."""
@property
@abc.abstractmethod
def eos_token(self) -> int:
"""Returns index of the EOS token."""
@property
@abc.abstractmethod
def unk_token(self) -> int:
"""Returns index of the UNK token."""
class SentencePieceTokenizer(TextTokenizer):
"""SentencePiece tokenizer from a pre-trained SentencePiece model.
  Pre-trained models are provided in multiple repositories around the web. See
  https://github.com/google/sentencepiece for info on how to train new models
  on a specific corpus.
"""
def __init__(self, model_path: str):
"""Initializes the `SentencePieceTokenizer`.
Args:
model_path: Path to the '.model' file.
"""
self._model_path = model_path
self._sp_model = spm.SentencePieceProcessor()
self._sp_model.Load(model_path)
self._vocab_size = self._sp_model.GetPieceSize()
self._bos_token = self._sp_model.bos_id()
self._eos_token = self._sp_model.eos_id()
self._pad_token = self._sp_model.pad_id()
self._unk_token = self._sp_model.unk_id()
self._tf_sp_model = None
def initialize(self):
with tf.io.gfile.GFile(self._model_path, 'rb') as f:
self._tf_sp_model = tensorflow_text.SentencepieceTokenizer(
model=f.read(), out_type=tf.int32, add_bos=True, add_eos=True)
def string_tensor_to_indices(self,
string_tensor: Union[tf.Tensor, Sequence[str]],
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> tf.Tensor:
if self._tf_sp_model is None:
raise RuntimeError('Model was not initialized. Call `initialize` method.')
tokenized = self._tf_sp_model.tokenize(string_tensor)
tokenized = tokenized if prepend_bos else tokenized[..., 1:]
tokenized = tokenized if append_eos else tokenized[..., :-1]
# Pad to `max_num_tokens`.
shape = None if max_num_tokens is None else [None, max_num_tokens]
tokenized = tokenized.to_tensor(default_value=self._pad_token, shape=shape)
return tokenized
def indices_to_string(self, indices: Sequence[int]) -> str:
return self._sp_model.DecodeIds(indices)
def string_to_indices(self,
string: str,
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> Sequence[int]:
"""Tokenizes, mapping a python string to a sequence of indices."""
tokenized = self._sp_model.EncodeAsIds(string)
tokenized = [self._bos_token] * prepend_bos + tokenized
tokenized += [self._eos_token] * append_eos
if max_num_tokens:
tokenized = tokenized[:max_num_tokens]
num_tokens = len(tokenized)
tokenized = tokenized + [self._pad_token] * (max_num_tokens - num_tokens)
return tokenized
@property
def vocab_size(self):
return self._vocab_size
@property
def pad_token(self):
return self._pad_token
@property
def bos_token(self):
return self._bos_token
@property
def eos_token(self):
return self._eos_token
@property
def unk_token(self):
return self._unk_token
class WordTokenizer(TextTokenizer):
"""Vocabulary based word tokenizer."""
PAD = '<pad>'
BOS = '<bos>'
EOS = '<eos>'
UNK = '<unk>'
def __init__(self, vocabulary_path: str):
"""Initializes the `WordTokenizer`.
Args:
      vocabulary_path: A path to a vocabulary file. The vocabulary is a simple
        text file where each line is of the form 'idx word' or simply 'word'
        (in which case the line index will be used). The vocabulary should at
        least contain the words: '<pad>', '<bos>', '<eos>' and '<unk>'.
"""
# Parse the vocabulary. The expected format is either one word per line (and
# the index for that word will be the line index) or an index and a word,
# split by space.
idx2word = {}
with tf.io.gfile.GFile(vocabulary_path) as f:
for line_idx, line in enumerate(f):
line = line.strip().split(' ')
if len(line) not in [1, 2]:
raise ValueError(f'Line {line_idx} of vocabulary file, with contents '
f'\'{line}\' is malformed')
idx, word = line if len(line) == 2 else (line_idx, line[0])
idx = int(idx)
if idx in idx2word:
raise ValueError(
f'Vocabulary contains two words with same index {idx}.')
if word != word.lower():
raise ValueError(f'Word {word} with index {idx} is not lower case.')
idx2word[idx] = word
# Validate.
if len(idx2word) != len(set(idx2word.values())):
raise ValueError('Words in vocabulary are not unique.')
basic_tokens = {self.PAD, self.BOS, self.EOS, self.UNK}
if basic_tokens & set(idx2word.values()) != basic_tokens:
raise ValueError(
f'Vocabulary does not contain all basic tokens {basic_tokens}.')
self._idx2word = idx2word
self._word2idx = {v: k for k, v in idx2word.items()}
self._vocab_size = len(idx2word)
self._pad_token = self._word2idx[self.PAD]
self._bos_token = self._word2idx[self.BOS]
self._eos_token = self._word2idx[self.EOS]
self._unk_token = self._word2idx[self.UNK]
self._tf_word2idx = None
self._tf_whitespace_tokenizer = None
def initialize(self):
ids_tensor = tf.constant([i for w, i in self._word2idx.items()],
dtype=tf.int32)
words_tensor = tf.constant([w for w, i in self._word2idx.items()],
dtype=tf.string)
self._tf_whitespace_tokenizer = tensorflow_text.WhitespaceTokenizer()
self._tf_word2idx = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(words_tensor, ids_tensor),
self._unk_token)
def string_tensor_to_indices(self,
string_tensor: Union[tf.Tensor, Sequence[str]],
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> tf.Tensor:
if self._tf_word2idx is None or self._tf_whitespace_tokenizer is None:
raise RuntimeError('Model was not initialized. Call `initialize` method.')
# Remove punctuation.
string_tensor = tf.strings.regex_replace(string_tensor, '[[:punct:]]', '')
# Lower case.
string_tensor = tf.strings.lower(string_tensor)
if prepend_bos:
string_tensor = self.BOS.encode('utf-8') + b' ' + string_tensor
if append_eos:
string_tensor += b' ' + self.EOS.encode('utf-8')
# Separate words by whitespace.
tokenized = self._tf_whitespace_tokenizer.tokenize(string_tensor)
# Map word to indices.
tokenized = self._tf_word2idx.lookup(tokenized)
# Pad to `max_num_tokens`.
shape = None if max_num_tokens is None else [None, max_num_tokens]
tokenized = tokenized.to_tensor(default_value=self._pad_token, shape=shape)
return tokenized
def indices_to_string(self, indices: Sequence[int]) -> str:
# Cut at `EOS` or `PAD`.
idx_list_cut = []
for token_id in indices:
if token_id in [self._pad_token, self._eos_token]:
break
idx_list_cut.append(token_id)
# Decode back to string.
words_list = [self._idx2word[idx] for idx in idx_list_cut]
return ' '.join(words_list)
def string_to_indices(self,
string: str,
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> Sequence[int]:
"""Tokenizes, mapping a python string to a sequence of indices."""
string = string.translate(
str.maketrans('', '', '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'))
string = string.lower()
words = string.split(' ')
tokenized = [self._word2idx.get(w, self._unk_token) for w in words]
tokenized = [self._bos_token] * prepend_bos + tokenized
tokenized += [self._eos_token] * append_eos
if max_num_tokens:
tokenized = tokenized[:max_num_tokens]
num_tokens = len(tokenized)
tokenized = tokenized + [self._pad_token] * (max_num_tokens - num_tokens)
return tokenized
@property
def vocab_size(self):
return self._vocab_size
@property
def pad_token(self):
return self._pad_token
@property
def bos_token(self):
return self._bos_token
@property
def eos_token(self):
return self._eos_token
@property
def unk_token(self):
return self._unk_token
class BertTokenizer(TextTokenizer):
"""BERT tokenizer.
  Standard BERT vocabularies can be found on TF Hub.
"""
PAD = '[PAD]'
CLS = '[CLS]'
SEP = '[SEP]'
BOS = CLS
EOS = SEP
UNK = '[UNK]'
def __init__(self, vocabulary_path: str):
"""Initializes the `BertTokenizer`.
Args:
vocabulary_path: A path to a vocabulary file. The vocabulary is a simple
text file where each line is of the form: 'token'. The vocabulary should
at least contain the words: '[PAD]', '[CLS]', '[SEP]' and '[UNK]'.
"""
# Parse the vocabulary.
idx2word = {}
self._vocabulary_path = vocabulary_path
with tf.io.gfile.GFile(vocabulary_path) as f:
for idx, line in enumerate(f):
word = line.strip()
idx2word[idx] = word
# Validate.
if len(idx2word) != len(set(idx2word.values())):
raise ValueError('Words in vocabulary are not unique.')
basic_tokens = {self.PAD, self.BOS, self.EOS, self.UNK}
if basic_tokens & set(idx2word.values()) != basic_tokens:
raise ValueError(
f'Vocabulary does not contain all basic tokens {basic_tokens}.')
self._idx2word = idx2word
self._word2idx = {v: k for k, v in idx2word.items()}
self._vocab_size = len(idx2word)
self._pad_token = self._word2idx[self.PAD]
self._bos_token = self._word2idx[self.BOS]
self._eos_token = self._word2idx[self.EOS]
self._unk_token = self._word2idx[self.UNK]
self._tf_tokenizer = None
def initialize(self):
self._tf_tokenizer = tensorflow_text.BertTokenizer(
self._vocabulary_path,
token_out_type=tf.int32,
unknown_token=self.UNK,
lower_case=True)
def string_tensor_to_indices(self,
string_tensor: Union[tf.Tensor, Sequence[str]],
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 32) -> tf.Tensor:
if self._tf_tokenizer is None:
raise RuntimeError('Model was not initialized. Call `initialize` method.')
batch_size = tf.shape(input=string_tensor)[0]
tokenized = self._tf_tokenizer.tokenize(string_tensor)
tokenized = tokenized.merge_dims(-2, -1)
if append_eos:
eos_tensor = tf.ragged.constant([self._eos_token])
eos_tensor = tf.tile(eos_tensor, [batch_size])
eos_tensor = tf.expand_dims(eos_tensor, axis=1)
tokenized = tf.concat([tokenized, eos_tensor], axis=1)
if prepend_bos:
bos_tensor = tf.ragged.constant([self._bos_token])
bos_tensor = tf.tile(bos_tensor, [batch_size])
bos_tensor = tf.expand_dims(bos_tensor, axis=1)
tokenized = tf.concat([bos_tensor, tokenized], axis=1)
# Pad to `max_num_tokens`.
shape = None if max_num_tokens is None else [None, max_num_tokens]
tokenized = tokenized.to_tensor(default_value=self._pad_token, shape=shape)
return tokenized
def indices_to_string(self, indices: Sequence[int]) -> str:
# Cut at `EOS` or `PAD`.
idx_list_cut = []
for token_id in indices:
if token_id in [self._pad_token, self._eos_token]:
break
idx_list_cut.append(token_id)
# Decode back to string.
word_iter = (self._idx2word[idx] for idx in idx_list_cut)
return ' '.join(word_iter).replace(' ##', '')
@property
def vocab_size(self):
return self._vocab_size
@property
def pad_token(self):
return self._pad_token
@property
def bos_token(self):
return self._bos_token
@property
def eos_token(self):
return self._eos_token
@property
def unk_token(self):
return self._unk_token
@property
def cls_token(self):
return self._bos_token
@property
def sep_token(self):
return self._eos_token
class ClipTokenizer(TextTokenizer):
"""CLIP tokenizer."""
BOS = '<|startoftext|>'
EOS = '<|endoftext|>'
UNK = EOS
def __init__(
self,
vocabulary_path: Optional[str] = None,
) -> None:
"""Initializes the `ClipTokenizer`.
Args:
vocabulary_path: A path to a CLIP-style vocabulary file.
"""
self._tokenizer = clip.simple_tokenizer.SimpleTokenizer(vocabulary_path)
self._vocab_size = len(self._tokenizer.encoder)
self._pad_token = 0
self._bos_token = self._tokenizer.encoder[self.BOS]
self._eos_token = self._tokenizer.encoder[self.EOS]
self._unk_token = self._tokenizer.encoder[self.UNK]
self._initialized = False
def initialize(self) -> None:
self._initialized = True
def _clip_tokenize(self, texts: Union[tf.Tensor,
Sequence[str]]) -> tf.RaggedTensor:
if isinstance(texts, tf.Tensor):
texts = [text.decode('utf-8') for text in texts._numpy().tolist()] # pylint: disable=protected-access
return tf.ragged.constant([self._tokenizer.encode(text) for text in texts],
dtype=tf.int32)
def string_tensor_to_indices(self,
string_tensor: Union[tf.Tensor, Sequence[str]],
prepend_bos: bool = False,
append_eos: bool = False,
max_num_tokens: Optional[int] = 77) -> tf.Tensor:
if not self._initialized: # To satisfy the tests.
raise RuntimeError('Model was not initialized. Call `initialize` method.')
batch_size = tf.shape(input=string_tensor)[0]
tokenized = tf.py_function(
func=self._clip_tokenize,
inp=[string_tensor],
Tout=tf.RaggedTensorSpec([None, None], dtype=tf.int32))
if append_eos:
eos_tensor = tf.ragged.constant([self._eos_token])
eos_tensor = tf.tile(eos_tensor, [batch_size])
eos_tensor = tf.expand_dims(eos_tensor, axis=1)
tokenized = tf.concat([tokenized, eos_tensor], axis=1)
if prepend_bos:
bos_tensor = tf.ragged.constant([self._bos_token])
bos_tensor = tf.tile(bos_tensor, [batch_size])
bos_tensor = tf.expand_dims(bos_tensor, axis=1)
tokenized = tf.concat([bos_tensor, tokenized], axis=1)
# Pad to `max_num_tokens`.
shape = None if max_num_tokens is None else [None, max_num_tokens]
return tokenized.to_tensor(default_value=self._pad_token, shape=shape)
def indices_to_string(self, indices: Sequence[int]) -> str:
text = self._tokenizer.decode(i for i in indices if i != self._pad_token)
start_pos = len(self.BOS) if text.startswith(self.BOS) else 0
end_pos = text.index(self.EOS) if self.EOS in text else None
return text[start_pos:end_pos].strip()
@property
def vocab_size(self) -> int:
return self._vocab_size
@property
def pad_token(self) -> int:
return self._pad_token
@property
def bos_token(self) -> int:
return self._bos_token
@property
def eos_token(self) -> int:
return self._eos_token
@property
def unk_token(self) -> int:
return self._unk_token
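# Illustrative usage sketch (an editorial addition, not part of the original
# module). It assumes a vocabulary file in the `WordTokenizer` format, i.e. one
# word per line including '<pad>', '<bos>', '<eos>' and '<unk>'; the path below
# is a placeholder.
def _word_tokenizer_sketch(vocabulary_path: str = '/path/to/word_vocab.txt'):
  """Tokenizes a small batch of captions and decodes the first one back."""
  tokenizer = WordTokenizer(vocabulary_path)
  tokenizer.initialize()  # Builds the TF lookup table; required before use.
  indices = tokenizer.string_tensor_to_indices(
      ['hello world', 'a second caption'],
      prepend_bos=True, append_eos=True, max_num_tokens=16)
  # `indices` has shape [2, 16] and is padded with `tokenizer.pad_token`.
  decoded = tokenizer.indices_to_string(indices[0].numpy().tolist())
  return indices, decoded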
| dmvr-master | dmvr/tokenizers.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
import imp # pylint: disable=deprecated-module
import setuptools
# Additional requirements for testing.
# Discussion on `pytype==2021.8.11`: pytype latest version raises some issues
# tracked in https://github.com/google/pytype/issues/1359. This version is
# borrowed from https://github.com/deepmind/acme/blob/master/setup.py#L71
testing_require = [
'pytest-xdist',
'pytype', # to be compatible with dm-acme
]
setuptools.setup(
name='enn',
description=(
'Epistemic neural networks. '
'A library for probabilistic inference via neural networks.'
),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/deepmind/enn',
author='DeepMind',
author_email='[email protected]',
license='Apache License, Version 2.0',
version=imp.load_source('_metadata', 'enn/_metadata.py').__version__,
keywords='probabilistic-inference python machine-learning',
packages=setuptools.find_packages(),
# Discussion on pinning versions: As of 2023-03-31, `dm-acme==0.4.0` is the
# latest version that supports Python 3.7, 3.8, and 3.9. However, this
# version works only with `tensorflow==2.8.0`, `tensorflow-datasets==4.6.0`,
# and `tensorflow_probability==0.15.0` as specified in
# https://github.com/deepmind/acme/blob/master/setup.py#L39.
# Moreover, our library does not require loading `tensorflow_probability`,
# it is just loaded to pin to the specific version required by dm-acme.
install_requires=[
'absl-py',
'chex',
'dill',
'dm-haiku',
'jax',
'jaxline',
'numpy',
'optax',
'pandas',
'rlax',
'plotnine',
'scikit-learn',
'tensorflow', # to be compatible with dm-acme
'tensorflow-datasets', # to be compatible with dm-acme
'tensorflow_probability', # to be compatible with dm-acme
'typing-extensions',
],
extras_require={
'testing': testing_require,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| enn-master | setup.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package metadata for enn.
This is kept in a separate module so that it can be imported from setup.py, at
a time when enn's dependencies may not have been installed yet.
"""
__version__ = '0.1.0'
| enn-master | enn/_metadata.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.utils."""
from absl.testing import absltest
from absl.testing import parameterized
from enn import base
from enn import networks
from enn import utils
import jax
import jax.numpy as jnp
class UtilsTest(parameterized.TestCase):
@parameterized.parameters([
[networks.PrngIndexer(), 23],
[networks.ScaledGaussianIndexer(10), 32],
[networks.GaussianWithUnitIndexer(5), 40],
[networks.EnsembleIndexer(13), 50],
])
def test_batch_indexer(self, indexer: base.EpistemicIndexer,
batch_size: int):
batch_indexer = utils.make_batch_indexer(indexer, batch_size)
batch_index = batch_indexer(jax.random.PRNGKey(0))
# Check that the batch index is of the right leading dimension
assert batch_index.shape[0] == batch_size
# Check that they are not all identical
assert not jnp.isclose(
batch_index,
jnp.broadcast_to(batch_index[0], batch_index.shape),
).all()
if __name__ == '__main__':
absltest.main()
| enn-master | enn/utils_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for logging to the terminal.
Forked from third_party/py/acme/utils/loggers/terminal.py
"""
import logging
import time
import typing as tp
import numpy as np
import tree
import typing_extensions
LoggingData = tp.Mapping[str, tp.Any]
class Logger(typing_extensions.Protocol):
"""A logger has a `write` method."""
def write(self, data: LoggingData):
"""Writes `data` to destination (file, terminal, database, etc)."""
def _tensor_to_numpy(value: tp.Any):
if hasattr(value, 'numpy'):
return value.numpy() # tf.Tensor (TF2).
if hasattr(value, 'device_buffer'):
return np.asarray(value) # jnp.DeviceArray.
return value
def _to_numpy(values: tp.Any):
"""Converts tensors in a nested structure to numpy.
Converts tensors from TensorFlow to Numpy if needed without importing TF
dependency.
Args:
values: nested structure with numpy and / or TF tensors.
Returns:
Same nested structure as values, but with numpy tensors.
"""
return tree.map_structure(_tensor_to_numpy, values)
def _format_key(key: str) -> str:
"""Internal function for formatting keys."""
return key.replace('_', ' ').title()
def _format_value(value: tp.Any) -> str:
"""Internal function for formatting values."""
value = _to_numpy(value)
if isinstance(value, (float, np.number)):
return f'{value:0.3f}'
return f'{value}'
def serialize(values: LoggingData) -> str:
"""Converts `values` to a pretty-printed string.
This takes a dictionary `values` whose keys are strings and returns
a formatted string such that each [key, value] pair is separated by ' = ' and
each entry is separated by ' | '. The keys are sorted alphabetically to ensure
a consistent order, and snake case is split into words.
For example:
    values = {'a': 1, 'b': 2.33333333, 'c': 'hello', 'big_value': 10}
# Returns 'A = 1 | B = 2.333 | Big Value = 10 | C = hello'
values_string = serialize(values)
Args:
values: A dictionary with string keys.
Returns:
A formatted string.
"""
return ' | '.join(f'{_format_key(k)} = {_format_value(v)}'
for k, v in sorted(values.items()))
class TerminalLogger:
"""Logs to terminal."""
def __init__(
self,
label: str = '',
print_fn: tp.Callable[[str], None] = logging.info,
serialize_fn: tp.Callable[[LoggingData], str] = serialize,
time_delta: float = 0.0,
):
"""Initializes the logger.
Args:
label: label string to use when logging.
print_fn: function to call which acts like print.
serialize_fn: function to call which transforms values into a str.
      time_delta: How often (in seconds) to write values. This can be used to
        minimize terminal spam, but is 0 by default, i.e. everything is written.
"""
self._print_fn = print_fn
self._serialize_fn = serialize_fn
self._label = label and f'[{_format_key(label)}] '
self._time = time.time()
self._time_delta = time_delta
def write(self, data: LoggingData):
now = time.time()
if (now - self._time) > self._time_delta:
self._print_fn(f'{self._label}{self._serialize_fn(data)}')
self._time = now
def make_default_logger(label: str, time_delta: float = 0.0) -> TerminalLogger:
return TerminalLogger(label=label, time_delta=time_delta)
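# Illustrative usage sketch (an editorial addition, not part of the original
# module): logging one dictionary of arbitrary metric values.
def _terminal_logger_sketch():
  """Writes one row of metrics via `logging.info`."""
  logger = make_default_logger('train')
  logger.write({'step': 100, 'loss': 0.1234, 'learning_rate': 1e-3})
  # Emits a line like:
  # [Train] Learning Rate = 0.001 | Loss = 0.123 | Step = 100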
| enn-master | enn/loggers.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Epistemic neural networks for uncertainty representation."""
| enn-master | enn/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions."""
# TODO(author7): move this file to enn.datasets.
import dataclasses
from typing import Optional
from absl import flags
import chex
from enn import base
from enn.datasets import base as ds_base
import jax
import jax.numpy as jnp
import numpy as np
from sklearn import datasets
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
def make_batch_indexer(indexer: base.EpistemicIndexer,
batch_size: int) -> base.EpistemicIndexer:
"""Batches an EpistemicIndexer to produce batch_size index samples."""
fold_in = jax.vmap(jax.random.fold_in, in_axes=[None, 0])
batch_array = jnp.arange(batch_size)
def batch_indexer(key: chex.PRNGKey) -> base.Index:
batch_keys = fold_in(key, batch_array)
return jax.vmap(indexer)(batch_keys)
return batch_indexer
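# Illustrative usage sketch (an editorial addition, not part of the original
# module). The Gaussian indexer below is an assumed stand-in for any
# `base.EpistemicIndexer`; batching it makes one key yield `batch_size` indices.
def _batch_indexer_sketch(batch_size: int = 4) -> base.Index:
  """Draws a batch of epistemic indices from a single PRNG key."""
  gaussian_indexer = lambda key: jax.random.normal(key, [3])
  batch_indexer = make_batch_indexer(gaussian_indexer, batch_size)
  return batch_indexer(jax.random.PRNGKey(0))  # Shape [batch_size, 3].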
def _clean_batch_data(data: ds_base.ArrayBatch) -> ds_base.ArrayBatch:
  """Checks some of the common shape/index issues for dummy data."""
# Make sure that the data has a separate batch dimension
if data.y.ndim == 1:
data = dataclasses.replace(data, y=data.y[:, None])
# Data index to identify each instance
if data.data_index is None:
data = dataclasses.replace(data, data_index=np.arange(len(data.y))[:, None])
  # Weights to say how much each data point is worth.
if data.weights is None:
data = dataclasses.replace(data, weights=np.ones(len(data.y))[:, None])
return data
def make_batch_iterator(data: ds_base.ArrayBatch,
batch_size: Optional[int] = None,
seed: int = 0) -> ds_base.ArrayBatchIterator:
"""Converts toy-like training data to batch_iterator for sgd training."""
data = _clean_batch_data(data)
n_data = len(data.y)
if not batch_size:
batch_size = n_data
ds = tf.data.Dataset.from_tensor_slices(data).cache()
ds = ds.shuffle(min(n_data, 50 * batch_size), seed=seed)
ds = ds.repeat().batch(batch_size)
return iter(tfds.as_numpy(ds))
def make_test_data(n_samples: int = 20) -> ds_base.ArrayBatchIterator:
"""Generate a simple dataset suitable for classification or regression."""
x, y = datasets.make_moons(n_samples, noise=0.1, random_state=0)
return make_batch_iterator(ds_base.ArrayBatch(x=x, y=y))
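# Illustrative usage sketch (an editorial addition, not part of the original
# module): drawing a single full batch from the toy two-moons data.
def _toy_iterator_sketch() -> ds_base.ArrayBatch:
  """Returns one batch from `make_test_data`."""
  iterator = make_test_data(n_samples=20)  # Full-batch iterator by default.
  batch = next(iterator)
  # batch.x: [20, 2] inputs; batch.y, data_index and weights: [20, 1] each.
  return batch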
| enn-master | enn/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for Epistemic Neural Network design in JAX / Haiku."""
import dataclasses
import typing as tp
import chex
import haiku as hk
import typing_extensions
################################################################################
# ENN definition
Input = tp.TypeVar('Input') # Inputs to neural network
Output = tp.TypeVar('Output') # Outputs of neural network
Index = tp.Any # Epistemic index
class EpistemicIndexer(typing_extensions.Protocol):
"""Generates indices for the ENN from random keys."""
def __call__(self, key: chex.PRNGKey) -> Index:
"""Samples a single index for the epistemic network."""
class ApplyFn(typing_extensions.Protocol[Input, Output]):
"""Applies the ENN at given parameters, state, inputs, index."""
def __call__(self,
params: hk.Params,
state: hk.State,
inputs: Input,
index: Index) -> tp.Tuple[Output, hk.State]:
"""Applies the ENN at given parameters, state, inputs, index."""
class InitFn(typing_extensions.Protocol[Input]):
"""Initializes the ENN with state at given rng_key, inputs, index."""
def __call__(self,
rng_key: chex.PRNGKey,
inputs: Input,
index: Index) -> tp.Tuple[hk.Params, hk.State]:
"""Initializes the ENN with state at given rng_key, inputs, index."""
@dataclasses.dataclass
class EpistemicNetwork(tp.Generic[Input, Output]):
"""Convenient pairing of Haiku transformed function and index sampler."""
apply: ApplyFn[Input, Output]
init: InitFn[Input]
indexer: EpistemicIndexer
################################################################################
# Loss functions and training
Data = tp.TypeVar('Data') # General training data
LossMetrics = tp.Dict[str, chex.Array] # Metrics reported in training.
# Output of loss function includes (loss, (state, metrics))
LossOutput = tp.Tuple[chex.Array, tp.Tuple[hk.State, LossMetrics]]
class LossFn(typing_extensions.Protocol[Input, Output, Data]):
"""Calculates a loss based on one batch of data per random key."""
def __call__(self,
enn: EpistemicNetwork[Input, Output],
params: hk.Params,
state: hk.State,
batch: Data,
key: chex.PRNGKey) -> LossOutput:
"""Computes a loss based on one batch of data and a random key."""
| enn-master | enn/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of the metrics."""
# Base classes
from enn.metrics.base import AggregateMetricCalculator
from enn.metrics.base import average_sampled_log_likelihood
from enn.metrics.base import make_average_aggregator
from enn.metrics.base import MetricCalculator
from enn.metrics.base import MetricsState
from enn.metrics.base import PerExampleMetric
# Calibration
from enn.metrics.calibration import ExpectedCalibrationError
from enn.metrics.calibration import SingleBatchECE
# Joint
from enn.metrics.joint import calculate_joint_ll
from enn.metrics.joint import make_nll_joint_calculator
from enn.metrics.joint import make_nll_polyadic_calculator
# Marginal
from enn.metrics.marginal import categorical_log_likelihood
from enn.metrics.marginal import make_accuracy_calculator
from enn.metrics.marginal import make_nll_marginal_calculator
| enn-master | enn/metrics/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.metrics."""
from absl.testing import absltest
from absl.testing import parameterized
from enn import metrics
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
class MetricsTest(parameterized.TestCase):
@parameterized.parameters([1, 3, 100])
def test_average_sampled_log_likelihood_all_neginf(self, num_sample: int):
"""Test that average of negative infinity log likelihood is neg infinity."""
log_likelihood = jnp.concatenate([jnp.array([-jnp.inf] * num_sample)])
avg_log_likelihood = metrics.average_sampled_log_likelihood(
log_likelihood)
self.assertTrue(jnp.isneginf(avg_log_likelihood))
@parameterized.parameters([3, 100])
def test_average_sampled_log_likelihood_single_neginf(self, num_sample: int):
"""Test that avg with one negative infinity log likelihood is correct."""
log_likelihood = jnp.concatenate([jnp.array([-jnp.inf]),
jnp.zeros(shape=(num_sample - 1,))])
avg_log_likelihood = metrics.average_sampled_log_likelihood(
log_likelihood)
    expected_log_likelihood = jnp.log((num_sample - 1) / num_sample)
self.assertAlmostEqual(
avg_log_likelihood, expected_log_likelihood,
msg=(f'Expected log likelihood to be {expected_log_likelihood} ',
f'but received {avg_log_likelihood}'),
delta=0.1/num_sample)
@parameterized.product(
ll_val=[-1000, -100, 10, 0],
num_sample=[1, 3, 100])
def test_average_sampled_log_likelihood_const_values(
self, ll_val: float, num_sample: int):
"""Test that average of equal log likelihood values is correct."""
log_likelihood = ll_val * jnp.ones(shape=(num_sample,))
avg_log_likelihood = metrics.average_sampled_log_likelihood(
log_likelihood)
self.assertAlmostEqual(
avg_log_likelihood, ll_val,
msg=(f'Expected log likelihood to be {ll_val} ',
f'but received {avg_log_likelihood}'),
delta=1e-5)
@parameterized.product(seed=[1, 2, 3, 4, 5])
def test_dyadic_matches(self, seed: int):
rng = hk.PRNGSequence(seed)
batch_size = 23
num_classes = 7
num_enn_samples = 13
# Form trial data
logits = jax.random.normal(
next(rng), [num_enn_samples, batch_size, num_classes])
labels = jax.random.randint(next(rng), [batch_size, 1], 0, num_classes)
# Make sure not huge NLL
new_calc = metrics.make_nll_polyadic_calculator(10, 2)
new_nll = new_calc(logits, labels)
assert np.isfinite(jnp.abs(new_nll))
@parameterized.product(
seed=[1000],
num_enn_samples=[1, 10,],
batch_size=[10],
num_classes=[2, 10,],
num_bins=[2, 10],
num_batches=[1, 5])
def test_ece_calculator(
self,
seed: int,
num_enn_samples: int,
batch_size: int,
num_classes: int,
num_bins: int,
num_batches: int,
):
"""Tests that CalibrationErrorCalculator is correct by comparing it with BatchCalibrationErrorCalculator."""
# We set this to `allow` (instead of the default `set`), because some
# internal broadcasting is being done in tfp_ece_calculator.
jax.config.update('jax_numpy_rank_promotion', 'allow')
# Generate a set of random logits and labels
rng = hk.PRNGSequence(seed)
logits_ls = []
labels_ls = []
for _ in range(num_batches):
logits = jax.random.normal(
next(rng), [num_enn_samples, batch_size, num_classes])
labels = jax.random.randint(
next(rng), shape=[batch_size, 1], minval=0, maxval=num_classes)
logits_ls.append(logits)
labels_ls.append(labels)
# Combining batches into one batch
stacked_logits = jnp.concatenate(logits_ls, axis=1)
stacked_labels = jnp.concatenate(labels_ls, axis=0)
# Compute ece using tfp ece calculator which can only work when all data is
# provided in one batch
tfp_ece_calculator = metrics.SingleBatchECE(
num_bins=num_bins)
tfp_ece = tfp_ece_calculator(logits=stacked_logits, labels=stacked_labels)
tfp_ece_value = float(tfp_ece)
# Compute ece using our ece calculator which can also work when data is
# is provided in multiple batches
our_ece_calculator = metrics.ExpectedCalibrationError(
num_bins=num_bins)
ece_state = None
for logits, labels in zip(logits_ls, labels_ls):
ece_state = our_ece_calculator(
logits=logits, labels=labels, state=ece_state)
if ece_state is not None:
our_ece_value = float(ece_state.value)
# Check that ece results by our calculator and tfp calculator are the same.
self.assertAlmostEqual(
our_ece_value, tfp_ece_value,
msg=f'our_ece_value={our_ece_value} not close enough to tfp_ece_value',
delta=5e-2,
)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/metrics/metrics_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Metrics used to evaluate joint predictions."""
from typing import Tuple
import chex
from enn.metrics import base as metrics_base
from enn.metrics import marginal
import jax
import jax.numpy as jnp
def make_nll_polyadic_calculator(
tau: int = 10,
kappa: int = 2,
) -> metrics_base.MetricCalculator:
"""Returns a MetricCalculator that computes d_{KL}^{tau, kappa} metric."""
def joint_ll_repeat(logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey) -> float:
"""Calculates joint NLL tau inputs resampled from anchor points."""
# Shape checking
chex.assert_shape(logits, [kappa, None])
chex.assert_shape(labels, [kappa, 1])
# Compute log-likehood at the kappa anchor points
probs = jax.nn.softmax(logits)
assigned_probs = probs[jnp.arange(kappa), jnp.squeeze(labels)]
log_probs = jnp.log(assigned_probs)
# Sample with replacement from the anchor points and sum for joint ll
selected = jax.random.randint(key, shape=[tau], minval=0, maxval=kappa)
return jnp.sum(log_probs[selected])
def enn_nll(logits: chex.Array,
labels: chex.Array,
key: chex.Array) -> float:
"""Averages joint_ll_repeat over multiple ENN samples."""
# Shape checking
chex.assert_shape(logits, [None, kappa, None])
chex.assert_shape(labels, [kappa, 1])
# Averaging over ENN samples
batched_ll = jax.vmap(joint_ll_repeat, in_axes=[0, None, None])
lls = batched_ll(logits, labels, key)
return -1 * metrics_base.average_sampled_log_likelihood(lls) # pytype: disable=wrong-arg-types # numpy-scalars
def polyadic_nll(logits: chex.Array, labels: chex.Array) -> float:
"""Returns polyadic NLL based on repeated inputs.
    Internally this function works by taking the batch of logits and then
    "melting" it to add an extra dimension so that the batches over which we
    evaluate likelihood are of size kappa. This means that one batch of size
    N*kappa becomes N batches of size kappa. For each of these batches of size
    kappa, we then resample tau observations with replacement from the kappa
    anchor points. The function then returns the joint NLL evaluated over this
    synthetic batch.
Args:
logits: [num_enn_samples, batch_size, num_classes]
labels: [batch_size, 1]
"""
# TODO(author2): Revisit metric/performance and sampling solution.
# Shape checking
chex.assert_rank(logits, 3)
chex.assert_shape(labels, [logits.shape[1], 1])
    # We use the values of the sampled labels to specify a seed for the anchor
    # point resampling. This is not necessary if the evaluation batches are
    # sampled i.i.d., but is a precaution against some other factor in sampling.
offset = jnp.arange(labels.shape[0])[:, None] * jnp.max(labels) * 10
seed = jnp.sum(labels * offset, dtype=jnp.int32)
# Creating synthetic batches of size=kappa then use vmap.
batched_logits, batched_labels = reshape_to_smaller_batches(
logits, labels, batch_size=kappa)
keys = jax.random.split(jax.random.PRNGKey(seed), batched_logits.shape[0])
nlls = jax.vmap(enn_nll, in_axes=0)(batched_logits, batched_labels, keys)
return jnp.mean(nlls)
return jax.jit(polyadic_nll)
def make_nll_joint_calculator(tau: int = 10) -> metrics_base.MetricCalculator:
"""Returns a MetricCalculator that computes d_{KL}^{tau} metric."""
def calculate_nll_joint(logits: chex.Array, labels: chex.Array) -> float:
"""Calculates joint nll."""
num_data = labels.shape[0]
assert num_data >= tau, f'num_data={num_data} should be at least tau!'
batched_logits, batched_labels = reshape_to_smaller_batches(
logits, labels, batch_size=tau)
num_batches = batched_labels.shape[0]
lls = jax.vmap(calculate_joint_ll)(
batched_logits, batched_labels)
chex.assert_shape(lls, (num_batches,))
return -1 * jnp.mean(lls)
return calculate_nll_joint
def calculate_joint_ll(logits: chex.Array, labels: chex.Array) -> float:
"""Computes joint log likelihood (ll) aggregated over enn samples.
Depending on data batch_size (can be inferred from logits and labels), this
function computes joint ll for tau=batch_size aggregated over enn samples. If
num_data is one, this function computes marginal ll.
Args:
logits: [num_enn_sample, num_data, num_classes]
labels: [num_data, 1]
Returns:
marginal log likelihood
"""
num_enn_samples, tau, num_classes = logits.shape
chex.assert_shape(labels, (tau, 1))
class_probs = jax.nn.softmax(logits)
chex.assert_shape(class_probs, (num_enn_samples, tau, num_classes))
batched_ll = jax.vmap(marginal.categorical_log_likelihood, in_axes=[0, None])
sampled_ll = batched_ll(class_probs, labels)
return metrics_base.average_sampled_log_likelihood(sampled_ll) # pytype: disable=wrong-arg-types # numpy-scalars
def reshape_to_smaller_batches(
logits: chex.Array,
labels: chex.Array,
batch_size: int,
) -> Tuple[chex.Array, chex.Array]:
"""Reshapes logits,labels to add leading batch_size dimension.
  In case the sizes of logits and labels are such that they cannot be equally
  divided into batches of size batch_size, the extra data is discarded.
Args:
logits: has shape [num_enn_samples, num_data, num_classes]
labels: has shape [num_data, 1]
batch_size: desired output batch size.
Returns:
A tuple of batched_logits and batched_labels with shapes
batched_logits: (num_batches, num_enn_samples, batch_size, num_classes)
batched_labels: (num_batches, batch_size, 1)
"""
# Shape checking
assert logits.ndim == 3
num_enn_samples, num_data, num_classes = logits.shape
chex.assert_shape(labels, [num_data, 1])
assert num_data >= batch_size
##############################################################################
  # 1. We split num_data into batches of size batch_size. To ensure that the
  # split is possible, we might need to discard extra data.
num_batches = num_data // batch_size
num_extra_data = num_data % batch_size
num_data -= num_extra_data
# 1.1. Discard extra data if needed.
logits = logits[:, :num_data, :]
labels = labels[:num_data, :]
chex.assert_shape(logits, [num_enn_samples, num_data, num_classes])
chex.assert_shape(labels, [num_data, 1])
  # 1.2. Split num_data into batches of size batch_size.
batched_logits = logits.reshape(
[num_enn_samples, num_batches, batch_size, num_classes])
batched_labels = labels.reshape([num_batches, batch_size, 1])
##############################################################################
# 2. We want num_batches to be the leading axis. It is already the case for
# batched_labels, but we need to change axes for batched_logits.
batched_logits = batched_logits.swapaxes(0, 1)
chex.assert_shape(batched_logits,
[num_batches, num_enn_samples, batch_size, num_classes])
return batched_logits, batched_labels
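# Illustrative sketch, added for exposition and not part of the original
# library: demonstrates how reshape_to_smaller_batches splits num_data into
# batches and silently drops the remainder. All shapes are assumptions chosen
# only for this demonstration.
def _example_reshape_to_smaller_batches() -> None:
  """Shows the output shapes, including the discarded remainder data."""
  num_enn_samples, num_data, num_classes = 3, 10, 4
  logits = jnp.zeros([num_enn_samples, num_data, num_classes])
  labels = jnp.zeros([num_data, 1], dtype=jnp.int32)
  batched_logits, batched_labels = reshape_to_smaller_batches(
      logits, labels, batch_size=3)
  # 10 // 3 = 3 batches and 10 % 3 = 1 data point is discarded.
  chex.assert_shape(batched_logits, (3, num_enn_samples, 3, num_classes))
  chex.assert_shape(batched_labels, (3, 3, 1))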
| enn-master | enn/metrics/joint.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base for defining metrics."""
from typing import Optional, Dict, NamedTuple
import chex
import jax
import jax.numpy as jnp
import typing_extensions
class MetricCalculator(typing_extensions.Protocol):
"""Interface for evaluation of multiple posterior samples based on a metric."""
def __call__(self, logits: chex.Array, labels: chex.Array) -> float:
"""Calculates a metric based on logits and labels.
Args:
      logits: An array of shape [A, B, C] where B is the batch size of the
        data, C is the number of outputs per datapoint (for classification,
        the number of classes), and A is the number of ENN samples for each
        datapoint.
      labels: An array of shape [B, 1] where B is the batch size of the data.
    Returns:
      A float specifying the value of the metric.
"""
class PerExampleMetric(typing_extensions.Protocol):
"""Interface for metric per example."""
def __call__(self, logits: chex.Array, labels: chex.Array) -> chex.Array:
"""Calculates a metric based on logits and labels.
Args:
      logits: An array of shape [A, B, C] where B is the batch size of the
        data, C is the number of outputs per datapoint (for classification,
        the number of classes), and A is the number of ENN samples for each
        datapoint.
      labels: An array of shape [B, 1] where B is the batch size of the data.
Returns:
A metric per example of shape [B,].
"""
class MetricsState(NamedTuple):
"""State for metrics aggregation, default value should work for init."""
value: float = 0. # Should keep track of final metric value post aggregation
count: int = 0 # The number of times the aggregator has been called
extra: Optional[Dict[str, chex.Array]] = None # Extra sufficient statistics.
class AggregateMetricCalculator(typing_extensions.Protocol):
def __call__(
self,
logits: chex.Array,
labels: chex.Array,
state: Optional[MetricsState] = None,
) -> MetricsState:
"""Aggregates metric calculated over logits and labels with state."""
def make_average_aggregator(
metric: MetricCalculator) -> AggregateMetricCalculator:
"""Keeps a running average of metric evaluated per batch."""
def agg_metric(
logits: chex.Array,
labels: chex.Array,
state: Optional[MetricsState] = None,
) -> MetricsState:
value = metric(logits, labels)
if state is None:
# Initialize state
state = MetricsState()
new_count = state.count + 1
new_value = (value + state.value * state.count) / new_count
return MetricsState(new_value, new_count)
return agg_metric
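# Illustrative sketch, added for exposition and not part of the original
# library: threads MetricsState through successive batches to keep a running
# average. The toy metric and array values are assumptions chosen only for
# this demonstration.
def _example_average_aggregator() -> None:
  """Aggregates a toy metric (mean logit) over two batches."""
  toy_metric = lambda logits, labels: jnp.mean(logits)
  agg = make_average_aggregator(toy_metric)
  labels = jnp.zeros([4, 1], dtype=jnp.int32)
  state = agg(jnp.ones([2, 4, 3]), labels)  # Running average is 1.
  state = agg(3 * jnp.ones([2, 4, 3]), labels, state)  # (1 + 3) / 2 = 2.
  assert jnp.isclose(state.value, 2.0)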
def average_sampled_log_likelihood(x: chex.Array) -> float:
"""Computes average log likelihood from samples.
  This method takes several samples of log-likelihood, converts
  them to likelihood (by exp), takes the average, and then
  returns the logarithm of the average. The LogSumExp
  trick is used for numerical stability.
Args:
    x: An array of sampled log likelihoods.
  Returns:
    The log-mean-exp of x.
"""
return jax.lax.cond(
jnp.isneginf(jnp.max(x)),
lambda x: -jnp.inf,
lambda x: jnp.log(jnp.mean(jnp.exp(x - jnp.max(x)))) + jnp.max(x),
operand=x,
)
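# Illustrative sketch, added for exposition and not part of the original
# library: a worked example of the log-mean-exp computation. Two sampled log
# likelihoods log(0.5) and log(0.25) average to a likelihood of 0.375, so the
# result is log(0.375). The values are assumptions for demonstration only.
def _example_average_sampled_log_likelihood() -> None:
  """Checks log-mean-exp on two hand-picked samples (illustrative only)."""
  sampled_ll = jnp.log(jnp.array([0.5, 0.25]))
  assert jnp.isclose(average_sampled_log_likelihood(sampled_ll),
                     jnp.log(0.375))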
| enn-master | enn/metrics/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Metrics used to evaluate marginal predictions."""
import chex
from enn.metrics import base as metrics_base
import jax
import jax.numpy as jnp
def make_nll_marginal_calculator() -> metrics_base.MetricCalculator:
"""Returns a MetricCalculator for marginal negative log likelihood (nll)."""
return lambda x, y: -1 * calculate_marginal_ll(x, y)
def make_accuracy_calculator() -> metrics_base.MetricCalculator:
"""Returns a MetricCalculator that calculate accuracy."""
return calculate_accuracy
def calculate_marginal_ll(logits: chex.Array, labels: chex.Array) -> float:
"""Computes marginal log likelihood (ll) aggregated over enn samples."""
unused_num_enn_samples, num_data, num_classes = logits.shape
chex.assert_shape(labels, (num_data, 1))
probs = jnp.mean(jax.nn.softmax(logits), axis=0)
chex.assert_shape(probs, [num_data, num_classes])
return categorical_log_likelihood(probs, labels) / num_data
def calculate_accuracy(logits: chex.Array, labels: chex.Array) -> float:
"""Computes classification accuracy (acc) aggregated over enn samples."""
chex.assert_rank(logits, 3)
unused_num_enn_samples, num_data, num_classes = logits.shape
chex.assert_shape(labels, [num_data, 1])
class_probs = jax.nn.softmax(logits)
mean_class_prob = jnp.mean(class_probs, axis=0)
chex.assert_shape(mean_class_prob, [num_data, num_classes])
predictions = jnp.argmax(mean_class_prob, axis=1)[:, None]
chex.assert_shape(predictions, [num_data, 1])
return jnp.mean(predictions == labels)
def categorical_log_likelihood(probs: chex.Array, labels: chex.Array) -> float:
"""Computes joint log likelihood based on probs and labels."""
num_data, unused_num_classes = probs.shape
assert len(labels) == num_data
assigned_probs = probs[jnp.arange(num_data), jnp.squeeze(labels)]
return jnp.sum(jnp.log(assigned_probs))
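# Illustrative sketch, added for exposition and not part of the original
# library: with uniform logits the marginal nll per example is
# log(num_classes), and the argmax predictor returns class 0 on ties, so
# all-zero labels give accuracy 1. The shapes and values are assumptions for
# demonstration only.
def _example_marginal_metrics() -> None:
  """Evaluates marginal nll and accuracy on uniform logits."""
  logits = jnp.zeros([2, 6, 4])  # [num_enn_samples, num_data, num_classes]
  labels = jnp.zeros([6, 1], dtype=jnp.int32)
  nll = make_nll_marginal_calculator()(logits, labels)
  acc = make_accuracy_calculator()(logits, labels)
  assert jnp.isclose(nll, jnp.log(4.0))  # -log(1 / num_classes) per example.
  assert jnp.isclose(acc, 1.0)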
| enn-master | enn/metrics/marginal.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for calculating calibration error."""
import dataclasses
from typing import Dict, Optional, Tuple
import chex
from enn.metrics import base as metrics_base
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
class ExpectedCalibrationError(metrics_base.AggregateMetricCalculator):
"""Computes expected calibration error (ECE) aggregated over enn samples.
Expected calibration error (Guo et al., 2017, Naeini et al., 2015) is a scalar
measure of calibration for probabilistic models. Calibration is defined as the
level to which the accuracy over a set of predicted decisions and true
outcomes associated with a given predicted probability level matches the
predicted probability. A perfectly calibrated model would be correct `p`% of
the time for all examples for which the predicted probability was `p`%, over
all values of `p`.
This metric can be computed as follows. First, convert the logits into
  probabilities and take their average over enn samples. Second,
cut up the probability space interval [0, 1] into some number of bins. Then,
for each example, store the predicted class (based on a threshold of 0.5 in
the binary case and the max probability in the multiclass case), the predicted
probability corresponding to the predicted class, and the true label into the
corresponding bin based on the predicted probability. Then, for each bin,
compute the average predicted probability ("confidence"), the accuracy of the
predicted classes, and the absolute difference between the confidence and the
accuracy ("calibration error"). Expected calibration error can then be
computed as a weighted average calibration error over all bins, weighted based
on the number of examples per bin.
Perfect calibration under this setup is when, for all bins, the average
predicted probability matches the accuracy, and thus the expected calibration
error equals zero. In the limit as the number of bins goes to infinity, the
predicted probability would be equal to the accuracy for all possible
probabilities.
References:
1. Guo, C., Pleiss, G., Sun, Y. & Weinberger, K. Q. On Calibration of Modern
Neural Networks. in International Conference on Machine Learning (ICML)
cs.LG, (Cornell University Library, 2017).
2. Naeini, M. P., Cooper, G. F. & Hauskrecht, M. Obtaining Well Calibrated
Probabilities Using Bayesian Binning. Proc Conf AAAI Artif Intell 2015,
2901-2907 (2015).
"""
def __init__(self, num_bins: int):
self.num_bins = num_bins
def _get_init_stats(self,) -> metrics_base.MetricsState:
"""Returns initial sufficient statistics for ece."""
init_ece_stats = {
'correct_sums': jnp.zeros(self.num_bins),
'prob_sums': jnp.zeros(self.num_bins),
'counts': jnp.zeros(self.num_bins),
}
return metrics_base.MetricsState(
value=0,
count=0,
extra=init_ece_stats,
)
def __call__(
self,
logits: chex.Array,
labels: chex.Array,
state: Optional[metrics_base.MetricsState] = None,
) -> metrics_base.MetricsState:
"""Returns ece state."""
chex.assert_rank(logits, 3)
unused_num_enn_samples, num_data, num_classes = logits.shape
chex.assert_shape(labels, [num_data, 1])
class_probs = jax.nn.softmax(logits)
mean_class_prob = jnp.mean(class_probs, axis=0)
chex.assert_shape(mean_class_prob, [num_data, num_classes])
batch_stats = _compute_per_batch_ece_stat(
probs=mean_class_prob, labels=labels, num_bins=self.num_bins)
if state is None:
# Initialize state
state = self._get_init_stats()
# Update state
new_stats = jax.tree_util.tree_map(jnp.add, state.extra, batch_stats)
new_count = state.count + 1
new_value = _map_stats_to_ece(new_stats)
return metrics_base.MetricsState(
value=new_value,
count=new_count,
extra=new_stats,
)
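# Illustrative sketch, added for exposition and not part of the original
# library: the ECE aggregator is called once per batch while threading its
# MetricsState. The shapes and values are assumptions for demonstration only.
def _example_expected_calibration_error() -> None:
  """Accumulates ECE sufficient statistics over two batches."""
  ece = ExpectedCalibrationError(num_bins=10)
  labels = jnp.zeros([8, 1], dtype=jnp.int32)
  state = ece(jnp.zeros([3, 8, 5]), labels)  # Initializes and updates state.
  state = ece(jnp.ones([3, 8, 5]), labels, state)  # Pools bin statistics.
  # state.value holds the ECE computed from the pooled bin statistics.
  assert 0. <= float(state.value) <= 1.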
@dataclasses.dataclass
class SingleBatchECE(metrics_base.MetricCalculator):
"""Computes expected calibration error (ECE) aggregated over enn samples.
Note: this calculator can be used only in the case where all data is provided
in ONE batch.
"""
num_bins: int
def __call__(self, logits: chex.Array, labels: chex.Array) -> float:
"""Returns ece."""
chex.assert_rank(logits, 3)
unused_num_enn_samples, num_data, num_classes = logits.shape
chex.assert_shape(labels, [num_data, 1])
class_probs = jax.nn.softmax(logits)
mean_class_prob = jnp.mean(class_probs, axis=0)
chex.assert_shape(mean_class_prob, [num_data, num_classes])
predictions = jnp.argmax(mean_class_prob, axis=1)[:, None]
chex.assert_shape(predictions, labels.shape)
# ece
mean_class_logits = jnp.log(mean_class_prob)
chex.assert_shape(mean_class_logits, (num_data, num_classes))
labels_true = jnp.squeeze(labels, axis=-1)
chex.assert_shape(labels_true, (num_data,))
labels_predicted = jnp.squeeze(predictions, axis=-1)
chex.assert_shape(labels_predicted, (num_data,))
return tfp.stats.expected_calibration_error(
num_bins=self.num_bins,
logits=mean_class_logits,
labels_true=labels_true,
labels_predicted=labels_predicted,
)
# JAX implementation of tf.histogram_fixed_width_bins
def _histogram_fixed_width_bins(values: chex.Array,
value_range: Tuple[float, float],
num_bins: int,) -> chex.Array:
"""Bins the given values for use in a histogram.
Args:
values: An array.
value_range: A tuple of the form (min_value, max_value). value <= min_value
will be mapped to the first bin and value >= max_value will be mapped to
the last bin.
num_bins: Number of histogram bins.
Returns:
An array holding the indices of the binned values whose shape matches
values.
"""
_, bin_edges = jnp.histogram(values, bins=num_bins, range=value_range)
return jnp.digitize(values, bins=bin_edges[1:])
# JAX implementation of tf.math.unsorted_segment_sum
def _unsorted_segment_sum(values: chex.Array,
segment_ids: chex.Array,
num_segments: int):
"""Computes the sum within segments of an array.
Args:
values: an array with the values to be summed.
segment_ids: an array with integer dtype that indicates the segments of
`values` (along its leading axis) to be summed. Values can be repeated and
need not be sorted.
num_segments: An int with nonnegative value indicating the number of
segments.
Returns:
An array representing the segment sums.
"""
return jax.ops.segment_sum(
values, segment_ids=segment_ids, num_segments=num_segments)
def _compute_per_batch_ece_stat(
probs: chex.Array,
labels: chex.Array,
num_bins: int,
) -> Dict[str, chex.Array]:
"""Computes sufficient statistics of Expected Calibration Error (ECE).
Args:
probs: An array of shape [num_data, num_classes].
labels: An array of shape [num_data, 1].
num_bins: Number of bins to maintain over the interval [0, 1].
Returns:
A dict of sufficient statistics.
"""
chex.assert_rank(probs, 2)
num_data, unused_num_classes = probs.shape
chex.assert_shape(labels, [num_data, 1])
# Compute predicted labels per example given class probabilities
pred_labels = jnp.argmax(probs, axis=-1)
# Compute maximum predicted probs per example given class probabilities
pred_probs = jnp.max(probs, axis=-1)
# Flatten labels to [num_data, ].
labels = jnp.squeeze(labels)
correct_preds = jnp.equal(pred_labels, labels)
correct_preds = jnp.asarray(correct_preds, dtype=jnp.float32)
bin_indices = _histogram_fixed_width_bins(
values=pred_probs, value_range=(0., 1.), num_bins=num_bins)
correct_sums = _unsorted_segment_sum(
values=correct_preds,
segment_ids=bin_indices,
num_segments=num_bins,
)
prob_sums = _unsorted_segment_sum(
values=pred_probs,
segment_ids=bin_indices,
num_segments=num_bins,
)
counts = _unsorted_segment_sum(
values=jnp.ones_like(bin_indices),
segment_ids=bin_indices,
num_segments=num_bins,
)
ece_state = {
'correct_sums': correct_sums,
'prob_sums': prob_sums,
'counts': counts,
}
return ece_state
def _map_stats_to_ece(ece_stats: Dict[str, chex.Array]) -> float:
"""Maps ece sufficient statistics to the ece value.
ECE = Sum over bins (|bin-acc - bin-conf| * bin-count / total-count)
Args:
ece_stats: A dict of sufficient statistics for calculating ece.
Returns:
ECE value.
"""
assert 'counts' in ece_stats
assert 'correct_sums' in ece_stats
assert 'prob_sums' in ece_stats
counts = ece_stats['counts']
accs = jnp.nan_to_num(ece_stats['correct_sums'] / counts)
confs = jnp.nan_to_num(ece_stats['prob_sums'] / counts)
total_count = jnp.sum(counts)
  return jnp.sum(counts / total_count * jnp.abs(accs - confs))
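# Illustrative sketch, added for exposition and not part of the original
# library: a worked example of the ECE formula above. With two occupied bins,
# accuracies (1.0, 0.5), confidences (0.8, 0.6) and counts (2, 2) give
# ECE = 0.5 * 0.2 + 0.5 * 0.1 = 0.15. The statistics are hand-picked
# assumptions for demonstration only.
def _example_map_stats_to_ece() -> None:
  """Checks _map_stats_to_ece on hand-constructed bin statistics."""
  ece_stats = {
      'correct_sums': jnp.array([2.0, 1.0]),  # Bin accuracies: 1.0 and 0.5.
      'prob_sums': jnp.array([1.6, 1.2]),  # Bin confidences: 0.8 and 0.6.
      'counts': jnp.array([2.0, 2.0]),
  }
  assert jnp.isclose(_map_stats_to_ece(ece_stats), 0.15)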
| enn-master | enn/metrics/calibration.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prior losses are losses that regulate towards the prior."""
import dataclasses
from absl import logging
import chex
from enn import base
from enn import datasets
from enn import networks
from enn import utils
from enn.losses import base as losses_base
import haiku as hk
import jax
import jax.numpy as jnp
import typing_extensions
class FakeInputGenerator(typing_extensions.Protocol):
def __call__(self, batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> chex.Array:
"""Generates a fake batch of input=x for use in prior regularization."""
@dataclasses.dataclass
class MatchingGaussianData(FakeInputGenerator):
"""Generates a fake batch of input=x for use in prior regularization."""
scale: float = 1.
def __call__(self, batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> chex.Array:
"""Generates a fake batch of input=x for use in prior regularization."""
return jax.random.normal(key, batch.x.shape) * self.scale
def make_gaussian_dataset(batch_size: int,
input_dim: int,
seed: int = 0) -> datasets.ArrayBatchIterator:
"""Returns a batch iterator over random Gaussian data."""
sample_fn = jax.jit(lambda x: jax.random.normal(x, [batch_size, input_dim]))
def batch_iterator():
rng = hk.PRNGSequence(seed)
while True:
x = sample_fn(next(rng))
yield datasets.ArrayBatch(x=x, y=jnp.ones([x.shape[0], 1]))
return batch_iterator()
def variance_kl(var: chex.Array,
pred_log_var: chex.Array) -> chex.Array:
"""Compute the KL divergence between Gaussian variance with matched means."""
log_var = jnp.log(var)
pred_var = jnp.exp(pred_log_var)
return 0.5 * (pred_log_var - log_var + var / pred_var - 1)
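# Illustrative sketch, added for exposition and not part of the original
# library: variance_kl is zero when the predicted variance matches the target
# variance and positive otherwise. The values are assumptions for
# demonstration only.
def _example_variance_kl() -> None:
  """Checks variance_kl at and away from the matched-variance minimum."""
  var = jnp.array([0.5, 1.0, 2.0])
  assert jnp.allclose(variance_kl(var, pred_log_var=jnp.log(var)), 0.)
  assert jnp.all(variance_kl(var, pred_log_var=jnp.log(2. * var)) > 0.)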
# TODO(author3): Remove and use generate_batched_forward_at_data_with_state.
def generate_batched_forward_at_data(
num_index_sample: int, x: chex.Array,
enn: networks.EnnNoState, params: hk.Params,
key: chex.PRNGKey) -> networks.Output:
"""Generate enn output for batch of data with indices based on random key."""
batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_sample)
batched_forward = jax.vmap(enn.apply, in_axes=[None, None, 0])
batched_out = batched_forward(params, x, batched_indexer(key))
return batched_out
def generate_batched_forward_at_data_with_state(
num_index_sample: int, x: chex.Array,
enn: networks.EnnArray, params: hk.Params,
key: chex.PRNGKey) -> networks.Output:
"""Generate enn output for batch of data with indices based on random key."""
batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_sample)
batched_forward = jax.vmap(enn.apply, in_axes=[None, None, None, 0])
unused_state = {}
batched_out, unused_state = batched_forward(params, unused_state, x,
batched_indexer(key))
return batched_out
def l2_training_penalty(batched_out: networks.Output):
"""Penalize the L2 magnitude of the training network."""
if isinstance(batched_out, networks.OutputWithPrior):
return 0.5 * jnp.mean(jnp.square(batched_out.train))
else:
logging.warning('L2 weight penalty only works for OutputWithPrior.')
return 0.
def distill_mean_regression(
batched_out: networks.Output,
distill_out: networks.Output) -> chex.Array:
"""Train the mean of the regression to the distill network."""
observed_mean = jnp.mean(networks.parse_net_output(batched_out), axis=0)
distill_mean = jnp.squeeze(networks.parse_net_output(distill_out))
return jnp.mean(jnp.square(distill_mean - observed_mean))
def distill_mean_classification(
batched_out: networks.Output,
distill_out: networks.Output) -> chex.Array:
"""Train the mean of the classification to the distill network."""
batched_logits = networks.parse_net_output(batched_out)
batched_probs = jax.nn.softmax(batched_logits, axis=-1)
mean_probs = jnp.mean(batched_probs, axis=0)
distill_probs = jax.nn.softmax(
networks.parse_net_output(distill_out), axis=-1)
return jnp.mean(
jnp.sum(mean_probs * jnp.log(mean_probs / distill_probs), axis=1))
def distill_var_regression(batched_out: networks.Output,
distill_out: networks.Output) -> chex.Array:
"""Train the variance of the regression to the distill network."""
assert isinstance(distill_out, networks.OutputWithPrior)
observed_var = jnp.var(networks.parse_net_output(batched_out), axis=0)
return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
def distill_var_classification(
batched_out: networks.Output,
distill_out: networks.Output) -> chex.Array:
"""Train the variance of the classification to the distill network."""
assert isinstance(distill_out, networks.OutputWithPrior)
batched_logits = networks.parse_net_output(batched_out)
observed_var = jnp.var(jax.nn.softmax(batched_logits, axis=-1))
return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var']))
@dataclasses.dataclass
class RegressionPriorLoss(losses_base.LossFnArray):
"""Regress fake data back to prior, and distill mean/var to mean_index."""
num_index_sample: int
input_generator: FakeInputGenerator = dataclasses.field(
default_factory=MatchingGaussianData
)
scale: float = 1.
distill_index: bool = False
def __call__(self, enn: networks.EnnArray,
params: hk.Params, state: hk.State, batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> base.LossOutput:
index_key, data_key = jax.random.split(key)
fake_x = self.input_generator(batch, data_key)
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
batched_out = generate_batched_forward_at_data_with_state(
self.num_index_sample,
fake_x,
enn,
params,
index_key,
)
# Regularize towards prior output
loss = self.scale * l2_training_penalty(batched_out)
# Distill aggregate stats to the "mean_index"
if hasattr(enn.indexer, 'mean_index') and self.distill_index:
distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
loss += distill_mean_regression(batched_out, distill_out)
loss += distill_var_regression(batched_out, distill_out)
return loss, (state, {}) # pytype: disable=bad-return-type # numpy-scalars
@dataclasses.dataclass
class ClassificationPriorLoss(losses_base.LossFnArray):
"""Penalize fake data back to prior, and distill mean/var to mean_index."""
num_index_sample: int
input_generator: FakeInputGenerator = dataclasses.field(
default_factory=MatchingGaussianData
)
scale: float = 1.
distill_index: bool = False
def __call__(
self,
enn: networks.EnnArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> base.LossOutput:
index_key, data_key = jax.random.split(key)
fake_x = self.input_generator(batch, data_key)
# TODO(author2): Complete prior loss refactor --> MultilossExperiment
batched_out = generate_batched_forward_at_data_with_state(
self.num_index_sample, fake_x, enn, params, index_key)
# Regularize towards prior output
loss = self.scale * l2_training_penalty(batched_out)
# Distill aggregate stats to the "mean_index"
if hasattr(enn.indexer, 'mean_index') and self.distill_index:
distill_out = enn.apply(params, fake_x, enn.indexer.mean_index)
loss += distill_mean_classification(batched_out, distill_out)
loss += distill_var_classification(batched_out, distill_out)
return loss, (state, {}) # pytype: disable=bad-return-type # numpy-scalars
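# Illustrative sketch, added for exposition and not part of the original
# library: constructing the prior losses. The hyperparameter values are
# arbitrary assumptions; in practice these losses are combined with a data
# loss, e.g. via enn.losses.combine_losses.
def _example_prior_loss_configs() -> None:
  """Builds example prior losses with arbitrary hyperparameters."""
  regression_prior = RegressionPriorLoss(
      num_index_sample=10,
      input_generator=MatchingGaussianData(scale=1.),
      scale=0.1)
  classification_prior = ClassificationPriorLoss(num_index_sample=10,
                                                 scale=0.1)
  del regression_prior, classification_prior  # Constructed for illustration.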
| enn-master | enn/losses/prior_losses.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Collection of simple losses applied to one single index."""
import dataclasses
from typing import Callable, Optional
import chex
from enn import base
from enn import datasets
from enn import networks
from enn.losses import base as losses_base
import haiku as hk
import jax
import jax.numpy as jnp
@dataclasses.dataclass
class L2Loss(losses_base.SingleLossFnArray):
"""L2 regression applied to a single epistemic index."""
def __call__(self,
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,) -> base.LossOutput:
"""L2 regression applied to a single epistemic index."""
chex.assert_shape(batch.y, (None, 1))
chex.assert_shape(batch.data_index, (None, 1))
net_out, state = apply(params, state, batch.x, index)
net_out = networks.parse_net_output(net_out)
chex.assert_equal_shape([net_out, batch.y])
    sq_loss = jnp.square(net_out - batch.y)
if batch.weights is None:
batch_weights = jnp.ones_like(batch.data_index)
else:
batch_weights = batch.weights
chex.assert_equal_shape([batch_weights, sq_loss])
return jnp.mean(batch_weights * sq_loss), (state, {})
class XentLoss(losses_base.SingleLossFnArray):
"""Cross-entropy single index loss with network state as auxiliary."""
def __init__(self, num_classes: int):
assert num_classes > 1
super().__init__()
self.num_classes = num_classes
labeller = lambda x: jax.nn.one_hot(x, self.num_classes)
self._loss = xent_loss_with_custom_labels(labeller)
def __call__(
self,
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
return self._loss(apply, params, state, batch, index)
def xent_loss_with_custom_labels(
labeller: Callable[[chex.Array], chex.Array]
) -> losses_base.SingleLossFnArray:
"""Factory method to create a loss function with custom labelling."""
def single_loss(
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
"""Xent loss with custom labelling."""
chex.assert_shape(batch.y, (None, 1))
net_out, state = apply(params, state, batch.x, index)
logits = networks.parse_net_output(net_out)
labels = labeller(batch.y[:, 0])
softmax_xent = -jnp.sum(
labels * jax.nn.log_softmax(logits), axis=1, keepdims=True)
if batch.weights is None:
batch_weights = jnp.ones_like(batch.y)
else:
batch_weights = batch.weights
chex.assert_equal_shape([batch_weights, softmax_xent])
loss = jnp.mean(batch_weights * softmax_xent)
return loss, (state, {'loss': loss})
return single_loss
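# Illustrative sketch, added for exposition and not part of the original
# library: the labeller lets you swap one-hot targets for alternatives such as
# label smoothing. The smoothing value and class count are assumptions for
# demonstration only.
def _example_label_smoothing_loss(
    num_classes: int = 10,
    smoothing: float = 0.1,
) -> losses_base.SingleLossFnArray:
  """Returns a xent loss whose targets are smoothed one-hot labels."""
  def labeller(y: chex.Array) -> chex.Array:
    one_hot = jax.nn.one_hot(y, num_classes)
    return (1. - smoothing) * one_hot + smoothing / num_classes
  return xent_loss_with_custom_labels(labeller)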
@dataclasses.dataclass
class AccuracyErrorLoss(losses_base.SingleLossFnArray):
"""Evaluates the accuracy error of a greedy logit predictor."""
num_classes: int
def __call__(self, apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index) -> base.LossOutput:
chex.assert_shape(batch.y, (None, 1))
net_out, state = apply(params, state, batch.x, index)
logits = networks.parse_net_output(net_out)
preds = jnp.argmax(logits, axis=1)
correct = (preds == batch.y[:, 0])
accuracy = jnp.mean(correct)
return 1 - accuracy, (state, {'accuracy': accuracy})
@dataclasses.dataclass
class ElboLoss(losses_base.SingleLossFnArray):
"""Standard VI loss (negative of evidence lower bound).
  Given a latent variable u with variational density q(u), prior density
  p_0(u), and likelihood function p(D|u), the evidence lower bound is
      ELBO(q) = E_{u ~ q}[log(p(D|u))] - KL(q(u) || p_0(u))
  In other words, maximizing the ELBO is equivalent to regularized log
  likelihood maximization, where the regularization encourages the learned
  latent distribution to stay close to the latent prior as measured by KL.
"""
log_likelihood_fn: Callable[[networks.Output, datasets.ArrayBatch], float]
model_prior_kl_fn: Callable[
[networks.Output, hk.Params, base.Index], float]
temperature: Optional[float] = None
input_dim: Optional[int] = None
def __call__(
self,
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
"""This function returns a one-sample MC estimate of the ELBO."""
out, state = apply(params, state, batch.x, index)
log_likelihood = self.log_likelihood_fn(out, batch)
model_prior_kl = self.model_prior_kl_fn(out, params, index)
chex.assert_equal_shape([log_likelihood, model_prior_kl])
if self.temperature and self.input_dim:
model_prior_kl *= jnp.sqrt(self.temperature) * self.input_dim
return model_prior_kl - log_likelihood, (state, {}) # pytype: disable=bad-return-type # numpy-scalars
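# Illustrative sketch, added for exposition and not part of the original
# library: ElboLoss is assembled from a log likelihood function and a
# model/prior KL function. The placeholder lambdas below are assumptions that
# only show the expected call signatures; real choices live in
# enn.losses.vi_losses.
def _example_elbo_loss() -> ElboLoss:
  """Builds an ElboLoss from trivial placeholder functions."""
  log_likelihood_fn = lambda out, batch: jnp.mean(
      networks.parse_net_output(out))
  model_prior_kl_fn = lambda out, params, index: jnp.zeros(())
  return ElboLoss(
      log_likelihood_fn=log_likelihood_fn,
      model_prior_kl_fn=model_prior_kl_fn)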
@dataclasses.dataclass
class VaeLoss(losses_base.SingleLossFnArray):
"""VAE loss."""
log_likelihood_fn: Callable[[networks.OutputWithPrior, datasets.ArrayBatch],
float]
latent_kl_fn: Callable[[networks.OutputWithPrior], float]
def __call__(
self,
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
net_out, state = apply(params, state, batch.x, index)
kl_term = self.latent_kl_fn(net_out)
log_likelihood = self.log_likelihood_fn(net_out, batch)
return kl_term - log_likelihood, (state, {}) # pytype: disable=bad-return-type # numpy-scalars
################################################################################
# The default single loss definitions above assume that the apply fn takes a
# state. Below we provide a wrapper to convert the single loss definitions
# above into losses that work with an apply fn that doesn't take a state.
def wrap_single_loss_as_single_loss_no_state(
single_loss: losses_base.SingleLossFnArray,
constant_state: Optional[hk.State] = None,
) -> losses_base.SingleLossFnNoState:
"""Wraps a legacy enn single loss with no state as an enn single loss."""
if constant_state is None:
constant_state = {}
def new_loss(
apply: networks.ApplyNoState,
params: hk.Params,
batch: datasets.ArrayBatch,
index: base.Index,
) -> losses_base.LossOutputNoState:
apply_with_state = networks.wrap_apply_no_state_as_apply(apply)
loss, (unused_state, metrics) = single_loss(apply_with_state, params,
constant_state, batch, index)
return loss, metrics
return new_loss
| enn-master | enn/losses/single_index.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN vi losses."""
from absl.testing import absltest
from absl.testing import parameterized
from enn.losses import vi_losses
import numpy as np
class LossesTest(parameterized.TestCase):
@parameterized.parameters([
[1., 0., 1.0],
[1., 0., 10.0],
[10., 1., 1.0],
[10., 1., 10.0],
[10., 10., 1.0],
])
def test_analytical_diagonal_linear_model_prior_kl_fn(
self, sigma: float, mu: float, sigma_0: float):
"""Tests the elbo function for the case of simple weights and biases."""
num_params = 10
kl_fn = vi_losses.get_analytical_diagonal_linear_model_prior_kl_fn(
num_samples=1, sigma_0=sigma_0)
w_scale = np.log(np.exp(sigma) - 1) # sigma = log(1 + exp(w))
params = {'layer': {
'w': w_scale * np.ones((num_params,)),
'b': mu * np.ones((num_params,))}}
kl = 0.5 * num_params * (
(sigma / sigma_0)**2
+ (mu / sigma_0)**2 - 1
- 2 * np.log(sigma / sigma_0))
kl_estimate = kl_fn(out=np.zeros((1, 2)),
params=params,
index=np.zeros((1, 2)))
kl = float(np.round(kl, 2))
kl_estimate = float(np.round(kl_estimate, 2))
self.assertBetween((kl_estimate - kl) / (kl + 1e-9), -1e-3, 1e-3,
f'prior KL estimate is {kl_estimate}, expected: {kl}')
if __name__ == '__main__':
absltest.main()
| enn-master | enn/losses/vi_losses_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of the losses."""
# Base
from enn.losses.base import average_single_index_loss
from enn.losses.base import average_single_index_loss_no_state
from enn.losses.base import LossFnArray
from enn.losses.base import LossFnNoState
from enn.losses.base import LossOutputNoState
from enn.losses.base import SingleLossFn
from enn.losses.base import SingleLossFnArray
from enn.losses.base import SingleLossFnNoState
# Categorical regression
from enn.losses.categorical_regression import Cat2HotRegression
from enn.losses.categorical_regression import transform_to_2hot
# Prior losses
from enn.losses.prior_losses import ClassificationPriorLoss
from enn.losses.prior_losses import generate_batched_forward_at_data
from enn.losses.prior_losses import generate_batched_forward_at_data_with_state
from enn.losses.prior_losses import make_gaussian_dataset
from enn.losses.prior_losses import MatchingGaussianData
from enn.losses.prior_losses import RegressionPriorLoss
# Single Index
from enn.losses.single_index import AccuracyErrorLoss
from enn.losses.single_index import ElboLoss
from enn.losses.single_index import L2Loss
from enn.losses.single_index import VaeLoss
from enn.losses.single_index import wrap_single_loss_as_single_loss_no_state
from enn.losses.single_index import xent_loss_with_custom_labels
from enn.losses.single_index import XentLoss
# Utils
from enn.losses.utils import add_data_noise
from enn.losses.utils import add_data_noise_no_state
from enn.losses.utils import add_l2_weight_decay
from enn.losses.utils import add_l2_weight_decay_no_state
from enn.losses.utils import combine_losses
from enn.losses.utils import combine_losses_as_metric
from enn.losses.utils import combine_losses_no_state
from enn.losses.utils import combine_losses_no_state_as_metric
from enn.losses.utils import combine_single_index_losses_as_metric
from enn.losses.utils import combine_single_index_losses_no_state_as_metric
from enn.losses.utils import CombineLossConfig
from enn.losses.utils import CombineLossConfigNoState
from enn.losses.utils import l2_weights_with_predicate
from enn.losses.utils import PredicateFn
from enn.losses.utils import wrap_loss_no_state_as_loss
from enn.losses.utils import wrap_single_loss_no_state_as_single_loss
# VAE losses
from enn.losses.vae_losses import binary_log_likelihood
from enn.losses.vae_losses import gaussian_log_likelihood
from enn.losses.vae_losses import get_log_likelihood_fn
from enn.losses.vae_losses import latent_kl_divergence
from enn.losses.vae_losses import latent_kl_fn
from enn.losses.vae_losses import LogLikelihoodFn
# VI losses
from enn.losses.vi_losses import get_analytical_diagonal_linear_model_prior_kl_fn
from enn.losses.vi_losses import get_analytical_hyperflow_model_prior_kl_fn
from enn.losses.vi_losses import get_analytical_linear_model_prior_kl_fn
from enn.losses.vi_losses import get_awgn_loglike_fn
from enn.losses.vi_losses import get_categorical_loglike_fn
from enn.losses.vi_losses import get_sample_based_model_prior_kl_fn
| enn-master | enn/losses/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN vi losses."""
from absl.testing import absltest
from absl.testing import parameterized
from enn.losses import vae_losses
import numpy as np
class LossesTest(parameterized.TestCase):
@parameterized.product(input_size=[1, 10, 100], batch_size=[1, 10])
def test_binary_log_likelihood(self, input_size: int, batch_size: int):
"""Tests the binary log likelihood."""
x = np.zeros((batch_size, input_size))
output = np.zeros_like(x)
log_likelihood = vae_losses.binary_log_likelihood(x, output)
result = -1 * input_size * np.log(2) * np.ones((batch_size,))
np.testing.assert_almost_equal(
log_likelihood,
result,
decimal=3,
err_msg=f'log_likelihood is {log_likelihood}, expected: {result}')
@parameterized.product(input_size=[1, 10, 100], batch_size=[1, 10])
def test_gaussian_log_likelihood(self, input_size: int, batch_size: int):
"""Tests the binary log likelihood."""
x = np.zeros((batch_size, input_size))
mean = np.zeros_like(x)
log_var = np.zeros_like(x)
log_likelihood = vae_losses.gaussian_log_likelihood(x, mean, log_var)
result = -0.5 * input_size * np.log(2 * np.pi) * np.ones((batch_size,))
np.testing.assert_almost_equal(
log_likelihood,
result,
decimal=3,
err_msg=f'log_likelihood is {log_likelihood}, expected: {result}')
@parameterized.product(input_size=[1, 10, 100], batch_size=[1, 10])
def test_latent_kl(self, input_size: int, batch_size: int):
"""Tests the binary log likelihood."""
mean = np.zeros((batch_size, input_size))
log_var = np.zeros_like(mean)
log_likelihood = vae_losses.latent_kl_divergence(mean, log_var)
result = 0 * np.ones((batch_size,))
np.testing.assert_almost_equal(
log_likelihood,
result,
decimal=3,
err_msg=f'log_likelihood is {log_likelihood}, expected: {result}')
if __name__ == '__main__':
absltest.main()
| enn-master | enn/losses/vae_losses_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helpful functions relating to losses."""
import dataclasses
from typing import Any, Callable, Dict, Generic, List, Optional, Sequence, Union
import chex
from enn import base
from enn import data_noise
from enn import datasets
from enn import networks
from enn.losses import base as losses_base
import haiku as hk
import jax
import jax.numpy as jnp
# Maps Haiku params (module_name, name, value) -> include or not
PredicateFn = Callable[[str, str, Any], bool]
# Specific type of loss function common in our losses
_LossFn = base.LossFn[base.Input, base.Output, base.Data]
_SingleLoss = losses_base.SingleLossFn[base.Input, base.Output, base.Data]
def l2_weights_with_predicate(
params: hk.Params,
predicate: Optional[PredicateFn] = None) -> jax.Array:
"""Sum of squares of parameter weights that passes predicate_fn."""
if predicate is not None:
params = hk.data_structures.filter(predicate, params)
  return sum(jnp.sum(jnp.square(p)) for p in jax.tree_util.tree_leaves(params))
def add_data_noise(
single_loss: _SingleLoss,
noise_fn: data_noise.DataNoiseBase[base.Data],
) -> _SingleLoss:
"""Applies a DataNoise function to each batch of data."""
def noisy_loss(
apply: base.ApplyFn[base.Input, base.Output],
params: hk.Params,
state: hk.State,
batch: base.Data,
index: base.Index,
) -> base.LossOutput:
noisy_batch = noise_fn(batch, index)
return single_loss(apply, params, state, noisy_batch, index)
return noisy_loss
def add_l2_weight_decay(
loss_fn: _LossFn,
scale: Union[float, Callable[[hk.Params], hk.Params]],
predicate: Optional[PredicateFn] = None
) -> _LossFn:
"""Adds scale * l2 weight decay to an existing loss function."""
try: # Scale is numeric.
scale = jnp.sqrt(scale)
scale_fn = lambda ps: jax.tree_util.tree_map(lambda p: scale * p, ps)
except TypeError:
scale_fn = scale # Assuming scale is a Callable.
def new_loss(
enn: base.EpistemicNetwork[base.Input, base.Output],
params: hk.Params, state: hk.State, batch: base.Data,
key: chex.PRNGKey) -> base.LossOutput:
loss, (state, metrics) = loss_fn(enn, params, state, batch, key)
decay = l2_weights_with_predicate(scale_fn(params), predicate)
total_loss = loss + decay
metrics['decay'] = decay
metrics['raw_loss'] = loss
return total_loss, (state, metrics)
return new_loss
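# Illustrative sketch, added for exposition and not part of the original
# library: a predicate can restrict the weight decay to a subset of the
# parameters, e.g. excluding haiku bias parameters (named 'b' by convention).
# The decay scale is an arbitrary assumption for demonstration only.
def _example_add_weight_decay_excluding_biases(
    loss_fn: _LossFn, scale: float = 1e-4) -> _LossFn:
  """Applies l2 weight decay to every parameter except biases."""
  exclude_biases: PredicateFn = lambda module, name, value: name != 'b'
  return add_l2_weight_decay(loss_fn, scale=scale, predicate=exclude_biases)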
def combine_single_index_losses_as_metric(
train_loss: _SingleLoss,
extra_losses: Dict[str, _SingleLoss],
) -> _SingleLoss:
"""Combines train_loss for training with extra_losses in metrics."""
def combined_loss(
apply: base.ApplyFn[base.Input, base.Output],
params: hk.Params, state: hk.State, batch: base.Data,
index: base.Index) -> base.LossOutput:
loss, (state, metrics) = train_loss(apply, params, state, batch, index)
for name, loss_fn in extra_losses.items():
extra_loss, (unused_state,
extra_metrics) = loss_fn(apply, params, state, batch, index)
metrics[f'{name}:loss'] = extra_loss
for key, value in extra_metrics.items():
metrics[f'{name}:{key}'] = value
return loss, (state, metrics)
return combined_loss
def combine_losses_as_metric(
train_loss: _LossFn, extra_losses: Dict[str, _LossFn]) -> _LossFn:
"""Combines train_loss for training with extra_losses in metrics."""
def combined_loss(enn: base.EpistemicNetwork[base.Input, base.Output],
params: hk.Params,
state: hk.State,
batch: base.Data,
key: chex.PRNGKey) -> base.LossOutput:
loss, (state, metrics) = train_loss(enn, params, state, batch, key)
for name, loss_fn in extra_losses.items():
extra_loss, (unused_state,
extra_metrics) = loss_fn(enn, params, state, batch, key)
metrics[f'{name}:loss'] = extra_loss
for key, value in extra_metrics.items():
metrics[f'{name}:{key}'] = value
return loss, (state, metrics)
return combined_loss
@dataclasses.dataclass
class CombineLossConfig(Generic[base.Input, base.Output, base.Data]):
loss_fn: _LossFn
name: str = 'unnamed' # Name for the loss function
weight: float = 1. # Weight to scale the loss by
# Module specialized to work only with Array inputs and Batch data.
_LossConfig = CombineLossConfig[chex.Array, networks.Output, base.Data]
def combine_losses(losses: Sequence[Union[_LossConfig, _LossFn]]) -> _LossFn:
"""Combines multiple losses into a single loss."""
clean_losses: List[CombineLossConfig] = []
for i, loss in enumerate(losses):
if not isinstance(loss, CombineLossConfig):
loss = CombineLossConfig(loss, name=f'loss_{i}')
clean_losses.append(loss)
def loss_fn(enn: base.EpistemicNetwork[base.Input, base.Output],
params: hk.Params,
state: hk.State,
batch: base.Data,
key: chex.PRNGKey) -> base.LossOutput:
combined_loss = 0.
combined_metrics = {}
for loss_config in clean_losses:
# Compute the loss types for use in conditional computation
# TODO(author3): This section is a bit of a hack, since we do not have a
# clear way to deal with network "state" when we combine multiple losses.
# For now, we just return the input state, but this is not correct when
# state is not empty.
loss, (unused_state,
metrics) = loss_config.loss_fn(enn, params, state, batch, key)
combined_metrics[f'{loss_config.name}:loss'] = loss
for name, value in metrics.items():
combined_metrics[f'{loss_config.name}:{name}'] = value
combined_loss += loss_config.weight * loss
return combined_loss, (state, combined_metrics)
return loss_fn
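# Illustrative sketch, added for exposition and not part of the original
# library: combining a data loss with a prior loss via CombineLossConfig. The
# weights are arbitrary assumptions; each component's loss and metrics appear
# in the combined metrics under its configured name.
def _example_combine_two_losses(
    data_loss: _LossFn, prior_loss: _LossFn) -> _LossFn:
  """Returns a weighted combination of a data loss and a prior loss."""
  return combine_losses([
      CombineLossConfig(data_loss, name='data', weight=1.),
      CombineLossConfig(prior_loss, name='prior', weight=0.1),
  ])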
# The utility loss functions above assume that the enn has a state.
# Since an enn might not have a state, below we define utility loss functions
# for loss functions without state.
# TODO(author3): Remove these utility fns and use the one with state instead.
def wrap_loss_no_state_as_loss(
loss_fn: losses_base.LossFnNoState,
constant_state: Optional[hk.State] = None,
) -> losses_base.LossFnArray:
"""Wraps a legacy enn loss with no state as an enn loss."""
if constant_state is None:
constant_state = {}
def new_loss(
enn: networks.EnnArray,
params: hk.Params,
unused_state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey
) -> base.LossOutput:
enn = networks.wrap_enn_as_enn_no_state(enn)
loss, metrics = loss_fn(enn, params, batch, key)
return loss, (constant_state, metrics)
return new_loss
def wrap_single_loss_no_state_as_single_loss(
single_loss: losses_base.SingleLossFnNoState,
constant_state: Optional[hk.State] = None,
) -> losses_base.SingleLossFnArray:
"""Wraps a legacy enn single loss with no state as an enn single loss."""
if constant_state is None:
constant_state = {}
def new_loss(
apply: networks.ApplyArray,
params: hk.Params,
unused_state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
def apply_no_state(params: hk.Params,
x: chex.Array,
z: base.Index) -> networks.Output:
output, unused_state = apply(params, constant_state, x, z)
return output
loss, metrics = single_loss(apply_no_state, params, batch, index)
return loss, (constant_state, metrics)
return new_loss
def add_data_noise_no_state(
single_loss: losses_base.SingleLossFnNoState,
noise_fn: data_noise.DataNoise,
) -> losses_base.SingleLossFnNoState:
"""Applies a DataNoise function to each batch of data."""
def noisy_loss(apply: networks.ApplyNoState,
params: hk.Params,
batch: datasets.ArrayBatch,
index: base.Index) -> losses_base.LossOutputNoState:
noisy_batch = noise_fn(batch, index)
return single_loss(apply, params, noisy_batch, index)
return noisy_loss
def add_l2_weight_decay_no_state(
loss_fn: losses_base.LossFnNoState,
scale: Union[float, Callable[[hk.Params], hk.Params]],
predicate: Optional[PredicateFn] = None
) -> losses_base.LossFnNoState:
"""Adds scale * l2 weight decay to an existing loss function."""
try: # Scale is numeric.
scale = jnp.sqrt(scale)
scale_fn = lambda ps: jax.tree_util.tree_map(lambda p: scale * p, ps)
except TypeError:
scale_fn = scale # Assuming scale is a Callable.
def new_loss(enn: networks.EnnNoState,
params: hk.Params,
batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> losses_base.LossOutputNoState:
loss, metrics = loss_fn(enn, params, batch, key)
decay = l2_weights_with_predicate(scale_fn(params), predicate)
total_loss = loss + decay
metrics['decay'] = decay
metrics['raw_loss'] = loss
return total_loss, metrics
return new_loss
def combine_single_index_losses_no_state_as_metric(
train_loss: losses_base.SingleLossFnNoState,
extra_losses: Dict[str, losses_base.SingleLossFnNoState],
) -> losses_base.SingleLossFnNoState:
"""Combines train_loss for training with extra_losses in metrics."""
def combined_loss(apply: networks.ApplyNoState,
params: hk.Params, batch: datasets.ArrayBatch,
index: base.Index) -> losses_base.LossOutputNoState:
loss, metrics = train_loss(apply, params, batch, index)
for name, loss_fn in extra_losses.items():
extra_loss, extra_metrics = loss_fn(apply, params, batch, index)
metrics[f'{name}:loss'] = extra_loss
for key, value in extra_metrics.items():
metrics[f'{name}:{key}'] = value
return loss, metrics
return combined_loss
def combine_losses_no_state_as_metric(
train_loss: losses_base.LossFnNoState,
extra_losses: Dict[str, losses_base.LossFnNoState],
) -> losses_base.LossFnNoState:
"""Combines train_loss for training with extra_losses in metrics."""
def combined_loss(enn: networks.EnnNoState,
params: hk.Params,
batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> losses_base.LossOutputNoState:
loss, metrics = train_loss(enn, params, batch, key)
for name, loss_fn in extra_losses.items():
extra_loss, extra_metrics = loss_fn(enn, params, batch, key)
metrics[f'{name}:loss'] = extra_loss
for key, value in extra_metrics.items():
metrics[f'{name}:{key}'] = value
return loss, metrics
return combined_loss
@dataclasses.dataclass
class CombineLossConfigNoState:
loss_fn: losses_base.LossFnNoState
name: str = 'unnamed' # Name for the loss function
weight: float = 1. # Weight to scale the loss by
def combine_losses_no_state(
losses: Sequence[Union[CombineLossConfigNoState, losses_base.LossFnNoState]]
) -> losses_base.LossFnNoState:
"""Combines multiple losses into a single loss."""
clean_losses: List[CombineLossConfigNoState] = []
for i, loss in enumerate(losses):
if not isinstance(loss, CombineLossConfigNoState):
loss = CombineLossConfigNoState(loss, name=f'loss_{i}')
clean_losses.append(loss)
def loss_fn(enn: networks.EnnNoState,
params: hk.Params,
batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> losses_base.LossOutputNoState:
combined_loss = 0.
combined_metrics = {}
for loss_config in clean_losses:
# Compute the loss types for use in conditional computation
loss, metrics = loss_config.loss_fn(enn, params, batch, key)
combined_metrics[f'{loss_config.name}:loss'] = loss
for name, value in metrics.items():
combined_metrics[f'{loss_config.name}:{name}'] = value
combined_loss += loss_config.weight * loss
return combined_loss, combined_metrics
return loss_fn
| enn-master | enn/losses/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing categorical regression (MuZero-style) in JAX."""
import dataclasses
import chex
from enn import base
from enn import datasets
from enn import networks
from enn.losses import base as losses_base
import haiku as hk
import jax
import jax.numpy as jnp
import rlax
def transform_to_2hot(target: chex.Array,
support: chex.Array) -> chex.Array:
"""Converts a scalar target to a 2-hot encoding of the nearest support."""
target = jnp.clip(target, support.min(), support.max())
high_idx = jnp.sum(support < target)
num_bins = len(support)
low_value = support[high_idx - 1]
high_value = support[high_idx]
prob = (target - high_value) / (low_value - high_value)
lower_one_hot = prob * rlax.one_hot(high_idx - 1, num_bins)
upper_one_hot = (1 - prob) * rlax.one_hot(high_idx, num_bins)
return lower_one_hot + upper_one_hot
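# Illustrative sketch, added for exposition and not part of the original
# library: a worked example of the 2-hot encoding. A target of 0.25 on the
# support [0, 0.5, 1] sits halfway between the first two atoms, so each
# receives probability 0.5. The values are assumptions for demonstration only.
def _example_transform_to_2hot() -> None:
  """Checks the 2-hot encoding of a target between two support atoms."""
  support = jnp.array([0., 0.5, 1.])
  probs = transform_to_2hot(jnp.array(0.25), support)
  assert jnp.allclose(probs, jnp.array([0.5, 0.5, 0.]))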
@dataclasses.dataclass
class Cat2HotRegression(losses_base.SingleLossFnArray):
"""Apply categorical loss to 2-hot regression target."""
def __call__(self, apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index) -> base.LossOutput:
chex.assert_shape(batch.y, (None, 1))
chex.assert_shape(batch.data_index, (None, 1))
# Forward network and check type
net_out, state = apply(params, state, batch.x, index)
assert isinstance(net_out, networks.CatOutputWithPrior)
# Form the target values in real space
target_val = batch.y - net_out.prior
# Convert values to 2-hot target probabilities
probs = jax.vmap(transform_to_2hot, in_axes=[0, None])(
jnp.squeeze(target_val), net_out.extra['atoms'])
probs = jnp.expand_dims(probs, 1)
xent_loss = -jnp.sum(probs * jax.nn.log_softmax(net_out.train), axis=-1)
if batch.weights is None:
batch_weights = jnp.ones_like(batch.data_index)
else:
batch_weights = batch.weights
chex.assert_equal_shape([batch_weights, xent_loss])
return jnp.mean(batch_weights * xent_loss), (state, {})
| enn-master | enn/losses/categorical_regression.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN single index losses."""
from typing import Dict, Text
from absl.testing import absltest
from absl.testing import parameterized
from enn import base
from enn import datasets
from enn import networks
from enn.losses import base as losses_base
from enn.losses import single_index
import haiku as hk
import jax
import numpy as np
class DummySingleLossFn(losses_base.SingleLossFnArray):
"""A dummy loss fn that returns the normalized index as loss.
  It also returns constant dummy metrics. It is meant to be used with an
  ensemble ENN. The index is assumed to be a uniform random integer in the
  interval [0, num_ensemble). The loss is normalized such that its mean is 1.
"""
def __init__(self, num_ensemble: int, dummy_metrics: Dict[Text, int]):
self._num_ensemble = num_ensemble
self._dummy_metrics = dummy_metrics
def __call__(
self,
apply: networks.ApplyArray,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
index: base.Index,
) -> base.LossOutput:
"""Computes a loss based on one batch of data and one index."""
del apply, params, batch
return ((2 * index + 1) / self._num_ensemble, (state, self._dummy_metrics))
class AvgSingleIndexLossTest(absltest.TestCase):
def test_averaging(self):
"""Average of single loss fn should have same mean and smaller variance ."""
num_ensemble = 10
dummy_metrics = {'a': 0, 'b': 1}
    # A dummy loss fn that returns the normalized index as loss and two
    # constant metrics. The index is random, but the loss is normalized such
    # that its mean is 1.
single_loss_fn = DummySingleLossFn(num_ensemble, dummy_metrics)
num_index_samples = 100
loss_fn = losses_base.average_single_index_loss(
single_loss_fn, num_index_samples)
dummy_batch = datasets.ArrayBatch(x=np.ones([1, 1]), y=np.ones([1, 1]))
enn = networks.MLPEnsembleMatchedPrior(
output_sizes=[1],
num_ensemble=num_ensemble,
dummy_input=dummy_batch.x,
)
loss, (unused_new_state, metrics) = loss_fn(
enn=enn,
params=dict(),
state=dict(),
batch=dummy_batch,
key=jax.random.PRNGKey(0),
)
    # Since the single loss has mean 1, the averaged loss also has mean 1 and a
    # standard deviation proportional to 1/np.sqrt(num_index_samples).
self.assertAlmostEqual(
loss,
1.0,
delta=5 / np.sqrt(num_index_samples),
msg=f'Expected loss to be ~1.0 but it is {loss}')
self.assertDictEqual(
metrics, dummy_metrics,
f'expected metrics to be {dummy_metrics} but it is {metrics}')
class L2LossTest(absltest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
batch_size = 4
cls._batch = datasets.ArrayBatch(
x=np.expand_dims(np.arange(batch_size), 1),
y=np.zeros(shape=(batch_size, 1)),
data_index=np.expand_dims(np.arange(batch_size), 1),
)
cls._params = dict()
cls._state = dict()
cls._index = np.array([])
def test_null_bootstrapping(self):
"""Test computed loss is correct when there is no bootstrapping."""
apply = lambda p, s, x, i: (x[:, :1], s)
output, unused_state = apply(
self._params,
self._state,
self._batch.x,
self._index,
)
# y is zero, hence the loss is just the mean square of the output.
expected_loss = np.mean(np.square(output))
loss_fn = single_index.L2Loss()
loss, (unused_new_state, unused_metrics) = loss_fn(
apply=apply,
params=self._params,
state=self._state,
batch=self._batch,
index=self._index,
)
self.assertEqual(
loss, expected_loss,
(f'expected loss with null bootstrapping is {expected_loss}, '
f'but it is {loss}'))
class XentLossTest(parameterized.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._params = dict()
cls._state = dict()
cls._index = np.array([])
@parameterized.parameters([2, 3, 5])
def test_null_bootstrapping(self, num_classes: int):
"""Test computed loss is correct when there is no bootstrapping."""
loss_fn = single_index.XentLoss(num_classes)
batch_size = 4
batch = datasets.ArrayBatch(
x=np.expand_dims(np.arange(batch_size), 1),
        y=np.random.randint(0, num_classes, size=(batch_size, 1)),
data_index=np.expand_dims(np.arange(batch_size), 1),
)
    # Test when apply always returns a uniform distribution over labels.
apply = lambda p, s, x, i: (np.ones(shape=(x.shape[0], num_classes)), s)
    # Since the output is uniform, the log loss is always -log(1/num_classes).
expected_loss = -np.log(1.0 / num_classes)
loss, (unused_state, unused_metrics) = loss_fn(
apply=apply,
params=self._params,
state=self._state,
batch=batch,
index=self._index,
)
self.assertEqual(
loss, expected_loss,
(f'expected loss for uniform prediction is {expected_loss}, '
f'but it is {loss}'))
    # Test when apply always predicts the label to be 0.
logits = np.array([100] + [0] * (num_classes - 1))
apply = lambda p, s, x, i: (np.tile(logits, (x.shape[0], 1)), s)
# Compute the expected log loss.
expected_loss = (
jax.nn.logsumexp(logits) - np.mean(batch.y == 0) * logits[0])
loss, (unused_state, unused_metrics) = loss_fn(
apply=apply,
params=self._params,
state=self._state,
batch=batch,
index=self._index,
)
self.assertEqual(
loss, expected_loss,
(f'expected loss for predicting class 0 is {expected_loss}, '
f'but it is {loss}'))
@parameterized.parameters([2, 3, 5])
def test_zero_bootstrapping(self, num_classes: int):
"""Test computed loss is zero when bootstrap weights are zero."""
loss_fn = single_index.XentLoss(num_classes)
batch_size = 4
batch = datasets.ArrayBatch(
x=np.expand_dims(np.arange(batch_size), 1),
        y=np.random.randint(0, num_classes, size=(batch_size, 1)),
data_index=np.expand_dims(np.arange(batch_size), 1),
weights=np.zeros([batch_size, 1]),
)
    # Test when apply always returns a uniform distribution over labels.
apply = lambda p, s, x, i: (np.ones(shape=(x.shape[0], num_classes)), s)
loss, (unused_state, unused_metrics) = loss_fn(
apply=apply,
params=self._params,
state=self._state,
batch=batch,
index=self._index,
)
self.assertEqual(
loss, 0.0, ('expected loss with zero bootstrapping weights to be zero, '
f'but it is {loss}'))
class ElboLossTest(absltest.TestCase):
def test_elbo_loss(self):
"""Compute the ELBO for some trivial loglikelihood and prior kl.
There is a dummy log_likelihood_fn that just returns the first argument
(out). and a dummy model_prior_kl_fn that returns 0. The elbo loss is equal
to model_prior_kl minus log_likelihood and hence should be -out.
"""
batch_size = 4
batch = datasets.ArrayBatch(
x=np.expand_dims(np.arange(batch_size), 1),
y=np.arange(batch_size),
)
params = dict()
state = dict()
apply = lambda p, s, x, i: (x[:, 0], s)
index = np.array([])
output, unused_state = apply(params, state, batch.x, index)
log_likelihood_fn = lambda out, batch: out
model_prior_kl_fn = lambda out, params, index: np.zeros_like(out)
elbo_loss = single_index.ElboLoss(
log_likelihood_fn=log_likelihood_fn,
model_prior_kl_fn=model_prior_kl_fn)
loss, (unused_state, unused_metrics) = elbo_loss(
apply=apply, params=params, state=state, batch=batch, index=index)
self.assertTrue((loss == -output).all(),
f'expected elbo loss to be {-output} but it is {loss}')
if __name__ == '__main__':
absltest.main()
| enn-master | enn/losses/single_index_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for computing VI loss."""
from typing import Callable
import chex
from enn import base
from enn import datasets
from enn import networks
import haiku as hk
import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
import typing_extensions
tfd = tfp.distributions
def get_awgn_loglike_fn(
sigma_w: float) -> Callable[[networks.Output, datasets.ArrayBatch], float]:
"""Returns a function that computes the simple unnormalized log likelihood.
It assumes response variable is perturbed with additive iid Gaussian noise.
Args:
sigma_w: standard deviation of the additive Gaussian noise.
Returns:
A function that computes the log likelihood given data and output.
"""
def log_likelihood_fn(out: networks.Output, batch: datasets.ArrayBatch):
chex.assert_shape(batch.y, (None, 1))
err_sq = jnp.mean(jnp.square(networks.parse_net_output(out) - batch.y))
return -0.5 * err_sq / sigma_w**2
return log_likelihood_fn
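# A minimal usage sketch (illustrative only; shapes are assumptions): with
# sigma_w = 1 and a prediction that is off by exactly 1 everywhere, the mean
# squared error is 1 and the unnormalized log likelihood is -0.5.
#
#   import jax.numpy as jnp
#   log_like_fn = get_awgn_loglike_fn(sigma_w=1.0)
#   batch = datasets.ArrayBatch(x=jnp.zeros([4, 3]), y=jnp.zeros([4, 1]))
#   out = jnp.ones([4, 1])
#   # log_like_fn(out, batch) == -0.5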
def get_categorical_loglike_fn(
num_classes: int
) -> Callable[[networks.Output, datasets.ArrayBatch], float]:
"""Returns a function that computes the unnormalized log likelihood.
It assumes response variable has a categorical distribution.
Args:
num_classes: number of classes for the output.
Returns:
A function that computes the log likelihood given data and prediction.
"""
def log_likelihood_fn(out: networks.Output, batch: datasets.ArrayBatch):
chex.assert_shape(batch.y, (None, 1))
logits = networks.parse_net_output(out)
labels = jax.nn.one_hot(batch.y[:, 0], num_classes)
return jnp.mean(
jnp.sum(labels * jax.nn.log_softmax(logits), axis=1))
return log_likelihood_fn
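# A minimal usage sketch (illustrative only; shapes are assumptions): with
# all-zero logits the predictive distribution is uniform, so the log
# likelihood is log(1/num_classes).
#
#   import jax.numpy as jnp
#   num_classes = 3
#   log_like_fn = get_categorical_loglike_fn(num_classes)
#   batch = datasets.ArrayBatch(
#       x=jnp.zeros([4, 2]), y=jnp.zeros([4, 1], dtype=jnp.int32))
#   out = jnp.zeros([4, num_classes])
#   # log_like_fn(out, batch) == jnp.log(1 / num_classes)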
def log_normal_prob(x: float, mu: float = 0, sigma: float = 1):
"""Compute log probability of x w.r.t a 1D Gaussian density."""
gauss = tfd.Normal(loc=mu, scale=sigma)
return gauss.log_prob(x)
def sum_log_scale_mixture_normal(
x: chex.Array,
sigma_1: float,
sigma_2: float,
mu_1: float = 0.,
mu_2: float = 0.,
pi: float = 1.,
) -> float:
"""Compute sum log probs of x w.r.t. a scale mixture of two 1D Gaussians.
Args:
x: an array for which we want to find probabilities.
    sigma_1: Standard deviation of the first Gaussian density.
    sigma_2: Standard deviation of the second Gaussian density.
    mu_1: Mean of the first Gaussian density.
    mu_2: Mean of the second Gaussian density.
pi: Scale for mixture of two Gaussian densities. The two Gaussian
densities are mixed as
pi * Normal(mu_1, sigma_1) + (1 - pi) * Normal(mu_2, sigma_2)
Returns:
Sum of log probabilities.
"""
bimix_gauss = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[pi, 1.-pi]),
components_distribution=tfd.Normal(
loc=[mu_1, mu_2], # One for each component.
scale=[sigma_1, sigma_2]))
log_probs = bimix_gauss.log_prob(x)
return jnp.sum(log_probs)
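# Note (illustrative): with the default pi = 1.0 the mixture collapses to a
# single Gaussian, so the result is simply the sum of Normal(mu_1, sigma_1)
# log probabilities over all elements of x. For example,
#
#   sum_log_scale_mixture_normal(jnp.zeros(3), sigma_1=1., sigma_2=1.)
#
# is three times the standard normal log density at 0, i.e. about -2.757.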
def normal_log_prob(latent: chex.Array, sigma: float = 1, mu: float = 0):
"""Compute un-normalized log probability of a normal RV."""
latent, _ = jax.tree_flatten(latent)
latent = jax.tree_util.tree_map(lambda x: x.flatten(), latent)
latent = jnp.concatenate(latent)
latent_dim = len(latent)
latent_l2_sq = jnp.sum(jnp.square(latent - mu))
return -0.5 * (latent_dim * jnp.log(2 * jnp.pi * sigma**2)
+ latent_l2_sq / sigma**2)
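# Note (illustrative): this is the exact log density of an isotropic Gaussian.
# For a d-dimensional zero latent with mu=0 and sigma=1 it evaluates to
# -0.5 * d * log(2 * pi), e.g. about -1.838 for d=2.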
class _KlLossFn(typing_extensions.Protocol):
"""Calculates a loss based on model output, params, and one index.."""
def __call__(
self,
out: networks.Output,
params: hk.Params,
index: base.Index,
) -> float:
"""Computes a loss based on model output, params, and one index."""
def get_sample_based_model_prior_kl_fn(
num_samples: float, sigma_1: float, sigma_2: float = 1., scale: float = 1.
) -> _KlLossFn:
"""Returns a function for computing the KL distance between model and prior.
Args:
num_samples: effective number of samples.
    sigma_1: Standard deviation of the Gaussian density used as the prior.
sigma_2: Standard deviation of the second Gaussian if scale mixture of two
Gaussian densities is used as the prior.
scale: Scale for mixture of two Gaussian densities. The two Gaussian
densities are mixed as
scale * Normal(0, sigma_1) + (1 - scale) * Normal(0, sigma_2)
"""
def model_prior_kl_fn(out: networks.Output, params: hk.Params,
index: base.Index) -> float:
"""Compute the KL distance between model and prior densities using samples."""
del index
latent = out.extra['hyper_index'] # pytype: disable=attribute-error # numpy-scalars
# Calculate prior
log_priors_sum = sum_log_scale_mixture_normal(latent, sigma_1,
sigma_2, pi=scale)
# Calculate variational posterior
predicate = lambda module_name, name, value: name == 'w'
# We have used 'w' in params as rho (used with softplus to calculate sigma)
# and 'b' in params as mu for the Gaussian density.
rhos, mus = hk.data_structures.partition(predicate, params)
mus, _ = jax.tree_flatten(mus)
mus = jnp.concatenate(mus, axis=0)
rhos, _ = jax.tree_flatten(rhos)
rhos = jnp.concatenate(rhos, axis=0)
chex.assert_equal_shape([rhos, mus])
# We use softplus to convert rho to sigma.
sigmas = jnp.log(1 + jnp.exp(rhos))
chex.assert_equal_shape([sigmas, mus, latent])
log_normal_prob_vectorized = jnp.vectorize(log_normal_prob)
log_var_posteriors = log_normal_prob_vectorized(latent, mus, sigmas)
log_var_posteriors_sum = jnp.sum(log_var_posteriors)
return (log_var_posteriors_sum - log_priors_sum) / num_samples
return model_prior_kl_fn
def get_analytical_diagonal_linear_model_prior_kl_fn(
num_samples: float, sigma_0: float
) -> _KlLossFn:
"""Returns a function for computing the KL distance between model and prior.
It assumes index to be standard Gaussian.
Args:
num_samples: effective number of samples.
sigma_0: Standard deviation of the Gaussian latent (params) prior.
Returns:
model_prior_kl_fn
"""
def model_prior_kl_fn(out: networks.Output, params: hk.Params,
index: base.Index) -> float:
"""Compute the KL distance between model and prior densities in a linear HM.
weights `w` and biases `b` are assumed included in `params`. The latent
variables (which are the parameters of the base network) are generated as u
= z @ log(1 + exp(w)) + b where z is the index variable. The index is
assumed to be a standard Gaussian.
This function also assumes a Gaussian prior distribution for the latent,
i.e., parameters of the base network, with variance sigma^2.
Args:
out: final output of the hypermodel, i.e., y = f_theta(x, z)
params: parameters of the hypermodel (Note that this is the parameters of
the hyper network since base network params are set by the hyper net.)
index: index z
Returns:
KL distance.
"""
del out, index # Here we compute the log prob from params directly.
predicate = lambda module_name, name, value: name == 'w'
weights, biases = hk.data_structures.partition(predicate, params)
biases, _ = jax.tree_flatten(biases)
biases = jnp.concatenate(biases, axis=0)
weights, _ = jax.tree_flatten(weights)
weights = jnp.concatenate(weights, axis=0)
scales = jnp.log(1 + jnp.exp(weights))
chex.assert_equal_shape([scales, biases])
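    # Closed-form KL between the latent posterior N(b, diag(s^2)), where
    # s = log(1 + exp(w)), and the isotropic prior N(0, sigma_0^2 I):
    #   KL = 0.5 * sum_i [ (s_i^2 + b_i^2) / sigma_0^2 - 1
    #                      - 2 * log(s_i) + 2 * log(sigma_0) ]
    # which is exactly the expression returned below, scaled by 1/num_samples.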
return 0.5 / num_samples * (
jnp.sum(jnp.square(scales)) / (sigma_0 ** 2)
+ jnp.sum(jnp.square(biases)) / (sigma_0 ** 2)
- len(biases)
- 2 * jnp.sum(jnp.log(scales))
+ 2 * len(biases) * jnp.log(sigma_0)
)
return model_prior_kl_fn
def get_analytical_linear_model_prior_kl_fn(
num_samples: float, sigma_0: float
) -> _KlLossFn:
"""Returns a function for computing the KL distance between model and prior.
It assumes index to be Gaussian with standard deviation sigma_0.
Args:
num_samples: effective number of samples.
sigma_0: Standard deviation of the Gaussian latent (params) prior.
"""
def model_prior_kl_fn(out: networks.Output, params: hk.Params,
index: base.Index) -> float:
"""Compute the KL distance between model and prior densities in a linear HM.
    Weights `w` and biases `b` are assumed included in `params`. The latent
    variables (which are the parameters of the base network) are generated as
    u = z @ w + b where z is the index variable. This function assumes a
    Gaussian prior distribution for the latent, i.e., the parameters of the
    base network, and assumes the index to be Gaussian *with variance equal
    to the prior variance* of the latent variables.
Args:
out: final output of the hypermodel, i.e., y = f_theta(x, z)
params: parameters of the hypermodel (Note that this is the parameters of
the hyper network since base network params are set by the hyper net.)
index: index z
Returns:
KL distance.
"""
del out, index # Here we compute the log prob from params directly.
predicate = lambda module_name, name, value: name == 'w'
weights, biases = hk.data_structures.partition(predicate, params)
biases, _ = jax.tree_flatten(biases)
biases = jnp.concatenate(biases, axis=0)
weights, _ = jax.tree_flatten(weights)
weights = jnp.concatenate(weights, axis=1)
chex.assert_equal_shape_suffix([weights, biases], 1)
weights_sq = weights @ weights.T
index_dim = weights_sq.shape[0]
# Make weights_sq PD for numerical stability
weights_sq += 1e-6 * jnp.eye(index_dim)
w_sq_eigvals = jnp.linalg.eigvalsh(weights_sq)
w_sq_inv = jnp.linalg.inv(weights_sq)
# Latent covariance is equal to \Sigma_W^2
sigma_u_log_det = jnp.sum(jnp.log(w_sq_eigvals))
sigma_u_trace = jnp.sum(w_sq_eigvals)
weights_biases = weights @ biases
chex.assert_equal(len(weights_biases), index_dim)
proj_biases_norm = weights_biases @ w_sq_inv @ weights_biases.T
return 0.5 / num_samples * (sigma_u_trace - index_dim - sigma_u_log_det
+ proj_biases_norm / sigma_0**2)
return model_prior_kl_fn
def get_analytical_hyperflow_model_prior_kl_fn(
num_samples: float, sigma_0: float
) -> _KlLossFn:
"""Returns a function for computing the KL distance between model and prior.
It assumes index to be Gaussian with standard deviation sigma_0.
Args:
num_samples: effective number of samples.
sigma_0: Standard deviation of the Gaussian latent (params) prior.
"""
def model_prior_kl_fn(out, params, index):
del params, index
return (jnp.squeeze(out.extra['log_prob'])
- normal_log_prob(out.extra['latent'], sigma_0)) / num_samples
return model_prior_kl_fn
| enn-master | enn/losses/vi_losses.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base for losses."""
from typing import Tuple
import chex
from enn import base
from enn import networks
from enn import utils
from enn.datasets import base as ds_base
import haiku as hk
import jax
import jax.numpy as jnp
import typing_extensions as te
class SingleLossFn(te.Protocol[base.Input, base.Output, base.Data,]):
"""Calculates a loss based on one batch of data per index.
You can use average_single_index_loss to make a base.LossFn out of the
SingleLossFn.
"""
def __call__(
self,
apply: base.ApplyFn[base.Input, base.Output],
params: hk.Params,
state: hk.State,
batch: base.Data,
index: base.Index,
) -> base.LossOutput:
"""Computes a loss based on one batch of data and one index."""
def average_single_index_loss(
single_loss: SingleLossFn[base.Input, base.Output, base.Data],
num_index_samples: int = 1,
) -> base.LossFn[base.Input, base.Output, base.Data]:
"""Average a single index loss over multiple index samples.
Note that the *network state* is also averaged over indices. This is not going
to be equivalent to num_index_samples updates sequentially. We may want to
think about alternative ways to do this, or set num_index_samples=1.
Args:
single_loss: loss function applied per epistemic index.
num_index_samples: number of index samples to average.
Returns:
LossFn that comprises the mean of both the loss and the metrics.
"""
def loss_fn(enn: base.EpistemicNetwork[base.Input, base.Output],
params: hk.Params,
state: hk.State,
batch: base.Data,
key: chex.PRNGKey) -> base.LossOutput:
# Apply the loss in parallel over num_index_samples different indices.
# This is the key logic to this loss function.
batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_samples)
batched_loss = jax.vmap(single_loss, in_axes=[None, None, None, None, 0])
loss, (new_state, metrics) = batched_loss(
enn.apply, params, state, batch, batched_indexer(key))
# Take the mean over the synthetic index batch dimension
batch_mean = lambda x: jnp.mean(x, axis=0)
mean_loss = batch_mean(loss)
if new_state:
# TODO(author2): This section is a bit of a hack, since we do not have
# a clear way to deal with network "state" in the presence of epistemic
# index. We choose to average the state across epistemic indices and
# then perform basic error checking to make sure the shape is unchanged.
new_state = jax.tree_util.tree_map(batch_mean, new_state)
jax.tree_util.tree_map(lambda x, y: chex.assert_equal_shape([x, y]),
new_state, state)
mean_metrics = jax.tree_util.tree_map(batch_mean, metrics)
# TODO(author2): Adding a logging method for keeping track of state counter.
# This piece of code is only used for debugging/metrics.
if len(new_state) > 0: # pylint:disable=g-explicit-length-test
first_state_layer = new_state[list(new_state.keys())[0]]
mean_metrics['state_counter'] = jnp.mean(first_state_layer['counter'])
return mean_loss, (new_state, mean_metrics)
return loss_fn
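# A minimal usage sketch (illustrative only; `single_loss`, `enn`, `params`,
# `state`, `batch` and `key` are assumed to be defined elsewhere):
#
#   loss_fn = average_single_index_loss(single_loss, num_index_samples=10)
#   loss, (new_state, metrics) = loss_fn(enn, params, state, batch, key)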
# Loss modules specialized to work only with Array inputs and Batch data.
LossFnArray = base.LossFn[chex.Array, networks.Output, ds_base.ArrayBatch]
SingleLossFnArray = SingleLossFn[
chex.Array, networks.Output, ds_base.ArrayBatch
]
################################################################################
# The default loss definitions above assume that the enn has a state.
# Since an enn might not have a state, below we provide definitions for
# loss functions which work with networks.EnnNoState, specialized to work with
# Array inputs.
# Defining the type for the output of loss functions without state.
LossOutputNoState = Tuple[chex.Array, base.LossMetrics]
class LossFnNoState(te.Protocol):
"""Calculates a loss based on one batch of data per random key."""
def __call__(self,
enn: networks.EnnNoState,
params: hk.Params,
batch: ds_base.ArrayBatch,
key: chex.PRNGKey) -> LossOutputNoState:
"""Computes a loss based on one batch of data and a random key."""
class SingleLossFnNoState(te.Protocol):
"""Calculates a loss based on one batch of data per index.
You can use average_single_index_loss_no_state defined below to make a
LossFnNoState out of the SingleLossFnNoState.
"""
def __call__(self,
apply: networks.ApplyNoState,
params: hk.Params,
batch: ds_base.ArrayBatch,
index: base.Index) -> LossOutputNoState:
"""Computes a loss based on one batch of data and one index."""
def average_single_index_loss_no_state(
single_loss: SingleLossFnNoState,
num_index_samples: int = 1) -> LossFnNoState:
"""Average a single index loss over multiple index samples.
Args:
single_loss: loss function applied per epistemic index.
num_index_samples: number of index samples to average.
Returns:
LossFnNoState that comprises the mean of both the loss and the metrics.
"""
def loss_fn(enn: networks.EnnNoState,
params: hk.Params,
batch: ds_base.ArrayBatch,
key: chex.PRNGKey) -> LossOutputNoState:
batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_samples)
batched_loss = jax.vmap(single_loss, in_axes=[None, None, None, 0])
loss, metrics = batched_loss(enn.apply, params, batch, batched_indexer(key))
batch_mean = lambda x: jnp.mean(x, axis=0)
return batch_mean(loss), jax.tree_util.tree_map(batch_mean, metrics)
return loss_fn
| enn-master | enn/losses/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for computing VAE loss.
Key derivation and algorithms taken from "Auto-Encoding Variational Bayes":
https://arxiv.org/abs/1312.6114 (Kingma & Welling, 2014).
"""
from typing import Callable
import chex
from enn import datasets
from enn import networks
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp
tfd = tfp.distributions
def binary_log_likelihood(x: chex.Array,
output: chex.Array) -> float:
"""Computes the binary log likelihood loss.
Args:
x: A batch of 1D binary inputs.
output: A batch of output logits (for class 1) of the network.
Returns:
Binary log likelihood loss - see Appendix C (Kingma & Welling, 2014)
"""
assert x.ndim == 2
chex.assert_equal_shape([x, output])
log_likelihood = jnp.sum(
x * output - jnp.logaddexp(0.0, output), axis=-1)
chex.assert_shape(log_likelihood, (x.shape[0],))
return jnp.mean(log_likelihood)
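# Note (illustrative): with x and output both all zeros over D dimensions,
# each dimension contributes -log(2), so the returned value is -D * log(2)
# (about -2.77 for D = 4).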
def gaussian_log_likelihood(x: chex.Array, mean: chex.Array,
log_var: chex.Array) -> float:
"""Computes the gaussian log likelihood loss.
Args:
x: A batch of 1D standardized inputs.
mean: A batch of mean of the output variable.
log_var: A batch of log of the variance of the output variable.
Returns:
Gaussian log likelihood loss - Appendix C of (Kingma & Welling, 2014).
"""
assert x.ndim == 2
chex.assert_equal_shape([x, mean, log_var])
def log_normal_prob(x: float, mu: float = 0, sigma: float = 1):
"""Compute log probability of x w.r.t a 1D Gaussian density."""
gauss = tfd.Normal(loc=mu, scale=sigma)
return gauss.log_prob(x)
log_normal_prob_vectorized = jnp.vectorize(log_normal_prob)
log_likelihoods = log_normal_prob_vectorized(x, mean,
jnp.exp(0.5 * log_var))
log_likelihood = jnp.sum(log_likelihoods, axis=-1)
chex.assert_shape(log_likelihood, (x.shape[0],))
return jnp.mean(log_likelihood)
def latent_kl_divergence(mean: chex.Array,
log_var: chex.Array) -> float:
"""Computes the KL divergence of latent distribution w.r.t. Normal(0, I).
Args:
mean: A batch of mean of the latent variable.
log_var: A batch of log of the variance of the latent variable.
Returns:
KL divergence - see Appendix B of (Kingma & Welling, 2014).
"""
assert mean.ndim == 2
chex.assert_equal_shape([mean, log_var])
kl = - 0.5 * jnp.sum(
1. + log_var - jnp.square(mean) - jnp.exp(log_var), axis=-1)
chex.assert_shape(kl, (mean.shape[0],))
return jnp.mean(kl)
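# Note (illustrative): this is KL(N(mean, diag(exp(log_var))) || N(0, I)).
# With mean = 0 and log_var = 0 the latent already matches the standard normal
# prior and the divergence is exactly 0.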
def latent_kl_fn(net_out: networks.OutputWithPrior) -> float:
"""Thin wrapper around latent_kl_divergence with input validation."""
extra = net_out.extra
assert 'latent_mean' in extra
assert 'latent_log_var' in extra
return latent_kl_divergence(extra['latent_mean'], extra['latent_log_var'])
LogLikelihoodFn = Callable[[networks.OutputWithPrior, datasets.ArrayBatch],
float]
def get_log_likelihood_fn(bernoulli_decoder: bool) -> LogLikelihoodFn:
"""Returns a function for calculating KL divergence of latent distribution.
Args:
bernoulli_decoder: A boolean specifying whether the decoder is Bernoulli.
      If it is False, the decoder is considered to be Gaussian.
Returns:
log_likelihood_fn mapping OutputWithPrior, Batch -> float.
"""
def log_likelihood_fn(net_out: networks.OutputWithPrior,
batch: datasets.ArrayBatch) -> float:
extra = net_out.extra
assert 'out_mean' in extra
assert 'out_log_var' in extra
if bernoulli_decoder:
return binary_log_likelihood(batch.x, extra['out_mean'])
else:
return gaussian_log_likelihood(
batch.x, extra['out_mean'], extra['out_log_var'])
return log_likelihood_fn
| enn-master | enn/losses/vae_losses.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing active learning."""
# Base classes
from enn.active_learning.base import ActiveLearner
from enn.active_learning.base import PerExamplePriority
from enn.active_learning.base import PriorityFn
from enn.active_learning.base import PriorityFnCtor
from enn.active_learning.base import PriorityOutput
# Priorities
from enn.active_learning.priorities import get_implemented_priority_fn_ctors
from enn.active_learning.priorities import get_per_example_priority
from enn.active_learning.priorities import get_priority_fn_ctor
from enn.active_learning.priorities import make_priority_fn_ctor
from enn.active_learning.priorities import make_scaled_mean_per_example
from enn.active_learning.priorities import make_scaled_std_per_example
from enn.active_learning.priorities import make_ucb_per_example
# Prioritized
from enn.active_learning.prioritized import PrioritizedBatcher
| enn-master | enn/active_learning/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An active learner that uses priority functions to select data."""
import typing as tp
import chex
from enn import datasets
from enn.active_learning import base
from enn.active_learning import priorities
from enn.networks import forwarders
import haiku as hk
import jax
import jax.numpy as jnp
class PrioritizedBatcher(base.ActiveLearner):
"""Prioritizes bathces based on a priority fn."""
def __init__(
self,
enn_batch_fwd: forwarders.EnnBatchFwd[chex.Array],
acquisition_size: int = 64,
priority_fn_ctor: tp.Optional[base.PriorityFnCtor] = None,
):
"""Initializes the batcher."""
self._acquisition_size = acquisition_size
if priority_fn_ctor is None:
priority_fn_ctor = priorities.get_priority_fn_ctor('uniform')
self._priority_fn = priority_fn_ctor(enn_batch_fwd)
def sample_batch(
self,
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> datasets.ArrayBatch:
"""Acquires data. This is the function per device (can get pmaped)."""
pool_size = len(batch.y)
candidate_scores, unused_metrics = self._priority_fn(
params, state, batch, key)
# Cannot acquire data more than batch size
acquisition_size = min(self._acquisition_size, pool_size)
selected_idxs = jnp.argsort(candidate_scores)[-acquisition_size:]
acquired_data = get_at_index(batch, selected_idxs)
return acquired_data
@property
def acquisition_size(self):
"""Return the acquisition size for the active learner."""
return self._acquisition_size
@acquisition_size.setter
def acquisition_size(self, size: int) -> None:
"""Overwrites the acquisition size. Useful when we pmap sample_batch."""
self._acquisition_size = size
_T = tp.TypeVar('_T')
def get_at_index(t: _T, idx: chex.Array) -> _T:
"""Gets values at the indices specified by idx array."""
  return jax.tree_util.tree_map(lambda x: x[idx], t)
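# A minimal usage sketch (illustrative only; `enn_batch_fwd`, `params`,
# `state`, `batch` and `key` are assumed to be defined elsewhere):
#
#   batcher = PrioritizedBatcher(
#       enn_batch_fwd,
#       acquisition_size=32,
#       priority_fn_ctor=priorities.get_priority_fn_ctor('entropy'),
#   )
#   sub_batch = batcher.sample_batch(params, state, batch, key)
#   # sub_batch contains the 32 highest-priority examples of `batch`.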
| enn-master | enn/active_learning/prioritized.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for active learning."""
import abc
import typing as tp
import chex
from enn.datasets import base as ds_base
from enn.networks import forwarders
import haiku as hk
import typing_extensions
class ActiveLearner(abc.ABC):
"""Samples a batch from a pool of data for learning.
An active learner selects an "acquisition batch" with acquisition_size
elements from a "candidate batch" that is passed to sample_batch. This can be
used to prioritize data for learning.
"""
@abc.abstractmethod
def sample_batch(
self,
params: hk.Params,
state: hk.State,
batch: ds_base.ArrayBatch,
key: chex.PRNGKey,
) -> ds_base.ArrayBatch:
"""Samples a batch from a pool of data for learning."""
@property
@abc.abstractmethod
def acquisition_size(self) -> int:
"""Return the acquisition size for the active learner."""
@acquisition_size.setter
@abc.abstractmethod
def acquisition_size(self, size: int) -> None:
"""Overwrites the acquisition size. Useful when we pmap sample_batch."""
PriorityOutput = tp.Tuple[chex.Array, tp.Dict[str, chex.Array]]
class PriorityFn(typing_extensions.Protocol):
def __call__(
self,
params: hk.Params,
state: hk.State,
batch: ds_base.ArrayBatch,
key: chex.PRNGKey,
) -> PriorityOutput:
"""Assigns a priority score to a batch."""
class PriorityFnCtor(typing_extensions.Protocol):
def __call__(
self,
enn_batch_fwd: forwarders.EnnBatchFwd[chex.Array],
) -> PriorityFn:
"""Constructs a priority function base on an enn_batch_fwd."""
class PerExamplePriority(typing_extensions.Protocol):
"""Interface for priority per example."""
def __call__(
self,
logits: chex.Array,
labels: chex.Array,
key: chex.Array,
) -> chex.Array:
"""Calculates a priority score based on logits, labels, and a random key.
Args:
logits: An array of shape [A, B, C] where B is the batch size of data, C
is the number of outputs per data (for classification, this is equal to
number of classes), and A is the number of random samples for each data.
labels: An array of shape [B, 1] where B is the batch size of data.
key: A random key.
Returns:
A priority score per example of shape [B,].
"""
| enn-master | enn/active_learning/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to prioritize batches of data based on ENN forward pass."""
import typing as tp
import chex
from enn import datasets
from enn.active_learning import base
from enn.networks import forwarders
import haiku as hk
import jax
import jax.numpy as jnp
def make_priority_fn_ctor(
per_example_priority: base.PerExamplePriority,) -> base.PriorityFnCtor:
"""Makes a priority function constructor from a per example priority."""
def make_priority_fn(
enn_batch_fwd: forwarders.EnnBatchFwd[datasets.ArrayBatch],
) -> base.PriorityFn:
"""Makes a priority function."""
def priority_fn(
params: hk.Params,
state: hk.State,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> base.PriorityOutput:
logits = enn_batch_fwd(params, state, batch.x) # pytype: disable=wrong-arg-types # numpy-scalars
# Make sure labels have shape [num_data, 1] as expected by priority.
labels = batch.y
if labels.ndim == 1:
labels = jnp.expand_dims(labels, axis=1)
values = per_example_priority(logits, labels, key)
return values, {}
return priority_fn
return make_priority_fn # pytype: disable=bad-return-type # numpy-scalars
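# A minimal usage sketch (illustrative only): any PerExamplePriority defined
# below (e.g. nll_per_example) can be lifted into a PriorityFnCtor.
#
#   priority_fn_ctor = make_priority_fn_ctor(nll_per_example)
#   priority_fn = priority_fn_ctor(enn_batch_fwd)  # enn_batch_fwd assumed.
#   scores, metrics = priority_fn(params, state, batch, key)
#   # scores has shape [batch_size]; a higher score means higher priority.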
def uniform_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Returns uniformly random scores per example."""
del logits
labels = jnp.squeeze(labels)
return jax.random.uniform(key, shape=labels.shape)
def variance_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates variance per example."""
del labels, key
unused_enn_samples, data_size, unused_num_classes = logits.shape
probs = jax.nn.softmax(logits)
variances = jnp.sum(jnp.var(probs, axis=0), axis=-1)
chex.assert_shape(variances, (data_size,))
return variances
def nll_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates negative log-likelihood (nll) per example."""
del key
unused_enn_samples, data_size, unused_num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
probs = jnp.mean(sample_probs, axis=0)
# Penalize with log loss
labels = labels.astype(jnp.int32)
labels = jnp.squeeze(labels)
true_probs = probs[jnp.arange(data_size), labels]
losses = -jnp.log(true_probs)
chex.assert_shape(losses, (data_size,))
return losses
def joint_nll_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates joint negative log-likelihood (nll) per example."""
del key
num_enn_samples, data_size, unused_num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
# Penalize with log loss
labels = labels.astype(jnp.int32)
labels = jnp.squeeze(labels)
true_probs = sample_probs[:, jnp.arange(data_size), labels]
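  # Scaling the per-example log-likelihood by tau below is arithmetically
  # equivalent to evaluating the joint log-likelihood of tau identical copies
  # of each example.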
tau = 10
repeated_lls = tau * jnp.log(true_probs)
chex.assert_shape(repeated_lls, (num_enn_samples, data_size))
# Take average of joint lls over num_enn_samples
joint_lls = jnp.mean(repeated_lls, axis=0)
chex.assert_shape(joint_lls, (data_size,))
return -1 * joint_lls
def entropy_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates entropy per example."""
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
probs = jnp.mean(sample_probs, axis=0)
chex.assert_shape(probs, (data_size, num_classes))
entropies = -1 * jnp.sum(probs * jnp.log(probs), axis=1)
chex.assert_shape(entropies, (data_size,))
return entropies
def margin_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates margin between top and second probabilities per example."""
# See e.g. use in PLEX paper: https://arxiv.org/abs/2207.07411
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
probs = jnp.mean(sample_probs, axis=0)
chex.assert_shape(probs, (data_size, num_classes))
sorted_probs = jnp.sort(probs)
margins = sorted_probs[:, -1] - sorted_probs[:, -2]
chex.assert_shape(margins, (data_size,))
# Return the *negative* margin
return -margins
def bald_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates BALD mutual information per example."""
del labels, key
num_enn_samples, data_size, num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
# Function to compute entropy
compute_entropy = lambda p: -1 * jnp.sum(p * jnp.log(p), axis=1)
# Compute entropy for average probabilities
mean_probs = jnp.mean(sample_probs, axis=0)
chex.assert_shape(mean_probs, (data_size, num_classes))
mean_entropy = compute_entropy(mean_probs)
chex.assert_shape(mean_entropy, (data_size,))
# Compute entropy for each sample probabilities
sample_entropies = jax.vmap(compute_entropy)(sample_probs)
chex.assert_shape(sample_entropies, (num_enn_samples, data_size))
models_disagreement = mean_entropy - jnp.mean(sample_entropies, axis=0)
chex.assert_shape(models_disagreement, (data_size,))
return models_disagreement
def var_ratios_per_example(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
"""Calculates the highest probability per example."""
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
sample_probs = jax.nn.softmax(logits)
probs = jnp.mean(sample_probs, axis=0)
chex.assert_shape(probs, (data_size, num_classes))
max_probs = jnp.max(probs, axis=1)
variation_ratio = 1 - max_probs
assert len(variation_ratio) == data_size
return variation_ratio
def make_ucb_per_example(
ucb_factor: float = 1.,
class_values: tp.Optional[chex.Array] = None,
) -> base.PerExamplePriority:
"""Creates a UCB-style priority metric."""
def compute_ucb(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
# Either use class values or default to just the first class
scale_values = class_values
if scale_values is None:
scale_values = jnp.zeros(num_classes).at[0].set(1)
probs = jax.nn.softmax(logits)
value = jnp.einsum('zbc,c->zb', probs, scale_values)
mean_values = jnp.mean(value, axis=0)
std_values = jnp.std(value, axis=0)
ucb_value = mean_values + ucb_factor * std_values
chex.assert_shape(ucb_value, (data_size,))
return ucb_value
return compute_ucb
def make_scaled_mean_per_example(
class_values: tp.Optional[chex.Array] = None,
) -> base.PerExamplePriority:
"""Creates a priority metric based on mean probs scaled by class_values."""
def compute_scaled_mean(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
# Either use class values or default to just the first class
scale_values = class_values
if scale_values is None:
scale_values = jnp.zeros(num_classes).at[0].set(1)
probs = jax.nn.softmax(logits)
values = jnp.einsum('zbc,c->zb', probs, scale_values)
mean_values = jnp.mean(values, axis=0)
chex.assert_shape(mean_values, (data_size,))
return mean_values
return compute_scaled_mean
def make_scaled_std_per_example(
class_values: tp.Optional[chex.Array] = None,
) -> base.PerExamplePriority:
"""Creates a priority metric based on std of probs scaled by class_values."""
def compute_scaled_std(
logits: chex.Array,
labels: chex.Array,
key: chex.PRNGKey,
) -> chex.Array:
del labels, key
unused_enn_samples, data_size, num_classes = logits.shape
# Either use class values or default to just the first class
scale_values = class_values
if scale_values is None:
scale_values = jnp.zeros(num_classes).at[0].set(1)
probs = jax.nn.softmax(logits)
values = jnp.einsum('zbc,c->zb', probs, scale_values)
std_values = jnp.std(values, axis=0)
chex.assert_shape(std_values, (data_size,))
return std_values
return compute_scaled_std
_PerExamplePriorities = {
'uniform': uniform_per_example,
'variance': variance_per_example,
'nll': nll_per_example,
'joint_nll': joint_nll_per_example,
'entropy': entropy_per_example,
'margin': margin_per_example,
'bald': bald_per_example,
'var_ratios': var_ratios_per_example,
'ucb': make_ucb_per_example(),
'scaled_mean': make_scaled_mean_per_example(),
'scaled_std': make_scaled_std_per_example(),
}
_PriorityFnCtors = {
key: make_priority_fn_ctor(value)
for key, value in _PerExamplePriorities.items()
}
def get_implemented_priority_fn_ctors() -> tp.Sequence[str]:
"""Returns the list of all supported priority function constructors."""
return list(_PriorityFnCtors.keys())
def get_priority_fn_ctor(name: str) -> base.PriorityFnCtor:
"""Returns a priority function constructor for the priority specified by `name`."""
assert name in get_implemented_priority_fn_ctors()
return _PriorityFnCtors[name]
def get_implemented_per_example_priorities() -> tp.Sequence[str]:
"""Returns the list of all supported per example priority functions."""
return list(_PerExamplePriorities.keys())
def get_per_example_priority(name: str) -> base.PerExamplePriority:
"""Returns a per example priority function for the priority specified by `name`."""
assert name in get_implemented_per_example_priorities()
return _PerExamplePriorities[name]
| enn-master | enn/active_learning/priorities.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ImageNet dataset with typical pre-processing."""
import dataclasses
import enum
import itertools as it
from typing import Dict, Optional, Sequence, Tuple
from enn.datasets import base as ds_base
from enn.datasets import utils as ds_utils
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
class Split(enum.Enum):
"""Imagenet dataset split."""
TRAIN = 1271167
TRAIN_AND_VALID = 1281167
VALID = 10000
TEST = 50000
@property
def num_examples(self):
return self.value
MAX_NUM_TRAIN = Split.TRAIN_AND_VALID.num_examples
@dataclasses.dataclass
class Imagenet(ds_base.DatasetWithTransform):
"""Imagenet as jaxline dataset."""
train_batch: int = 128
eval_batch: int = 100
dataset_seed: int = 0
enable_double_transpose: bool = True
fake_data: bool = False
num_train: int = Split.TRAIN_AND_VALID.num_examples
train_ds_transformer: ds_base.DatasetTransformer = lambda x: x
eval_ds_transformers: Dict[
str, ds_base.DatasetTransformer] = ds_base.EVAL_TRANSFORMERS_DEFAULT
# Whether to add a leading axis of number of devices to the batches. If true,
# data batches have shape (number_devices, batch_size / number_devices, ...);
  # otherwise, they have shape (batch_size, ...).
train_data_parallelism: bool = True # Slices the train data into devices.
eval_data_parallelism: bool = False
@property
def num_classes(self) -> int:
return 1000
@property
def eval_input_shape(self) -> Sequence[int]:
return (224, 224, 3)
def train_dataset(self) -> ds_base.ArrayBatchIterator:
"""Returns the train dataset."""
def build_train_input() -> ds_base.ArrayBatchIterator:
"""See base class."""
# double-transpose-trick is only needed on TPU.
should_transpose_images = (
self.enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
return load(
Split.TRAIN_AND_VALID,
is_training=True,
transpose=should_transpose_images,
total_batch_size=self.train_batch,
data_parallelism=self.train_data_parallelism,
fake_data=self.fake_data,
seed=self.dataset_seed,
ds_transform=self.train_ds_transformer,
num_examples=self.num_train)
train_input = utils.py_prefetch(build_train_input)
return utils.double_buffer_on_gpu(train_input)
def eval_datasets(self) -> Dict[str, ds_base.ArrayBatchIterator]:
"""Returns the evaluation dataset."""
def build_eval_dataset(
eval_ds_transformer: ds_base.DatasetTransformer
) -> ds_base.ArrayBatchIterator:
# double-transpose-trick is only needed on TPU.
should_transpose_images = (
self.enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
return load(
Split.TEST,
is_training=False,
transpose=should_transpose_images,
total_batch_size=self.eval_batch,
data_parallelism=self.eval_data_parallelism,
fake_data=self.fake_data,
seed=self.dataset_seed,
ds_transform=eval_ds_transformer,)
return {
dataset_type: build_eval_dataset(transformer) for
dataset_type, transformer in self.eval_ds_transformers.items()
}
def load(
split: Split,
*,
is_training: bool,
total_batch_size: int,
data_parallelism: bool,
dtype: jnp.dtype = jnp.float32,
transpose: bool = False,
fake_data: bool = False,
image_size: Tuple[int, int] = (224, 224),
seed: Optional[int] = None,
ds_transform: ds_base.DatasetTransformer = lambda x: x,
num_examples: Optional[int] = None,
) -> ds_base.ArrayBatchIterator:
"""Loads the given split of the dataset."""
start, end = _shard(
split,
shard_index=jax.process_index(),
num_shards=jax.process_count(),
num_examples=num_examples)
# Run deterministically if rng is not None
if seed is not None:
rng = tf.random.create_rng_state(seed, 'threefry')
rng = tf.random.experimental.stateless_fold_in(
tf.cast(rng, tf.int64), start)
# Splitting the rng - one is used as seed for shuffling, the other is
# used as seed for random crop
rngs = tf.random.experimental.stateless_split(rng, 2)
if data_parallelism:
per_device_batch_size = ds_utils.get_per_device_batch_size(total_batch_size)
batch_dims = [jax.local_device_count(), per_device_batch_size]
else:
batch_dims = [total_batch_size]
if fake_data:
images = np.zeros(tuple(batch_dims) + image_size + (3,), dtype=dtype)
labels = np.zeros(tuple(batch_dims), dtype=np.int32)
if transpose:
axes = tuple(range(images.ndim))
axes = axes[:-4] + axes[-3:] + (axes[-4],) # NHWC -> HWCN
images = np.transpose(images, axes)
batch = ds_base.ArrayBatch(x=images, y=labels)
yield from it.repeat(batch, end - start)
return
total_batch_size = np.prod(batch_dims)
tfds_split = tfds.core.ReadInstruction(
_to_tfds_split(split), from_=start, to=end, unit='abs')
ds = tfds.load(
'imagenet2012:5.*.*',
split=tfds_split,
decoders={'image': tfds.decode.SkipDecoding()})
ds = ds.map(ds_utils.change_ds_dict_to_enn_batch)
# TODO(author3): Since we have sharding, the data indices for each shard
# range from 0 to shard size. This means we have similar data index for
# actually different data.
ds = ds_utils.add_data_index_to_dataset(ds)
options = tf.data.Options()
options.experimental_threading.private_threadpool_size = 48
options.experimental_threading.max_intra_op_parallelism = 1
options.experimental_optimization.map_parallelization = True
if seed is None and is_training:
options.experimental_deterministic = False
ds = ds.with_options(options)
if is_training:
if jax.process_count() > 1:
# Only cache if we are reading a subset of the dataset.
ds = ds.cache()
ds = ds.repeat()
seed_shuffle = tf.cast(rngs[0][0], tf.int64) if seed is not None else 0
ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=seed_shuffle)
else:
if split.num_examples % total_batch_size != 0:
raise ValueError(f'Test/valid must be divisible by {total_batch_size}')
def _preprocess_fn(
batch: ds_base.ArrayBatch, example_rng: Optional[tf.Tensor] = None
) -> ds_base.ArrayBatch:
image = _preprocess_image(batch.x, is_training, image_size, example_rng)
return dataclasses.replace(batch, x=image)
def _preprocess_with_per_example_rng(
ds: tf.data.Dataset, *, rng: Optional[np.ndarray]
) -> tf.data.Dataset:
def _fn(
example_index: int, batch: ds_base.ArrayBatch
) -> ds_base.ArrayBatch:
example_rng = None
if rng is not None:
example_index = tf.cast(example_index, tf.int32)
example_rng = tf.random.experimental.stateless_fold_in(
tf.cast(rng, tf.int64), example_index
)
processed = _preprocess_fn(batch, example_rng)
return processed
return ds.enumerate().map(
_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
if seed is None:
ds = ds.map(
_preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
else:
rng_process = tf.cast(rngs[1], tf.int64)
ds = _preprocess_with_per_example_rng(ds, rng=rng_process)
# TODO(author2): This transform needs to come after processing.
ds = ds_transform(ds)
def transpose_fn(batch: ds_base.ArrayBatch) -> ds_base.ArrayBatch:
# We use double-transpose-trick to improve performance for TPUs. Note
# that this (typically) requires a matching HWCN->NHWC transpose in your
# model code. The compiler cannot make this optimization for us since our
# data pipeline and model are compiled separately.
transposed_x = tf.transpose(batch.x, (1, 2, 3, 0))
return dataclasses.replace(batch, x=transposed_x)
def cast_fn(batch: ds_base.ArrayBatch) -> ds_base.ArrayBatch:
x = tf.cast(batch.x, tf.dtypes.as_dtype(dtype))
return dataclasses.replace(batch, x=x)
for i, batch_size in enumerate(reversed(batch_dims)):
ds = ds.batch(batch_size, drop_remainder=True)
if i == 0:
if transpose:
ds = ds.map(transpose_fn) # NHWC -> HWCN
# NOTE: You may be tempted to move the casting earlier on in the pipeline,
# but for bf16 some operations will end up silently placed on the TPU and
# this causes stalls while TF and JAX battle for the accelerator.
ds = ds.map(cast_fn)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
yield from tfds.as_numpy(ds)
def _to_tfds_split(split: Split) -> tfds.Split:
"""Returns the TFDS split appropriately sharded."""
# NOTE: Imagenet did not release labels for the test split used in the
# competition, so it has been typical at DeepMind to consider the VALID
# split the TEST split and to reserve 10k images from TRAIN for VALID.
if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID):
return tfds.Split.TRAIN
else:
assert split == Split.TEST
return tfds.Split.VALIDATION
def _shard(split: Split,
shard_index: int,
num_shards: int,
num_examples: Optional[int] = None) -> Tuple[int, int]:
"""Returns [start, end) for the given shard index."""
assert shard_index < num_shards
if num_examples:
assert num_examples >= 1
num_examples = min(num_examples, split.num_examples)
else:
num_examples = split.num_examples
arange = np.arange(num_examples)
shard_range = np.array_split(arange, num_shards)[shard_index]
start, end = shard_range[0], (shard_range[-1] + 1)
if split == Split.TRAIN:
# Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000].
offset = Split.VALID.num_examples
start += offset
end += offset
return start, end
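# Note (illustrative): sharding the 10k-example VALID split over two hosts
# gives _shard(Split.VALID, 0, 2) == (0, 5000) and
# _shard(Split.VALID, 1, 2) == (5000, 10000); TRAIN ranges are additionally
# offset by the 10k validation examples carved out of TFDS TRAIN.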
def _preprocess_image(
image_bytes: tf.Tensor,
is_training: bool,
image_size: Sequence[int],
rng: Optional[tf.Tensor] = None,
) -> tf.Tensor:
"""Returns processed and resized images."""
if is_training:
if rng is not None:
rngs = tf.random.experimental.stateless_split(rng, 2)
image = _decode_and_random_crop(image_bytes, rngs[0], image_size)
image = tf.image.stateless_random_flip_left_right(image, rngs[1])
else:
image = _decode_and_random_crop(image_bytes, None, image_size)
image = tf.image.random_flip_left_right(image, None)
else:
image = _decode_and_center_crop(image_bytes, image_size=image_size)
assert image.dtype == tf.uint8
# NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without
# clamping overshoots. This means values returned will be outside the range
# [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]).
image = tf.image.resize(image, image_size, tf.image.ResizeMethod.BICUBIC)
image = _normalize_image(image)
return image
def _normalize_image(image: tf.Tensor) -> tf.Tensor:
"""Normalize the image to zero mean and unit variance."""
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def _distorted_bounding_box_crop(
image_bytes: tf.Tensor,
*,
jpeg_shape: tf.Tensor,
bbox: tf.Tensor,
seed: Optional[tf.Tensor],
min_object_covered: float,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
max_attempts: int,
) -> tf.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted."""
kwargs = {
'image_size': jpeg_shape,
'bounding_boxes': bbox,
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'max_attempts': max_attempts,
'use_image_if_no_bounding_boxes': True
}
if seed is not None:
bbox_begin, bbox_size, _ = tf.image.stateless_sample_distorted_bounding_box(
seed=seed, **kwargs)
else:
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(**kwargs)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def _decode_and_random_crop(
image_bytes: tf.Tensor,
seed: Optional[tf.Tensor],
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Make a random crop of 224."""
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
jpeg_shape=jpeg_shape,
bbox=bbox,
seed=seed,
min_object_covered=0.1,
aspect_ratio_range=(3 / 4, 4 / 3),
area_range=(0.08, 1.0),
max_attempts=10)
if tf.reduce_all(tf.equal(jpeg_shape, tf.shape(image))):
    # If the random crop failed, fall back to center crop.
image = _decode_and_center_crop(image_bytes, jpeg_shape, image_size)
return image
def _decode_and_center_crop(
image_bytes: tf.Tensor,
jpeg_shape: Optional[tf.Tensor] = None,
image_size: Sequence[int] = (224, 224),
) -> tf.Tensor:
"""Crops to center of image with padding then scales."""
if jpeg_shape is None:
jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
image_height = jpeg_shape[0]
image_width = jpeg_shape[1]
# Pad the image with at least 32px on the short edge and take a
# crop that maintains aspect ratio.
scale = tf.minimum(
tf.cast(image_height, tf.float32) / (image_size[0] + 32),
tf.cast(image_width, tf.float32) / (image_size[1] + 32))
padded_center_crop_height = tf.cast(scale * image_size[0], tf.int32)
padded_center_crop_width = tf.cast(scale * image_size[1], tf.int32)
offset_height = ((image_height - padded_center_crop_height) + 1) // 2
offset_width = ((image_width - padded_center_crop_width) + 1) // 2
crop_window = tf.stack([
offset_height, offset_width, padded_center_crop_height,
padded_center_crop_width
])
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
| enn-master | enn/datasets/imagenet.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of datasets."""
# Base
from enn.datasets.base import ArrayBatch
from enn.datasets.base import ArrayBatchIterator
from enn.datasets.base import DataIndex
from enn.datasets.base import Dataset
from enn.datasets.base import DatasetTransformer
from enn.datasets.base import DatasetWithTransform
from enn.datasets.base import EVAL_TRANSFORMERS_DEFAULT
from enn.datasets.base import OodVariant
# CIFAR10
from enn.datasets.cifar import Cifar
from enn.datasets.cifar import Cifar10
from enn.datasets.cifar import Cifar100
from enn.datasets.cifar import CifarVariant
from enn.datasets.cifar import Split as Cifar10Split
# ImageNet
from enn.datasets.imagenet import Imagenet
from enn.datasets.imagenet import MAX_NUM_TRAIN as IMAGENET_MAX_NUM_TRAIN
from enn.datasets.imagenet import Split as ImagenetSplit
# MNIST
from enn.datasets.mnist import Mnist
from enn.datasets.mnist import Split as MnistSplit
## Dataset Transformations
# Local Sample
from enn.datasets.transforms.local_sample import make_dyadic_transform
from enn.datasets.transforms.local_sample import make_repeat_sample_transform
from enn.datasets.transforms.local_sample import PerturbFn
# OOD
from enn.datasets.transforms.ood import get_dataset_transform_from_type
from enn.datasets.transforms.ood import make_ood_transformers
from enn.datasets.transforms.ood import sample_classes
# Utils
from enn.datasets.utils import add_data_index_to_dataset
from enn.datasets.utils import change_ds_dict_to_enn_batch
from enn.datasets.utils import get_per_device_batch_size
from enn.datasets.utils import OverrideTrainDataset
from enn.datasets.utils import slice_dataset_to_batches
| enn-master | enn/datasets/__init__.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for datasets."""
# TODO(author3): Add tests
| enn-master | enn/datasets/datasets_test.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for working with datasets."""
from typing import Dict, Sequence
import chex
from enn.datasets import base as ds_base
import jax
import tensorflow.compat.v2 as tf
def change_ds_dict_to_enn_batch(
ds_dict: Dict[str, chex.Array]) -> ds_base.ArrayBatch:
"""Changes a dictionary of (image, label) to an enn batch."""
assert 'image' in ds_dict
assert 'label' in ds_dict
return ds_base.ArrayBatch(x=ds_dict['image'], y=ds_dict['label'])
def add_data_index_to_dataset(ds: tf.data.Dataset) -> tf.data.Dataset:
"""Adds integer data_index into the batch dictionary."""
ds = ds.enumerate()
return ds.map(_add_data_index)
def slice_dataset_to_batches(
dataset: tf.data.Dataset,
total_batch_size: int,
data_parallelism: bool,
drop_remainder: bool = True,
) -> tf.data.Dataset:
"""Slices the data of a dataset into batches.
Args:
dataset: a tf.data dataset.
total_batch_size: the total batch size over all devices.
data_parallelism: a boolean specifying whether to add a leading axis for the
number of devices. If true, data batches have shape (number_devices,
      total_batch_size / number_devices, ...); otherwise, they have shape of
(total_batch_size, ...).
drop_remainder: a boolean specifying whether to drop the remainder of a
leftover batch. Usually set to true during training, but false if the user
would like to evaluate on the full eval dataset.
Returns:
a tf.data dataset sliced into batches.
"""
if data_parallelism:
per_device_batch_size = get_per_device_batch_size(total_batch_size)
dataset = dataset.batch(
per_device_batch_size, drop_remainder=drop_remainder
)
    # drop_remainder is set to True so that the leading axis always equals
    # jax.local_device_count() and every device receives some data.
dataset = dataset.batch(jax.local_device_count(), drop_remainder=True)
else:
dataset = dataset.batch(total_batch_size, drop_remainder=drop_remainder)
return dataset
def get_per_device_batch_size(total_batch_size: int) -> int:
"""Calculates the batch size per device based on total batch size."""
num_devices = jax.device_count()
per_device_batch_size, ragged = divmod(total_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {total_batch_size} must be divisible by the '
f'total number of devices {num_devices}'
)
return per_device_batch_size
def _add_data_index(
data_index: int, batch: ds_base.ArrayBatch
) -> ds_base.ArrayBatch:
"""Adds data_index into the batch."""
return ds_base.ArrayBatch(x=batch.x, y=batch.y, data_index=data_index) # pytype: disable=wrong-arg-types # numpy-scalars
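# Illustrative usage sketch, not part of the original library: wires the
# helpers above into a toy pipeline. With data_parallelism=False no leading
# device axis is added, so each batch has shape (total_batch_size, ...).
def _example_toy_pipeline() -> ds_base.ArrayBatch:
  ds = tf.data.Dataset.from_tensor_slices({
      'image': tf.zeros([8, 4, 4, 3]),
      'label': tf.zeros([8], dtype=tf.int32),
  })
  ds = ds.map(change_ds_dict_to_enn_batch)
  ds = add_data_index_to_dataset(ds)
  ds = slice_dataset_to_batches(
      ds, total_batch_size=4, data_parallelism=False)
  return next(iter(ds))  # batch.x: (4, 4, 4, 3), batch.data_index: (4,).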
class OverrideTrainDataset(ds_base.DatasetWithTransform):
"""Overrides the train dataset with a replacement dataset."""
def __init__(self,
original_dataset: ds_base.DatasetWithTransform,
new_dataset: ds_base.DatasetWithTransform):
assert original_dataset.num_classes == new_dataset.num_classes
self.original_dataset = original_dataset
self.new_dataset = new_dataset
self.train_ds_transformer = original_dataset.train_ds_transformer
self.eval_ds_transformers = original_dataset.eval_ds_transformers
@property
def num_classes(self) -> int:
return self.original_dataset.num_classes
@property
def eval_input_shape(self) -> Sequence[int]:
return self.original_dataset.eval_input_shape
def train_dataset(self) -> ds_base.ArrayBatchIterator:
return self.new_dataset.train_dataset()
def eval_datasets(self) -> Dict[str, ds_base.ArrayBatchIterator]:
return self.original_dataset.eval_datasets()
| enn-master | enn/datasets/utils.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data loader for CIFAR10/100 dataset."""
import dataclasses
import enum
import functools
from typing import Dict, Sequence
from enn.datasets import base as ds_base
from enn.datasets import utils as ds_utils
import jax
from jaxline import utils
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
class CifarVariant(enum.Enum):
"""Variants of Cifar daaset."""
CIFAR10 = 10
CIFAR100 = 100
@property
def num_classes(self) -> int:
return self.value
@property
def tfds_name(self) -> str:
return f'cifar{self.value}'
class Split(enum.Enum):
"""Cifar dataset split."""
TRAIN = 50000
TEST = 10000
@property
def num_examples(self) -> int:
return self.value
@dataclasses.dataclass
class Cifar(ds_base.DatasetWithTransform):
"""Cifar dataset."""
cifar_variant: CifarVariant = CifarVariant.CIFAR10
train_batch: int = 128
eval_batch: int = 100
normalization_mode: str = 'custom'
random_flip: bool = True
random_crop: bool = True
cutout: bool = False
keep_image_size: bool = False
num_train: int = 50_000
train_ds_transformer: ds_base.DatasetTransformer = lambda x: x
eval_ds_transformers: Dict[
str, ds_base.DatasetTransformer] = ds_base.EVAL_TRANSFORMERS_DEFAULT
# Whether to add a leading axis of number of devices to the batches. If true,
# data batches have shape (number_devices, batch_size / number_devices, ...);
# otherwise, they have shape of (batch_size, ...).
train_data_parallelism: bool = True # Slices the train data into devices.
eval_data_parallelism: bool = False
@property
def num_classes(self) -> int:
return self.cifar_variant.num_classes
@property
def eval_input_shape(self) -> Sequence[int]:
return (32, 32, 3)
def train_dataset(self,) -> ds_base.ArrayBatchIterator:
"""Returns the train dataset."""
def build_train_input():
ds = tfds.load(
name=self.cifar_variant.tfds_name,
split=f'train[:{self.num_train}]',
)
ds = ds.map(ds_utils.change_ds_dict_to_enn_batch)
ds = ds_utils.add_data_index_to_dataset(ds)
ds = self.train_ds_transformer(ds)
ds = ds.shard(jax.process_count(), jax.process_index())
# Shuffle before repeat ensures all examples seen in an epoch.
# https://www.tensorflow.org/guide/data_performance#repeat_and_shuffle
ds = ds.shuffle(buffer_size=10_000)
ds = ds.repeat()
train_preprocess = functools.partial(
preprocess_batch,
normalization_mode=self.normalization_mode,
random_flip=self.random_flip,
random_crop=self.random_crop,
cutout=self.cutout,
keep_image_size=self.keep_image_size,
is_training=True,
)
ds = ds.map(train_preprocess, num_parallel_calls=AUTOTUNE)
ds = ds_utils.slice_dataset_to_batches(
dataset=ds,
total_batch_size=self.train_batch,
data_parallelism=self.train_data_parallelism,
)
ds = ds.prefetch(AUTOTUNE)
return iter(tfds.as_numpy(ds))
train_input = utils.py_prefetch(build_train_input)
return utils.double_buffer_on_gpu(train_input)
def eval_datasets(self,) -> Dict[str, ds_base.ArrayBatchIterator]:
"""Returns the evaluation dataset."""
def build_eval_dataset(
eval_ds_transformer: ds_base.DatasetTransformer
) -> ds_base.ArrayBatchIterator:
ds = tfds.load(name=self.cifar_variant.tfds_name, split='test')
ds = ds.map(ds_utils.change_ds_dict_to_enn_batch)
ds = ds_utils.add_data_index_to_dataset(ds)
ds = ds.shard(jax.process_count(), jax.process_index())
# Preprocess
eval_preprocess = functools.partial(
preprocess_batch,
normalization_mode=self.normalization_mode,
random_flip=self.random_flip,
random_crop=self.random_crop,
cutout=self.cutout,
keep_image_size=self.keep_image_size,
is_training=False,
)
ds = ds.map(eval_preprocess, num_parallel_calls=AUTOTUNE)
# Apply evaluation transformer
ds = eval_ds_transformer(ds)
ds = ds_utils.slice_dataset_to_batches(
dataset=ds,
total_batch_size=self.eval_batch,
data_parallelism=self.eval_data_parallelism,
)
ds = ds.prefetch(AUTOTUNE)
return iter(tfds.as_numpy(ds))
return {
dataset_type: build_eval_dataset(transformer) for
dataset_type, transformer in self.eval_ds_transformers.items()
}
@dataclasses.dataclass
class Cifar10(Cifar):
"""Cifar10 as jaxline dataset."""
cifar_variant: CifarVariant = CifarVariant.CIFAR10
@dataclasses.dataclass
class Cifar100(Cifar):
"""Cifar100 as jaxline dataset."""
cifar_variant: CifarVariant = CifarVariant.CIFAR100
cutout: bool = True
keep_image_size: bool = True
def preprocess_batch(batch: ds_base.ArrayBatch,
normalization_mode: str,
random_crop: bool,
random_flip: bool,
cutout: bool,
keep_image_size: bool,
is_training: bool = False) -> ds_base.ArrayBatch:
"""Pre-processing module."""
images = batch.x
images = tf.image.convert_image_dtype(images, tf.float32)
if normalization_mode != 'custom':
images = images * 2. - 1.
tf.assert_less_equal(tf.math.reduce_max(images), 1.)
tf.assert_greater_equal(tf.math.reduce_min(images), -1.)
if normalization_mode == 'custom':
means = [0.49139968, 0.48215841, 0.44653091]
stds = [0.24703223, 0.24348513, 0.26158784]
images = (images - means) / stds
elif normalization_mode == 'standard':
images = tf.image.per_image_standardization(images)
elif normalization_mode == 'identity':
pass
else:
raise ValueError(
'Normalization mode should be one among custom, standard or identity.'
)
# Transformations that are valid only in training.
if is_training:
images = tf.reshape(images, (32, 32, 3))
if random_crop:
if keep_image_size:
image_shape = tf.shape(images)
images = tf.image.resize_with_crop_or_pad(images, image_shape[0] + 4,
image_shape[1] + 4)
images = tf.image.random_crop(images, (32, 32, 3))
else:
images = tf.image.random_crop(images, (24, 24, 3))
if random_flip:
images = tf.image.random_flip_left_right(images)
if cutout:
images = _cutout_single_image(
probability=0.5, cutout_size=16, image=images)
return dataclasses.replace(batch, x=images)
def _cutout_single_image(
    probability: float, cutout_size: int, image: tf.Tensor) -> tf.Tensor:
"""Cutout function."""
tf.Assert(
tf.less(cutout_size, image.shape[0]),
[cutout_size, image.shape[0]])
tf.Assert(
tf.less(cutout_size, image.shape[1]),
[cutout_size, image.shape[1]])
x_range = image.shape[0] - cutout_size + 1
y_range = image.shape[1] - cutout_size + 1
x_before = tf.random.uniform([], minval=0, maxval=x_range, dtype=tf.int32)
y_before = tf.random.uniform([], minval=0, maxval=y_range, dtype=tf.int32)
x_after = image.shape[0] - x_before - cutout_size
y_after = image.shape[1] - y_before - cutout_size
cutout_square = tf.zeros([cutout_size, cutout_size, 3])
mask = tf.pad(
cutout_square, [[x_before, x_after], [y_before, y_after], [0, 0]],
constant_values=1.0)
pred = tf.less(tf.random.uniform([], minval=0.0, maxval=1.0), probability)
return tf.cond(pred, lambda: mask * image, lambda: image)
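# Illustrative usage sketch, not part of the original library: applies the
# training-time preprocessing to a single dummy CIFAR image. The returned
# batch keeps the (32, 32, 3) shape because keep_image_size=True.
def _example_preprocess() -> ds_base.ArrayBatch:
  dummy = ds_base.ArrayBatch(
      x=tf.zeros([32, 32, 3], dtype=tf.uint8),
      y=tf.constant(0, dtype=tf.int32),
  )
  return preprocess_batch(
      dummy,
      normalization_mode='custom',
      random_crop=True,
      random_flip=True,
      cutout=False,
      keep_image_size=True,
      is_training=True,
  )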
| enn-master | enn/datasets/cifar.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Interface for datasets in jaxline."""
import abc
import dataclasses
import enum
import typing as tp
import chex
import numpy as np
import tensorflow.compat.v2 as tf
Array = tp.Union[np.ndarray, tf.Tensor]
# DatasetDict is a Dict with "images" and "labels" as keys
DatasetDict = tp.Dict[str, Array]
DataIndex = chex.Array # Integer identifiers used for bootstrapping
@chex.dataclass(frozen=True)
class ArrayBatch:
"""A Batch with array input and target."""
x: chex.Array # Inputs
y: chex.Array # Targets
data_index: tp.Optional[DataIndex] = None # Integer identifiers for data
weights: tp.Optional[chex.Array] = None # None defaults to weights = jnp.ones
extra: tp.Dict[str, chex.Array] = dataclasses.field(
default_factory=dict
) # You can put other optional stuff here
ArrayBatchIterator = tp.Iterator[
ArrayBatch
] # Equivalent to the dataset we loop through.
DatasetTransformer = tp.Callable[[tf.data.Dataset], tf.data.Dataset]
class Dataset(abc.ABC):
"""Abstract base class of a dataset."""
@property
@abc.abstractmethod
def num_classes(self) -> int:
"""Number of output classes."""
@property
@abc.abstractmethod
def eval_input_shape(self) -> tp.Sequence[int]:
"""Returns the shape of a single eval input from the dataset."""
@abc.abstractmethod
def train_dataset(self) -> ArrayBatchIterator:
"""Returns the train dataset."""
@abc.abstractmethod
def eval_datasets(self) -> tp.Dict[str, ArrayBatchIterator]:
"""Returns a dictionary of eval datasets.
The keys for these datasets should correspond to the self.mode in jaxline.
"""
@dataclasses.dataclass
class DatasetWithTransform(Dataset):
"""Dataset that implements dataset transforms explicitly on training/eval.
The point of this class is to allow for explicit *interception* of batches
so that we can more easily implement OOD experiments.
"""
train_ds_transformer: DatasetTransformer
eval_ds_transformers: tp.Dict[str, DatasetTransformer]
class OodVariant(enum.Enum):
WHOLE: str = 'eval'
IN_DISTRIBUTION: str = 'eval_in_dist'
OUT_DISTRIBUTION: str = 'eval_out_dist'
@classmethod
def valid_values(cls) -> tp.List[str]:
return list(map(lambda c: c.value, cls))
EVAL_TRANSFORMERS_DEFAULT = dataclasses.field(
default_factory=lambda: {OodVariant.WHOLE.value: (lambda x: x)})
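# Illustrative usage sketch, not part of the original library: a minimal batch
# built from numpy arrays, plus the eval-variant keys used by eval_datasets().
def _example_usage() -> ArrayBatch:
  assert OodVariant.valid_values() == ['eval', 'eval_in_dist', 'eval_out_dist']
  return ArrayBatch(x=np.zeros([2, 3]), y=np.zeros([2, 1]))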
| enn-master | enn/datasets/base.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Data loader for MNIST dataset."""
import dataclasses
import enum
import functools
from typing import Dict, Sequence
from enn.datasets import base as ds_base
from enn.datasets import utils as ds_utils
import jax
from jaxline import utils
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
class Split(enum.Enum):
"""Mnist dataset split."""
TRAIN = 50000
TEST = 10000
@property
def num_examples(self):
return self.value
@dataclasses.dataclass
class Mnist(ds_base.DatasetWithTransform):
"""MNIST as a jaxline dataset."""
train_batch: int = 128
eval_batch: int = 100
normalization_mode: str = 'standard'
train_ds_transformer: ds_base.DatasetTransformer = lambda x: x
eval_ds_transformers: Dict[
str, ds_base.DatasetTransformer] = ds_base.EVAL_TRANSFORMERS_DEFAULT
# Whether to add a leading axis of number of devices to the batches. If true,
# data batches have shape (number_devices, batch_size / number_devices, ...);
# otherwise, they have shape of (batch_size, ...).
train_data_parallelism: bool = True # Slices the train data into devices.
eval_data_parallelism: bool = False
@property
def num_classes(self) -> int:
return 10
@property
def eval_input_shape(self) -> Sequence[int]:
return (28, 28, 1)
def train_dataset(self,) -> ds_base.ArrayBatchIterator:
"""Returns the train dataset."""
def build_train_input():
ds = tfds.load(name='mnist', split='train')
ds = ds.map(ds_utils.change_ds_dict_to_enn_batch)
ds = self.train_ds_transformer(ds)
ds = ds_utils.add_data_index_to_dataset(ds)
ds = ds.shard(jax.process_count(), jax.process_index())
# Shuffle before repeat ensures all examples seen in an epoch.
# https://www.tensorflow.org/guide/data_performance#repeat_and_shuffle.
ds = ds.shuffle(buffer_size=10_000)
ds = ds.repeat()
train_preprocess = functools.partial(
preprocess_batch, normalization_mode=self.normalization_mode)
ds = ds.map(train_preprocess, num_parallel_calls=AUTOTUNE)
ds = ds_utils.slice_dataset_to_batches(
dataset=ds,
total_batch_size=self.train_batch,
data_parallelism=self.train_data_parallelism,
)
ds = ds.prefetch(AUTOTUNE)
return iter(tfds.as_numpy(ds))
train_input = utils.py_prefetch(build_train_input)
return utils.double_buffer_on_gpu(train_input)
def eval_datasets(self,) -> Dict[str, ds_base.ArrayBatchIterator]:
"""Returns the evaluation dataset."""
def build_eval_dataset(
eval_ds_transformer: ds_base.DatasetTransformer
) -> ds_base.ArrayBatchIterator:
ds = tfds.load(name='mnist', split='test')
ds = ds.map(ds_utils.change_ds_dict_to_enn_batch)
ds = ds_utils.add_data_index_to_dataset(ds)
ds = ds.shard(jax.process_count(), jax.process_index())
# Preprocess
eval_preprocess = functools.partial(
preprocess_batch, normalization_mode=self.normalization_mode)
ds = ds.map(eval_preprocess, num_parallel_calls=AUTOTUNE)
# Apply evaluation transformer
ds = eval_ds_transformer(ds)
ds = ds_utils.slice_dataset_to_batches(
dataset=ds,
total_batch_size=self.eval_batch,
data_parallelism=self.eval_data_parallelism,
)
ds = ds.prefetch(AUTOTUNE)
return iter(tfds.as_numpy(ds))
return {
dataset_type: build_eval_dataset(transformer) for
dataset_type, transformer in self.eval_ds_transformers.items()
}
def preprocess_batch(batch: ds_base.ArrayBatch,
normalization_mode: str) -> ds_base.ArrayBatch:
"""Pre-processing module."""
images = batch.x
images = tf.image.convert_image_dtype(images, tf.float32)
if normalization_mode == 'standard':
images = tf.image.per_image_standardization(images)
elif normalization_mode == 'identity':
pass
else:
raise ValueError(
        'Normalization mode should be either standard or identity.'
)
return dataclasses.replace(batch, x=images)
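# Illustrative usage sketch, not part of the original library: instantiate the
# dataset and pull one training batch. With train_data_parallelism=False the
# batch has no leading device axis, so batch.x is (32, 28, 28, 1) float32.
def _example_train_batch() -> ds_base.ArrayBatch:
  mnist = Mnist(train_batch=32, train_data_parallelism=False)
  return next(mnist.train_dataset())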
| enn-master | enn/datasets/mnist.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Transformation to sample from dataset with local structure.
This is used to generate evaluation batches that are of the (kappa, N) format.
"""
import dataclasses
from typing import Callable, Optional
from enn.datasets import base as ds_base
import tensorflow.compat.v2 as tf
AUTOTUNE = tf.data.experimental.AUTOTUNE
PerturbFn = Callable[[ds_base.ArrayBatch], ds_base.ArrayBatch]
def make_repeat_sample_transform(
num_repeat: int,
perturb_fn: PerturbFn,
limit_data: Optional[int] = None,
) -> ds_base.DatasetTransformer:
"""Alters Dataset for batches with num_repeat items and perturb_fn applied.
  This function alters the dataset so that each original entry is replaced by
  num_repeat consecutive copies of that entry, each with perturb_fn applied.
  This is useful for highlighting the importance of epistemic/aleatoric
  uncertainty.
Args:
num_repeat: number of repeated entries.
perturb_fn: function to be applied to each entry.
limit_data: optionally limit the final size of the dataset.
Returns:
dataset transformer.
"""
def repeat(batch: ds_base.ArrayBatch) -> ds_base.ArrayBatch:
repeated_x = tf.stack([batch.x] * num_repeat)
repeated_y = tf.stack([batch.y] * num_repeat)
return ds_base.ArrayBatch(x=repeated_x, y=repeated_y)
def transform(ds: tf.data.Dataset) -> tf.data.Dataset:
ds = ds.map(repeat).unbatch()
if limit_data:
ds = ds.take(limit_data)
ds = ds.map(perturb_fn, num_parallel_calls=AUTOTUNE)
return ds
return transform
def make_dyadic_transform(
num_repeat: int,
limit_data: Optional[int] = None,
crop_offset: int = 4,
flip: bool = True,
) -> ds_base.DatasetTransformer:
"""Defines settings perturbing images with random crop/flip."""
def perturb_fn(batch: ds_base.ArrayBatch) -> ds_base.ArrayBatch:
images = batch.x
if crop_offset > 0:
image_height, image_width, image_depth = images.shape
assert image_height > crop_offset
assert image_width > crop_offset
cropped_image_shape = (image_height - crop_offset,
image_width - crop_offset, image_depth)
images = tf.image.random_crop(images, cropped_image_shape)
# Resizing cropped image to its original shape. Without resizing, cropped
# image may not be passed to the neural network.
images = tf.image.resize(images, (image_height, image_width),
tf.image.ResizeMethod.BICUBIC)
if flip:
images = tf.image.random_flip_left_right(images)
return dataclasses.replace(batch, x=images)
return make_repeat_sample_transform(num_repeat, perturb_fn, limit_data)
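# Illustrative usage sketch, not part of the original library: applies the
# dyadic transform to a toy two-image dataset, yielding three independently
# cropped/flipped copies of each original entry (six entries in total).
def _example_dyadic_transform() -> tf.data.Dataset:
  ds = tf.data.Dataset.from_tensor_slices({
      'image': tf.zeros([2, 32, 32, 3]),
      'label': tf.zeros([2], dtype=tf.int32),
  })
  ds = ds.map(lambda d: ds_base.ArrayBatch(x=d['image'], y=d['label']))
  transform = make_dyadic_transform(num_repeat=3, crop_offset=4, flip=True)
  return transform(ds)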
| enn-master | enn/datasets/transforms/local_sample.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dataset transformations for generating OOD data."""
from typing import Dict, Tuple
from enn.datasets import base
import numpy as np
import tensorflow.compat.v2 as tf
def get_dataset_transform_from_type(
dataset_type: base.OodVariant,
ood_labels: np.ndarray,
) -> base.DatasetTransformer:
"""Returns a dataset transform from a type string."""
if dataset_type == base.OodVariant.IN_DISTRIBUTION:
return _make_ds_transform(ood_labels, ood_proportion=0.)
elif dataset_type == base.OodVariant.OUT_DISTRIBUTION:
return _make_ds_transform(ood_labels, ood_proportion=1.)
elif dataset_type == base.OodVariant.WHOLE:
return lambda x: x
else:
raise ValueError('Unsupported dataset type.')
def make_ood_transformers(
num_classes: int,
fraction_ood_classes: float = 0.2,
ood_proportion_in_train: float = 0.001,
seed: int = 321,
) -> Tuple[base.DatasetTransformer, Dict[
str, base.DatasetTransformer]]:
"""Returns a tuple of ood transfomers for train and eval datasets.
Args:
num_classes: Number of possible classes in the train dataset.
    fraction_ood_classes: Fraction of classes considered out-of-distribution.
      OOD classes are sampled randomly according to this fraction.
ood_proportion_in_train: Fraction of data samples with OOD labels in the
training dataset.
seed: Random seed used to sample OOD labels and generate the training set.
Returns:
    A tuple where the first element is an OOD transformer for the train dataset
    and the second element is a dictionary of OOD transformers for the eval
    datasets.
"""
ood_labels = sample_classes(
num_classes=num_classes,
num_samples=int(fraction_ood_classes * num_classes),
seed=seed,
)
train_ds_transformer = _make_ds_transform(
ood_labels=ood_labels,
ood_proportion=ood_proportion_in_train,
seed=seed,
)
eval_ds_transformers = dict()
for dataset_type in base.OodVariant:
eval_ds_transformers[dataset_type.value] = get_dataset_transform_from_type(
dataset_type=dataset_type,
ood_labels=ood_labels,
)
return (train_ds_transformer, eval_ds_transformers)
def sample_classes(num_classes: int, num_samples: int, seed: int) -> np.ndarray:
"""Sample a subset of size num_samples from [0, ..., num_classes - 1]."""
rng = np.random.default_rng(seed)
return rng.choice(range(num_classes), size=(num_samples,), replace=False)
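# Illustrative usage sketch, not part of the original library: with 10 classes
# and a 20% OOD fraction, two class labels are held out. The train transformer
# keeps them at a 0.1% proportion, while the eval transformers expose the
# whole, in-distribution and out-of-distribution splits.
def _example_ood_transformers():
  train_transform, eval_transforms = make_ood_transformers(
      num_classes=10,
      fraction_ood_classes=0.2,
      ood_proportion_in_train=0.001,
      seed=321,
  )
  assert set(eval_transforms) == set(base.OodVariant.valid_values())
  return train_transform, eval_transforms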
def _make_ds_transform(
ood_labels: np.ndarray,
ood_proportion: float = 0.,
seed: int = 0,
) -> base.DatasetTransformer:
"""Makes a TF dataset transformation that filters out certain labels.
Args:
ood_labels: An array of out-of-distribution labels.
ood_proportion: Fraction of data samples with ood_labels in the new dataset.
seed: Random seed used to generate the new dataset.
Returns:
A function that takes a TF dataset and returns a TF dataset.
"""
assert (ood_proportion >= 0.) and (ood_proportion <= 1.)
if not ood_labels.any():
return lambda ds: ds
def in_dist_predicate(batch: base.ArrayBatch) -> bool:
return tf.reduce_all(tf.not_equal(batch.y, ood_labels))
def out_dist_predicate(batch: base.ArrayBatch) -> bool:
return not in_dist_predicate(batch)
if ood_proportion == 0.:
return lambda ds: ds.filter(in_dist_predicate)
elif ood_proportion == 1:
return lambda ds: ds.filter(out_dist_predicate)
weights = (1. - ood_proportion, ood_proportion)
def partial_filter(ds: tf.data.Dataset):
ds_in_dist = ds.filter(in_dist_predicate)
ds_out_dist = ds.filter(out_dist_predicate)
return tf.data.Dataset.sample_from_datasets(
datasets=[ds_in_dist, ds_out_dist],
weights=weights,
stop_on_empty_dataset=True,
seed=seed,
)
return partial_filter
| enn-master | enn/datasets/transforms/ood.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
| enn-master | enn/datasets/transforms/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entries for pretrained ENNs are stored here."""
import enum
from enn.checkpoints import base as checkpoint_base
from enn.checkpoints import cifar10
from enn.checkpoints import cifar100
from enn.checkpoints import epinet as checkpoint_epinet
from enn.checkpoints import imagenet
# Alias to fit inside character limit
_EnnCpt = checkpoint_base.EnnCheckpoint
_EpiCpt = checkpoint_epinet.EpinetCheckpoint
class Cifar10Models(enum.Enum):
"""Pretrained models on Cifar10."""
RESNET_18: _EnnCpt = cifar10.resnet_18()
RESNET_32: _EnnCpt = cifar10.resnet_32()
RESNET_44: _EnnCpt = cifar10.resnet_44()
RESNET_56: _EnnCpt = cifar10.resnet_56()
RESNET_110: _EnnCpt = cifar10.resnet_110()
RESNET_18_FINAL_EPINET: _EpiCpt = cifar10.resnet_18_final_epinet()
RESNET_32_FINAL_EPINET: _EpiCpt = cifar10.resnet_32_final_epinet()
RESNET_44_FINAL_EPINET: _EpiCpt = cifar10.resnet_44_final_epinet()
RESNET_56_FINAL_EPINET: _EpiCpt = cifar10.resnet_56_final_epinet()
RESNET_110_FINAL_EPINET: _EpiCpt = cifar10.resnet_110_final_epinet()
class Cifar100Models(enum.Enum):
"""Pretrained models on Cifar100."""
RESNET_18: _EnnCpt = cifar100.resnet_18()
RESNET_32: _EnnCpt = cifar100.resnet_32()
RESNET_44: _EnnCpt = cifar100.resnet_44()
RESNET_56: _EnnCpt = cifar100.resnet_56()
RESNET_110: _EnnCpt = cifar100.resnet_110()
RESNET_18_FINAL_EPINET: _EpiCpt = cifar100.resnet_18_final_epinet()
RESNET_32_FINAL_EPINET: _EpiCpt = cifar100.resnet_32_final_epinet()
RESNET_44_FINAL_EPINET: _EpiCpt = cifar100.resnet_44_final_epinet()
RESNET_56_FINAL_EPINET: _EpiCpt = cifar100.resnet_56_final_epinet()
RESNET_110_FINAL_EPINET: _EpiCpt = cifar100.resnet_110_final_epinet()
class ImagenetModels(enum.Enum):
"""Pretrained models on ImageNet."""
RESNET_50: _EnnCpt = imagenet.resnet_50()
RESNET_101: _EnnCpt = imagenet.resnet_101()
RESNET_152: _EnnCpt = imagenet.resnet_152()
RESNET_200: _EnnCpt = imagenet.resnet_200()
RESNET_50_FINAL_EPINET: _EpiCpt = imagenet.resnet_50_final_epinet()
RESNET_101_FINAL_EPINET: _EpiCpt = imagenet.resnet_101_final_epinet()
RESNET_152_FINAL_EPINET: _EpiCpt = imagenet.resnet_152_final_epinet()
RESNET_200_FINAL_EPINET: _EpiCpt = imagenet.resnet_200_final_epinet()
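# Illustrative usage sketch, not part of the original library: catalog entries
# are plain checkpoint dataclasses, so both the network definition and its
# pretrained weights can be reconstructed from an enum member.
def _example_restore_resnet50():
  checkpoint = ImagenetModels.RESNET_50.value
  enn = checkpoint.enn_ctor()  # Builds the ENN architecture.
  params, state = checkpoint.load_fn()  # Downloads and restores the weights.
  return enn, params, state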
| enn-master | enn/checkpoints/catalog.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entries on Cifar100."""
from enn import datasets
from enn import networks
from enn.checkpoints import base as checkpoint_base
from enn.checkpoints import epinet as checkpoint_epinet
from enn.checkpoints import utils
from enn.networks.epinet import priors
from enn.networks.epinet import resnet as resnet_epinet_lib
def _make_resnet_ctor(
config: networks.ResNetConfig,
) -> checkpoint_base.EnnCtor:
"""Creates a resnet constructor for appropriate config."""
def enn_ctor() -> networks.EnnArray:
return networks.EnsembleResNetENN(
num_output_classes=datasets.Cifar100().num_classes,
num_ensemble=1,
is_training=False,
enable_double_transpose=False,
config=config,
)
return enn_ctor
def resnet_18() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet18 on Cifar100."""
return checkpoint_base.EnnCheckpoint(
name='cifar100_resnet18',
load_fn=utils.load_from_file(file_name='resnet18_cifar100'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_18.value),
dataset=datasets.Cifar100(),
)
def resnet_32() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet32 on Cifar100."""
return checkpoint_base.EnnCheckpoint(
name='cifar100_resnet32',
load_fn=utils.load_from_file(file_name='resnet32_cifar100'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_32.value),
dataset=datasets.Cifar100(),
)
def resnet_44() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet44 on Cifar100."""
return checkpoint_base.EnnCheckpoint(
name='cifar100_resnet44',
load_fn=utils.load_from_file(file_name='resnet44_cifar100'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_44.value),
dataset=datasets.Cifar100(),
)
def resnet_56() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet56 on Cifar100."""
return checkpoint_base.EnnCheckpoint(
name='cifar100_resnet56',
load_fn=utils.load_from_file(file_name='resnet56_cifar100'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_56.value),
dataset=datasets.Cifar100(),
)
def resnet_110() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet110 on Cifar100."""
return checkpoint_base.EnnCheckpoint(
name='cifar100_resnet110',
load_fn=utils.load_from_file(file_name='resnet110_cifar100'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_110.value),
dataset=datasets.Cifar100(),
)
def _make_epinet_config(
base_checkpoint: checkpoint_base.EnnCheckpoint
) -> resnet_epinet_lib.ResnetFinalEpinetConfig:
"""Creates an epinet config given a base net checkpoint."""
def prior_fn_ctor() -> networks.PriorFn:
return priors.make_cifar_conv_prior(num_ensemble=20, num_classes=100)
return resnet_epinet_lib.ResnetFinalEpinetConfig(
base_checkpoint=base_checkpoint,
index_dim=20,
num_classes=100,
epinet_hiddens=[50,],
epi_prior_scale=0.5,
add_prior_scale=0.5,
prior_fn_ctor=prior_fn_ctor,
freeze_base=True,
)
def resnet_18_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet18 base model on CIFAR100."""
return resnet_epinet_lib.make_checkpoint_from_config(
      name='cifar100_final_epinet_resnet18',
load_fn=utils.load_from_file(file_name='resnet18_epinet_cifar100'),
config=_make_epinet_config(resnet_18()),
)
def resnet_32_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet32 base model on CIFAR100."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar100_final_epinet_resnet32',
load_fn=utils.load_from_file(file_name='resnet32_epinet_cifar100'),
config=_make_epinet_config(resnet_32()),
)
def resnet_44_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet44 base model on CIFAR100."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar100_final_epinet_resnet44',
load_fn=utils.load_from_file(file_name='resnet44_epinet_cifar100'),
config=_make_epinet_config(resnet_44()),
)
def resnet_56_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet56 base model on CIFAR100."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar100_final_epinet_resnet56',
load_fn=utils.load_from_file(file_name='resnet56_epinet_cifar100'),
config=_make_epinet_config(resnet_56()),
)
def resnet_110_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet110 base model on CIFAR100."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar100_final_epinet_resnet110',
load_fn=utils.load_from_file(file_name='resnet110_epinet_cifar100'),
config=_make_epinet_config(resnet_110()),
)
| enn-master | enn/checkpoints/cifar100.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base file for defining how to restore pre-trained ENNs for evaluation."""
import dataclasses
from typing import Callable, Optional
from enn import base
from enn import datasets
from enn.checkpoints import base as checkpoint_base
from enn.networks.epinet import base as epinet_base
@dataclasses.dataclass
class EpinetCheckpoint:
"""Specialized checkpoint for epinet architectures.
You *can* just save the whole (base + epinet) as an ENN checkpoint.
However, if you break it down like this explicitly, then you can have an
optimized forward function over multiple ENN samples without recomputing
the base network each time.
"""
name: str # A string to describe this checkpoint entry.
load_fn: checkpoint_base.ParamsStateLoadFn # Restores params, state epinet.
epinet_ctor: Callable[[], epinet_base.EpinetWithState] # Epinet model
parse_hidden: epinet_base.BaseHiddenParser # Parse the hidden representation.
base_cpt: checkpoint_base.EnnCheckpoint # Checkpoint for the base model
base_index: Optional[base.Index] = None # Optional specify base_index.
base_scale: float = 1. # Scaling of base net output.
# Optionally rescale ENN outputs by 1/ temperature.
tuned_eval_temperature: Optional[float] = None
# Optional attributes used to identify the provenance of these models.
# This is mostly used *internally*, but can be useful outside too.
dataset: Optional[datasets.Dataset] = None # Dataset used in training.
report_cl: Optional[int] = None # Integer id for report CL (encouraged).
| enn-master | enn/checkpoints/epinet.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entries on ImageNet."""
from enn import datasets
from enn import networks
from enn.checkpoints import base as checkpoint_base
from enn.checkpoints import epinet as checkpoint_epinet
from enn.checkpoints import utils
from enn.networks.epinet import priors
from enn.networks.epinet import resnet as resnet_epinet_lib
def _make_resnet_ctor(
config: networks.ResNetConfig,
) -> checkpoint_base.EnnCtor:
"""Creates a resnet constructor for appropriate config."""
def enn_ctor() -> networks.EnnArray:
return networks.EnsembleResNetENN(
num_output_classes=datasets.Imagenet().num_classes,
num_ensemble=1,
is_training=False,
enable_double_transpose=True,
config=config,
)
return enn_ctor
def resnet_50() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet50 on ImageNet."""
return checkpoint_base.EnnCheckpoint(
name='imagenet_resnet50',
load_fn=utils.load_from_file(file_name='resnet50_imagenet'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_50.value),
dataset=datasets.Imagenet(),
tuned_eval_temperature=0.8,
)
def resnet_101() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet101 on ImageNet."""
return checkpoint_base.EnnCheckpoint(
name='imagenet_resnet101',
load_fn=utils.load_from_file(file_name='resnet101_imagenet'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_101.value),
dataset=datasets.Imagenet(),
tuned_eval_temperature=0.8,
)
def resnet_152() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet152 on ImageNet."""
return checkpoint_base.EnnCheckpoint(
name='imagenet_resnet152',
load_fn=utils.load_from_file(file_name='resnet152_imagenet'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_152.value),
dataset=datasets.Imagenet(),
tuned_eval_temperature=0.8,
)
def resnet_200() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet200 on ImageNet."""
return checkpoint_base.EnnCheckpoint(
name='imagenet_resnet200',
load_fn=utils.load_from_file(file_name='resnet200_imagenet'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_200.value),
dataset=datasets.Imagenet(),
tuned_eval_temperature=0.8,
)
def _make_epinet_config(
base_checkpoint: checkpoint_base.EnnCheckpoint
) -> resnet_epinet_lib.ResnetFinalEpinetConfig:
"""Creates an epinet config given a base net checkpoint."""
return resnet_epinet_lib.ResnetFinalEpinetConfig(
base_checkpoint=base_checkpoint,
index_dim=30,
num_classes=1000,
base_logits_scale=1 / base_checkpoint.tuned_eval_temperature,
epinet_hiddens=[50],
epi_prior_scale=1.,
add_prior_scale=1.,
prior_fn_ctor=lambda: priors.make_imagenet_conv_prior(num_ensemble=30),
freeze_base=True,
)
def resnet_50_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet50 base model on Imagenet."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='imagenet_final_epinet_resnet50',
load_fn=utils.load_from_file(file_name='resnet50_epinet_imagenet'),
config=_make_epinet_config(resnet_50()),
tuned_eval_temperature=0.7,
)
def resnet_101_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet101 base model on Imagenet."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='imagenet_final_epinet_resnet101',
load_fn=utils.load_from_file(file_name='resnet101_epinet_imagenet'),
config=_make_epinet_config(resnet_101()),
tuned_eval_temperature=0.7,
)
def resnet_152_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet152 base model on Imagenet."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='imagenet_final_epinet_resnet152',
load_fn=utils.load_from_file(file_name='resnet152_epinet_imagenet'),
config=_make_epinet_config(resnet_152()),
tuned_eval_temperature=0.7,
)
def resnet_200_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet200 base model on Imagenet."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='imagenet_final_epinet_resnet200',
load_fn=utils.load_from_file(file_name='resnet200_epinet_imagenet'),
config=_make_epinet_config(resnet_200()),
tuned_eval_temperature=0.7,
)
| enn-master | enn/checkpoints/imagenet.py |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
| enn-master | enn/checkpoints/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for loading from entries."""
import os
import tempfile
from typing import Callable, Tuple
import chex
import dill
from enn import base
from enn import networks
from enn.checkpoints import base as checkpoint_base
from enn.checkpoints import epinet as checkpoint_epinet
import haiku as hk
import jax
import jax.numpy as jnp
import requests
from typing_extensions import Protocol
class EnnSampler(Protocol):
def __call__(self, inputs: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Takes in a batch of inputs, a key and outputs a number of ENN samples.
Args:
inputs: of shape [batch_size, ...]
key: random key
Returns:
Multiple ENN samples e.g. [num_enn_samples, batch_size, num_classes].
"""
def load_from_file(
file_name: str,
url_path: str = 'https://storage.googleapis.com/dm-enn',
) -> checkpoint_base.ParamsStateLoadFn:
"""Utility wrapper to create a load function from a file."""
def load_fn() -> Tuple[hk.Params, hk.State]:
return init_from_file(file_name, url_path)
return load_fn
def init_from_file(
file_name: str,
url_path: str = 'https://storage.googleapis.com/dm-enn',
) -> Tuple[hk.Params, hk.State]:
"""Returns state and params from a file stored on `url_path`."""
url = f'{url_path}/{file_name}.npzs'
with tempfile.TemporaryDirectory() as tmpdir:
response = requests.get(url, verify=False)
# Make a temporary file for downloading
    filepath = os.path.join(tmpdir, f'{file_name}.npzs')
open(filepath, 'wb').write(response.content)
# Read data from temporary file
with open(filepath, 'rb') as f:
data = dill.load(f)
# Extracting params and state from data
params, state = data['params'], data['state']
# Map params and state from np.array to jnp.array
state = jax.tree_util.tree_map(jnp.array, state)
params = jax.tree_util.tree_map(jnp.array, params)
return params, state
def average_logits(array: chex.Array) -> chex.Array:
"""Takes average of logits over num_enn_sample."""
assert array.ndim == 3
unused_num_enn_samples, batch_size, num_classes = array.shape
# Convert logits to probabilities and take average
probs = jnp.mean(jax.nn.softmax(array), axis=0)
# Convert average probabilities back to logits
logits = jnp.log(probs)
chex.assert_shape(logits, (batch_size, num_classes))
return logits
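# Illustrative sketch, not part of the original library: averaging two ENN
# logit samples is done in probability space, so opposite predictions of
# [0.25, 0.75] and [0.75, 0.25] average to the uniform [0.5, 0.5].
def _example_average_logits() -> chex.Array:
  logits = jnp.log(jnp.array([[[0.25, 0.75]], [[0.75, 0.25]]]))
  return average_logits(logits)  # Approximately jnp.log([[0.5, 0.5]]).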
def make_enn_sampler_from_checkpoint(
checkpoint: checkpoint_base.EnnCheckpoint,
num_enn_samples: int,
temperature_rescale: bool = False,
) -> EnnSampler:
"""Makes a sampler that samples multiple logits given inputs and key.
Args:
checkpoint: an ENN checkpoint.
num_enn_samples: number of index samples for ENN.
temperature_rescale: whether to apply the tuned evaluation temperature.
Returns:
Callable: inputs, key --> logits of shape [num_enn_samples, batch, class].
"""
enn = checkpoint.enn_ctor()
params, state = checkpoint.load_fn()
if temperature_rescale and checkpoint.tuned_eval_temperature:
temperature = checkpoint.tuned_eval_temperature
else:
temperature = 1.
def sample_logits(inputs: chex.Array, key: chex.PRNGKey,) -> chex.Array:
index_fwd = lambda z: enn.apply(params, state, inputs, z)
indices = jax.vmap(enn.indexer)(jax.random.split(key, num_enn_samples))
enn_out, _ = jax.lax.map(index_fwd, indices)
logits = networks.parse_net_output(enn_out)
chex.assert_shape(logits, [num_enn_samples, None, None])
return logits / temperature
return jax.jit(sample_logits)
def load_checkpoint_as_logit_fn(
checkpoint: checkpoint_base.EnnCheckpoint,
num_enn_samples: int = 1,
temperature_rescale: bool = False,
seed: int = 0,
) -> Callable[[chex.Array], networks.OutputWithPrior]:
"""Loads an ENN as a simple forward function: images --> logits."""
enn_sampler = make_enn_sampler_from_checkpoint(
checkpoint, num_enn_samples, temperature_rescale)
def forward_fn(inputs: chex.Array) -> chex.Array:
logits = enn_sampler(inputs, jax.random.PRNGKey(seed))
ave_logits = average_logits(logits)
# Wrap the output with prior
return networks.OutputWithPrior(ave_logits) # pytype: disable=bad-return-type # numpy-scalars
return jax.jit(forward_fn)
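# Illustrative sketch, not part of the original library: turn any checkpoint
# into a deterministic forward function that averages 10 index samples. This
# assumes the checkpoint records its training dataset, whose eval_input_shape
# gives the expected image shape.
def _example_forward_logits(
    checkpoint: checkpoint_base.EnnCheckpoint) -> chex.Array:
  forward_fn = load_checkpoint_as_logit_fn(checkpoint, num_enn_samples=10)
  images = jnp.zeros((2, *checkpoint.dataset.eval_input_shape))
  return networks.parse_net_output(forward_fn(images))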
################################################################################
# Optimized Epinet forward functions and samplers
def make_epinet_sampler_from_checkpoint(
epinet_cpt: checkpoint_epinet.EpinetCheckpoint,
num_enn_samples: int = 1000,
temperature_rescale: bool = False,
) -> EnnSampler:
"""Forms a callable that samples multiple logits based on inputs and key."""
base_enn = epinet_cpt.base_cpt.enn_ctor()
if epinet_cpt.base_index is None:
base_index = base_enn.indexer(jax.random.PRNGKey(0))
else:
base_index = epinet_cpt.base_index
epinet = epinet_cpt.epinet_ctor()
# Pull out the parameters
base_params, base_state = epinet_cpt.base_cpt.load_fn()
epi_params, epi_state = epinet_cpt.load_fn()
if temperature_rescale and epinet_cpt.tuned_eval_temperature:
temperature = epinet_cpt.tuned_eval_temperature
else:
temperature = 1.
def sample_logits(inputs: chex.Array, key: chex.PRNGKey,) -> chex.Array:
# Forward the base network once
base_out, unused_base_state = base_enn.apply(
base_params, base_state, inputs, base_index)
hidden = epinet_cpt.parse_hidden(base_out)
base_logits = networks.parse_net_output(base_out) * epinet_cpt.base_scale
# Forward the enn over all the different indices
keys = jax.random.split(key, num_enn_samples)
indices = jax.vmap(epinet.indexer)(keys)
def index_fwd(index: base.Index) -> chex.Array:
return epinet.apply(epi_params, epi_state, inputs, index, hidden) # pytype: disable=bad-return-type # numpy-scalars
enn_out, unused_epi_state = jax.lax.map(index_fwd, indices)
enn_logits = networks.parse_net_output(enn_out)
# Combined logits
combined_logits = jnp.expand_dims(base_logits, 0) + enn_logits
chex.assert_equal_shape([combined_logits, enn_logits])
return combined_logits / temperature
return jax.jit(sample_logits)
def make_epinet_forward_fn(
epinet_cpt: checkpoint_epinet.EpinetCheckpoint,
num_enn_samples: int = 1000,
temperature_rescale: bool = False,
seed: int = 44,
) -> Callable[[chex.Array], chex.Array]:
"""Forms a callable that averages epinet over num_enn_samples indices."""
epinet_sampler = make_epinet_sampler_from_checkpoint(
epinet_cpt, num_enn_samples, temperature_rescale)
key = jax.random.PRNGKey(seed)
def forward_fn(inputs: chex.Array) -> chex.Array:
logits_samples = epinet_sampler(inputs, key)
return average_logits(logits_samples)
return jax.jit(forward_fn)
| enn-master | enn/checkpoints/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Entries on Cifar10."""
from enn import datasets
from enn import networks
from enn.checkpoints import base as checkpoint_base
from enn.checkpoints import epinet as checkpoint_epinet
from enn.checkpoints import utils
from enn.networks.epinet import priors
from enn.networks.epinet import resnet as resnet_epinet_lib
def _make_resnet_ctor(
config: networks.ResNetConfig,
) -> checkpoint_base.EnnCtor:
"""Creates a resnet constructor for appropriate config."""
def enn_ctor() -> networks.EnnArray:
return networks.EnsembleResNetENN(
num_output_classes=datasets.Cifar10().num_classes,
num_ensemble=1,
is_training=False,
enable_double_transpose=False,
config=config,
)
return enn_ctor
def resnet_18() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet18 on Cifar10."""
return checkpoint_base.EnnCheckpoint(
name='cifar10_resnet18',
load_fn=utils.load_from_file(file_name='resnet18_cifar10'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_18.value),
dataset=datasets.Cifar10(),
)
def resnet_32() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet32 on Cifar10."""
return checkpoint_base.EnnCheckpoint(
name='cifar10_resnet32',
load_fn=utils.load_from_file(file_name='resnet32_cifar10'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_32.value),
dataset=datasets.Cifar10(),
)
def resnet_44() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet44 on Cifar10."""
return checkpoint_base.EnnCheckpoint(
name='cifar10_resnet44',
load_fn=utils.load_from_file(file_name='resnet44_cifar10'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_44.value),
dataset=datasets.Cifar10(),
)
def resnet_56() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet56 on Cifar10."""
return checkpoint_base.EnnCheckpoint(
name='cifar10_resnet56',
load_fn=utils.load_from_file(file_name='resnet56_cifar10'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_56.value),
dataset=datasets.Cifar10(),
)
def resnet_110() -> checkpoint_base.EnnCheckpoint:
"""Benchmark baseline for ResNet110 on Cifar10."""
return checkpoint_base.EnnCheckpoint(
name='cifar10_resnet110',
load_fn=utils.load_from_file(file_name='resnet110_cifar10'),
enn_ctor=_make_resnet_ctor(networks.CanonicalResNets.RESNET_110.value),
dataset=datasets.Cifar10(),
)
def _make_epinet_config(
base_checkpoint: checkpoint_base.EnnCheckpoint
) -> resnet_epinet_lib.ResnetFinalEpinetConfig:
"""Creates an epinet config given a base net checkpoint."""
def prior_fn_ctor() -> networks.PriorFn:
return priors.make_cifar_conv_prior(num_ensemble=20, num_classes=10)
return resnet_epinet_lib.ResnetFinalEpinetConfig(
base_checkpoint=base_checkpoint,
index_dim=20,
num_classes=10,
epinet_hiddens=[50,],
epi_prior_scale=4.,
add_prior_scale=0.0,
prior_fn_ctor=prior_fn_ctor,
freeze_base=True,
)
def resnet_18_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet18 base model on CIFAR10."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar10_final_epinet_resnet18',
load_fn=utils.load_from_file(file_name='resnet18_epinet_cifar10'),
config=_make_epinet_config(resnet_18()),
)
def resnet_32_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet32 base model on CIFAR10."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar10_final_epinet_resnet32',
load_fn=utils.load_from_file(file_name='resnet32_epinet_cifar10'),
config=_make_epinet_config(resnet_32()),
)
def resnet_44_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet44 base model on CIFAR10."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar10_final_epinet_resnet44',
load_fn=utils.load_from_file(file_name='resnet44_epinet_cifar10'),
config=_make_epinet_config(resnet_44()),
)
def resnet_56_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet56 base model on CIFAR10."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar10_final_epinet_resnet56',
load_fn=utils.load_from_file(file_name='resnet56_epinet_cifar10'),
config=_make_epinet_config(resnet_56()),
)
def resnet_110_final_epinet() -> checkpoint_epinet.EpinetCheckpoint:
"""Final-layer epinet with Resnet110 base model on CIFAR10."""
return resnet_epinet_lib.make_checkpoint_from_config(
name='cifar10_final_epinet_resnet110',
load_fn=utils.load_from_file(file_name='resnet110_epinet_cifar10'),
config=_make_epinet_config(resnet_110()),
)
| enn-master | enn/checkpoints/cifar10.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base file for defining how to restore pre-trained ENNs for evaluation."""
import dataclasses
from typing import Callable, Optional, Tuple
from enn import datasets
from enn.networks import base
import haiku as hk
EnnCtor = Callable[[], base.EnnArray]
ParamsStateLoadFn = Callable[[], Tuple[hk.Params, hk.State]]
@dataclasses.dataclass
class EnnCheckpoint:
"""Maintains necessary info to restore an ENN from checkpoint.
This should only restore *one* ENN for *one* set of hyperparameters/data.
"""
name: str # A string to describe this checkpoint entry.
load_fn: ParamsStateLoadFn # Restores params, state for use in enn.
enn_ctor: EnnCtor # ENN model constructor
# Optional attributes used to identify the provenance of these models.
# This is mostly used *internally*, but can be useful outside too.
dataset: Optional[datasets.Dataset] = None # Dataset used in training.
report_cl: Optional[int] = None # Integer id for report CL (encouraged).
  # Optionally rescale ENN outputs by 1/temperature.
tuned_eval_temperature: Optional[float] = None
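# A hedged usage sketch (not part of the original library): given some
# `ckpt: EnnCheckpoint` plus caller-supplied inputs `x` and PRNG `key`,
# restoring and forwarding the checkpointed ENN might look as follows.
def _example_restore_and_forward(ckpt: EnnCheckpoint, x, key):
  """Illustrative only: rebuilds the ENN and evaluates it at x."""
  enn = ckpt.enn_ctor()           # Rebuild the network architecture.
  params, state = ckpt.load_fn()  # Restore pre-trained params and state.
  index = enn.indexer(key)        # Sample an epistemic index.
  out, unused_state = enn.apply(params, state, x, index)
  if ckpt.tuned_eval_temperature is not None:
    # Assumes the output is a plain array of logits.
    out = out / ckpt.tuned_eval_temperature
  return out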
| enn-master | enn/checkpoints/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for 1D regression data."""
import chex
from enn import datasets
from enn import networks
from enn import utils
from enn.supervised import base as supervised_base
import haiku as hk
import jax
import numpy as np
import pandas as pd
import plotnine as gg
def make_regression_df() -> pd.DataFrame:
"""Creates our regression dataset."""
seed = 0
n_data = 10
x = np.concatenate([np.linspace(0, 0.5, n_data), np.linspace(1, 1.5, n_data)])
w = np.random.RandomState(seed).randn(n_data * 2) * 0.1
y = x + np.sin(3 * x) + np.sin(12 * x) + w
return pd.DataFrame({'x': x, 'y': y}).reset_index()
def make_dataset(extra_input_dim: int = 1) -> datasets.ArrayBatchIterator:
"""Factory method to produce an iterator of Batches."""
df = make_regression_df()
data = datasets.ArrayBatch(
x=np.vstack([df['x'].values, np.ones((extra_input_dim, len(df)))]).T,
y=df['y'].values[:, None],
)
chex.assert_shape(data.x, (None, 1 + extra_input_dim))
return utils.make_batch_iterator(data)
def make_plot(experiment: supervised_base.BaseExperiment,
num_sample: int = 20,
extra_input_dim: int = 1) -> gg.ggplot:
"""Generate a regression plot with sampled predictions."""
plot_df = make_plot_data(
experiment, num_sample=num_sample, extra_input_dim=extra_input_dim)
p = (gg.ggplot()
+ gg.aes('x', 'y')
+ gg.geom_point(data=make_regression_df(), size=3, colour='blue')
+ gg.geom_line(gg.aes(group='k'), data=plot_df, alpha=0.5)
)
return p
def make_plot_data(experiment: supervised_base.BaseExperiment,
num_sample: int = 20,
extra_input_dim: int = 1) -> pd.DataFrame:
"""Generate a panda dataframe with sampled predictions."""
preds_x = np.vstack([np.linspace(-1, 2), np.ones((extra_input_dim, 50))]).T
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for k in range(num_sample):
net_out = experiment.predict(preds_x, key=next(rng))
preds_y = networks.parse_net_output(net_out)
data.append(pd.DataFrame({'x': preds_x[:, 0], 'y': preds_y[:, 0], 'k': k}))
plot_df = pd.concat(data)
return plot_df
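# A hedged usage sketch (not part of the original library): shows the array
# shapes produced by make_dataset with the default extra_input_dim=1.
def _example_inspect_batch() -> datasets.ArrayBatch:
  """Illustrative only: pulls one batch and checks its shapes."""
  dataset = make_dataset(extra_input_dim=1)
  batch = next(dataset)
  chex.assert_shape(batch.x, (None, 2))  # Features are [x, appended ones].
  chex.assert_shape(batch.y, (None, 1))
  return batch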
| enn-master | enn/supervised/regression_data.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An standard experiment operating by SGD."""
import functools
from typing import Dict, NamedTuple, Optional, Tuple
import chex
from enn import base
from enn import datasets
from enn import loggers
from enn import losses
from enn import metrics
from enn import networks
from enn.supervised import base as supervised_base
import haiku as hk
import jax
import optax
class TrainingState(NamedTuple):
params: hk.Params
network_state: hk.State
opt_state: optax.OptState
class Experiment(supervised_base.BaseExperiment):
"""Class to handle supervised training.
Optional eval_datasets which is a collection of datasets to *evaluate*
the loss on every eval_log_freq steps. Note that this evaluation assumes
that the dataset will only be for *one* batch. This means that, if you want
to evaluate on the whole test set, you should make that batch size the size
of the entire test set, and that it is *repeated* iterator, so you can sample
from it multiple times without reaching end of iterator.
"""
def __init__(
self,
enn: networks.EnnArray,
loss_fn: losses.LossFnArray,
optimizer: optax.GradientTransformation,
dataset: datasets.ArrayBatchIterator,
seed: int = 0,
logger: Optional[loggers.Logger] = None,
train_log_freq: int = 1,
eval_datasets: Optional[Dict[str, datasets.ArrayBatchIterator]] = None,
eval_metrics: Optional[Dict[str, metrics.MetricCalculator]] = None,
eval_enn_samples: int = 100,
eval_log_freq: int = 1,
init_x: Optional[chex.Array] = None):
"""Initializes an SGD experiment.
Args:
enn: ENN mapping arrays to any output.
loss_fn: Defines the loss for the ENN on a batch of data.
optimizer: optax optimizer.
dataset: iterator that produces a training batch.
seed: initializes random seed from jax.
logger: optional logger, defaults to acme logger.
train_log_freq: train logging frequency.
eval_datasets: Optional dict of extra datasets to evaluate on. Note that
these evaluate on *one* batch, so should be of appropriate batch size.
eval_metrics: Optional dict of extra metrics that should be evaluated on
the eval_datasets.
eval_enn_samples: number of ENN samples to use in eval_metrics evaluation.
eval_log_freq: evaluation log frequency.
init_x: optional input array used to initialize networks. Default none
works by taking from the training dataset.
"""
self.enn = enn
self.dataset = dataset
self.rng = hk.PRNGSequence(seed)
# Internalize the loss_fn
self._loss = jax.jit(functools.partial(loss_fn, self.enn))
# Internalize the eval datasets and metrics
self._eval_datasets = eval_datasets
self._eval_metrics = eval_metrics
self._eval_log_freq = eval_log_freq
self._eval_enn_samples = eval_enn_samples
    self._should_eval = bool(eval_metrics and eval_datasets)
# Forward network at random index
def forward(params: hk.Params,
state: hk.State,
inputs: chex.Array,
key: chex.PRNGKey) -> chex.Array:
index = self.enn.indexer(key)
out, unused_state = self.enn.apply(params, state, inputs, index)
return out
self._forward = jax.jit(forward)
# Batched forward at multiple random indices
self._batch_fwd = jax.vmap(forward, in_axes=[None, None, None, 0])
# Define the SGD step on the loss
def sgd_step(
training_state: TrainingState,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> Tuple[TrainingState, base.LossMetrics]:
# Calculate the loss, metrics and gradients
loss_output, grads = jax.value_and_grad(self._loss, has_aux=True)(
training_state.params, training_state.network_state, batch, key)
loss, (network_state, loss_metrics) = loss_output
loss_metrics.update({'loss': loss})
updates, new_opt_state = optimizer.update(grads, training_state.opt_state)
new_params = optax.apply_updates(training_state.params, updates)
new_state = TrainingState(
params=new_params,
network_state=network_state,
opt_state=new_opt_state,
)
return new_state, loss_metrics
self._sgd_step = jax.jit(sgd_step)
# Initialize networks
if init_x is None:
batch = next(self.dataset)
init_x = batch.x
index = self.enn.indexer(next(self.rng))
params, network_state = self.enn.init(next(self.rng), init_x, index)
opt_state = optimizer.init(params)
self.state = TrainingState(params, network_state, opt_state)
self.step = 0
self.logger = logger or loggers.make_default_logger(
'experiment', time_delta=0)
self._train_log_freq = train_log_freq
def train(self, num_batches: int):
"""Trains the experiment for specified number of batches.
Note that this training is *stateful*, the experiment keeps track of the
    total number of training steps that have occurred. This method *also* logs
the training and evaluation metrics periodically.
Args:
num_batches: the number of training batches, and SGD steps, to perform.
"""
for _ in range(num_batches):
self.step += 1
self.state, loss_metrics = self._sgd_step(
self.state, next(self.dataset), next(self.rng))
# Periodically log this performance as dataset=train.
if self.step % self._train_log_freq == 0:
loss_metrics.update(
{'dataset': 'train', 'step': self.step, 'sgd': True})
self.logger.write(loss_metrics)
# Periodically evaluate the other datasets.
if self._should_eval and self.step % self._eval_log_freq == 0:
for name, dataset in self._eval_datasets.items():
# Evaluation happens on a single batch
eval_batch = next(dataset)
eval_metrics = {'dataset': name, 'step': self.step, 'sgd': False}
# Forward the network once, then evaluate all the metrics
net_out = self._batch_fwd(
self.state.params,
self.state.network_state,
eval_batch.x,
jax.random.split(next(self.rng), self._eval_enn_samples),
)
logits = networks.parse_net_output(net_out)
for metric_name, metric_calc in self._eval_metrics.items():
eval_metrics.update({
metric_name: metric_calc(logits, eval_batch.y),
})
# Write all the metrics to the logger
self.logger.write(eval_metrics)
def predict(self, inputs: chex.Array, key: chex.PRNGKey) -> chex.Array:
"""Evaluate the trained model at given inputs."""
return self._forward(
self.state.params,
self.state.network_state,
inputs,
key,
)
def loss(self, batch: datasets.ArrayBatch, key: chex.PRNGKey) -> chex.Array:
"""Evaluate the loss for one batch of data."""
loss, (unused_network_state, unused_metrics) = self._loss(
self.state.params,
self.state.network_state,
batch,
key,
)
return loss
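# A hedged usage sketch (not part of the original library): wires up the
# optional eval_datasets/eval_metrics arguments described above. The `enn`,
# `loss_fn`, `train_data` and `test_data` arguments are assumed to be
# supplied by the caller; `test_data` should yield one repeated, full-size
# batch as noted in the class docstring.
def _example_experiment_with_eval(enn, loss_fn, train_data, test_data):
  """Illustrative only: trains while logging a hand-rolled accuracy metric."""
  import jax.numpy as jnp  # Local import keeps the sketch self-contained.
  def mean_accuracy(logits: chex.Array, labels: chex.Array) -> chex.Array:
    # logits: [num_enn_samples, batch, num_classes]; labels: [batch, 1] assumed.
    preds = jnp.argmax(jnp.mean(logits, axis=0), axis=-1)
    return jnp.mean(preds == jnp.asarray(labels).reshape(-1))
  experiment = Experiment(
      enn=enn,
      loss_fn=loss_fn,
      optimizer=optax.adam(1e-3),
      dataset=train_data,
      eval_datasets={'test': test_data},
      eval_metrics={'accuracy': mean_accuracy},
      eval_enn_samples=10,
      eval_log_freq=10,
  )
  experiment.train(100)
  return experiment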
| enn-master | enn/supervised/sgd_experiment.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions for 2D classification."""
from typing import Optional, Tuple
from enn import datasets as enn_datasets
from enn import networks
from enn import utils
from enn.supervised import base as supervised_base
import haiku as hk
import jax
import numpy as np
import pandas as pd
import plotnine as gg
from sklearn import datasets
def make_dataset(num_sample: int = 10,
prob_swap: float = 0.,
seed: int = 0) -> enn_datasets.ArrayBatchIterator:
"""Make a 2 moons dataset with num_sample per class and prob_swap label."""
x, y = datasets.make_moons(2 * num_sample, noise=0.1, random_state=seed)
# Swap the labels for data with prob_swap
swaps = np.random.RandomState(seed).binomial(1, prob_swap, len(y))
swap_locs = np.where(swaps)[0]
y[swap_locs] = 1 - y[swap_locs]
return utils.make_batch_iterator(enn_datasets.ArrayBatch(x=x, y=y))
def make_dataframe(
dataset: Optional[enn_datasets.ArrayBatchIterator] = None) -> pd.DataFrame:
dataset = dataset or make_dataset()
batch = next(dataset)
vals = np.hstack([batch.x, batch.y])
return pd.DataFrame(vals, columns=['x1', 'x2', 'label'])
def gen_2d_grid(plot_range: float) -> np.ndarray:
"""Generates a 2D grid for data in a certain_range."""
data = []
x_range = np.linspace(-plot_range, plot_range)
for x1 in x_range:
for x2 in x_range:
data.append((x1, x2))
return np.vstack(data)
def make_plot_data(experiment: supervised_base.BaseExperiment,
num_sample: int) -> pd.DataFrame:
"""Generate a classification plot with sampled predictions."""
preds_x = gen_2d_grid(plot_range=3)
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for k in range(num_sample):
net_out = experiment.predict(preds_x, key=next(rng))
logits = networks.parse_net_output(net_out)
preds_y = jax.nn.softmax(logits)
data.append(pd.DataFrame({
'x1': preds_x[:, 0], 'x2': preds_x[:, 1], 'label': preds_y[:, 1],
'sample': k
}))
return pd.concat(data)
def make_sample_plot(plot_df: pd.DataFrame,
data_df: Optional[pd.DataFrame] = None):
"""Make a plot of 2D classification samples over dataset."""
if data_df is None:
data_df = make_dataframe()
p = (gg.ggplot()
+ gg.aes('x1', 'x2', fill='label')
+ gg.geom_tile(data=plot_df, alpha=0.75)
+ gg.scale_fill_continuous(limits=[0, 1])
+ gg.geom_point(data=data_df,
colour='black', size=5, stroke=2)
+ gg.facet_wrap('sample', labeller='label_both')
+ gg.ggtitle('Posterior samples from ENN')
+ gg.theme(figure_size=(20, 14), panel_spacing=0.2))
return p
def make_mean_plot(plot_df: pd.DataFrame,
data_df: Optional[pd.DataFrame] = None):
"""Make a plot of 2D classification of the mean of the samples."""
mean_df = plot_df.groupby(['x1', 'x2'])['label'].mean().reset_index()
if data_df is None:
data_df = make_dataframe()
p = (gg.ggplot()
+ gg.aes('x1', 'x2', fill='label')
+ gg.geom_tile(data=mean_df, alpha=0.75)
+ gg.scale_fill_continuous(limits=[0, 1])
+ gg.geom_point(data=data_df,
colour='black', size=5, stroke=2)
+ gg.ggtitle('Posterior mean from ENN')
+ gg.theme(figure_size=(12, 10), panel_spacing=0.2))
return p
def make_mean_plot_data(
experiment: supervised_base.BaseExperiment
) -> Tuple[pd.DataFrame, pd.DataFrame]:
plot_df = make_plot_data(experiment, num_sample=100)
dataframe = make_dataframe(experiment.dataset)
mean_df = plot_df.groupby(['x1', 'x2'])['label'].mean().reset_index()
return mean_df, dataframe
def colab_plots(experiment: supervised_base.BaseExperiment):
plot_df = make_plot_data(experiment, num_sample=100)
dataframe = make_dataframe(experiment.dataset)
make_mean_plot(plot_df, dataframe).draw()
make_sample_plot(plot_df[plot_df['sample'] < 12],
dataframe).draw()
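# A hedged usage sketch (not part of the original library): builds a noisy
# two-moons dataset and inspects the resulting dataframe layout.
def _example_noisy_moons() -> pd.DataFrame:
  """Illustrative only: 100 points per class with 10% of labels flipped."""
  dataset = make_dataset(num_sample=100, prob_swap=0.1, seed=0)
  return make_dataframe(dataset)  # Columns are ['x1', 'x2', 'label'].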
| enn-master | enn/supervised/classification_data.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.supervised.sgd_experiment."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from enn import losses
from enn import networks
from enn import utils
from enn.supervised import sgd_experiment
import jax
import optax
class ExperimentTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([1, 3], [0, 1]))
def test_train_decreases_loss(self, num_outputs: int, seed: int):
"""Train an ensemble ENN on a test dataset and make sure loss decreases."""
num_ensemble = 5
output_sizes = [8, 8, num_outputs]
dataset = utils.make_test_data(100)
enn = networks.MLPEnsembleMatchedPrior(
output_sizes=output_sizes,
num_ensemble=num_ensemble,
dummy_input=next(dataset).x,
)
optimizer = optax.adam(1e-3)
if num_outputs == 1:
single_loss = losses.L2Loss()
elif num_outputs > 1:
single_loss = losses.XentLoss(num_outputs)
else:
raise ValueError(f'num_outputs should be >= 1. It is {num_outputs}.')
loss_fn = losses.average_single_index_loss(
single_loss, num_index_samples=10
)
experiment = sgd_experiment.Experiment(
enn, loss_fn, optimizer, dataset, seed
)
init_key, loss_key = jax.random.split(jax.random.PRNGKey(seed), 2)
initial_loss = experiment.loss(next(dataset), init_key)
experiment.train(50)
final_loss = experiment.loss(next(dataset), loss_key)
self.assertGreater(
initial_loss,
final_loss,
f'final loss {final_loss} is greater than initial loss {initial_loss}',
)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/supervised/sgd_experiment_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of the supervised experiments."""
# Base
from enn.supervised.base import BaseExperiment
# Multiloss
from enn.supervised.multiloss_experiment import MultilossExperiment
from enn.supervised.multiloss_experiment import MultilossTrainer
from enn.supervised.multiloss_experiment import TrainingState
# Experiments
from enn.supervised.sgd_experiment import Experiment
# Testing
from enn.supervised.testing import make_test_experiment
from enn.supervised.testing import TestExperiment
| enn-master | enn/supervised/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.supervised.sgd_experiment."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from enn import losses
from enn import networks
from enn import utils
from enn.supervised import multiloss_experiment
import jax
import optax
class ExperimentTest(parameterized.TestCase):
@parameterized.parameters(itertools.product([1, 3], [0, 1, 2]))
def test_train_decreases_loss(self, num_classes: int, seed: int):
"""Train an ensemble ENN on a test dataset and make sure loss decreases."""
    # Create ENN and loss functions.
if num_classes == 1:
single_loss = losses.L2Loss()
else:
single_loss = losses.XentLoss(num_classes)
loss_fn = losses.average_single_index_loss(single_loss, 2)
# Create two different training losses
train_dataset = utils.make_test_data(30)
base_trainer = multiloss_experiment.MultilossTrainer(
loss_fn=loss_fn,
dataset=train_dataset,
should_train=lambda _: True,
)
prior_dataset = utils.make_test_data(2) # An example of alternative data
prior_trainer = multiloss_experiment.MultilossTrainer(
loss_fn=loss_fn,
dataset=prior_dataset,
should_train=lambda step: step % 2 == 0,
name='prior'
)
enn = networks.MLPEnsembleMatchedPrior(
output_sizes=[20, 20, num_classes],
num_ensemble=2,
dummy_input=next(train_dataset).x,
)
experiment = multiloss_experiment.MultilossExperiment(
enn=enn,
trainers=[base_trainer, prior_trainer],
optimizer=optax.adam(1e-3),
seed=seed,
)
init_key, loss_key = jax.random.split(jax.random.PRNGKey(seed), 2)
initial_loss = experiment.loss(next(train_dataset), init_key)
experiment.train(50)
final_loss = experiment.loss(next(train_dataset), loss_key)
self.assertGreater(
initial_loss, final_loss,
f'final loss {final_loss} is greater than initial loss {initial_loss}')
if __name__ == '__main__':
absltest.main()
| enn-master | enn/supervised/multiloss_experiment_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Setting up simple experiments for tests."""
from typing import Callable, NamedTuple
import chex
from enn import losses
from enn import networks
from enn import utils
from enn.supervised import sgd_experiment
import optax
class TestExperiment(NamedTuple):
num_outputs: int
experiment_ctor: Callable[[networks.EnnArray], sgd_experiment.Experiment]
dummy_input: chex.Array
def make_test_experiment(regression: bool) -> TestExperiment:
"""Utility function to set up a supervised experiment for testing."""
dataset = utils.make_test_data(20)
optimizer = optax.adam(1e-3)
if regression:
num_outputs = 1
single_loss = losses.L2Loss()
else:
num_outputs = 2
single_loss = losses.XentLoss(num_outputs)
loss_fn = losses.average_single_index_loss(
single_loss, num_index_samples=1)
return TestExperiment(
num_outputs=num_outputs,
experiment_ctor=lambda enn: sgd_experiment.Experiment( # pylint:disable=g-long-lambda
enn, loss_fn, optimizer, dataset),
dummy_input=next(dataset).x,
)
| enn-master | enn/supervised/testing.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""An SGD experiment with facility for multiple losses."""
import dataclasses
import functools
from typing import Callable, Dict, NamedTuple, Optional, Sequence, Tuple
import chex
from enn import base
from enn import datasets
from enn import loggers
from enn import losses
from enn import networks
from enn.supervised import base as supervised_base
import haiku as hk
import jax
import optax
class TrainingState(NamedTuple):
params: hk.Params
network_state: hk.State
opt_state: optax.OptState
@dataclasses.dataclass
class MultilossTrainer:
"""Specify the training schedule for a given loss/dataset.
For step=1,2,...:
If should_train(step):
Apply one step of loss_fn on a batch = next(dataset).
"""
loss_fn: losses.LossFnArray # Loss function
dataset: datasets.ArrayBatchIterator # Dataset to pull batch from
should_train: Callable[[int], bool] = lambda _: True # Which steps to train
name: str = 'loss' # Name used for logging
# Type definition for loss function after internalizing the ENN
PureLoss = Callable[
[hk.Params, hk.State, datasets.ArrayBatch, chex.PRNGKey],
base.LossOutput]
class MultilossExperiment(supervised_base.BaseExperiment):
"""Class to handle supervised training with multiple losses.
At each step=1,2,...:
For t in trainers:
If t.should_train(step):
Apply one step of t.loss_fn on batch = next(t.dataset)
This can be useful for settings like "prior_loss" or transfer learning.
Optional eval_datasets which is a collection of datasets to *evaluate*
the loss on every eval_log_freq steps.
"""
def __init__(
self,
enn: networks.EnnArray,
trainers: Sequence[MultilossTrainer],
optimizer: optax.GradientTransformation,
seed: int = 0,
logger: Optional[loggers.Logger] = None,
train_log_freq: int = 1,
eval_datasets: Optional[Dict[str, datasets.ArrayBatchIterator]] = None,
eval_log_freq: int = 1,
):
self.enn = enn
self.pure_trainers = _purify_trainers(trainers, enn)
self.rng = hk.PRNGSequence(seed)
# Internalize the eval datasets
self._eval_datasets = eval_datasets
self._eval_log_freq = eval_log_freq
# Forward network at random index
def forward(params: hk.Params, state: hk.State, inputs: chex.Array,
key: chex.PRNGKey) -> chex.Array:
index = self.enn.indexer(key)
out, _ = self.enn.apply(params, state, inputs, index)
return out
self._forward = jax.jit(forward)
# Define the SGD step on the loss
def sgd_step(
pure_loss: PureLoss,
state: TrainingState,
batch: datasets.ArrayBatch,
key: chex.PRNGKey,
) -> Tuple[TrainingState, base.LossMetrics]:
# Calculate the loss, metrics and gradients
loss_output, grads = jax.value_and_grad(pure_loss, has_aux=True)(
state.params, state.network_state, batch, key)
loss, (network_state, metrics) = loss_output
metrics.update({'loss': loss})
updates, new_opt_state = optimizer.update(grads, state.opt_state)
new_params = optax.apply_updates(state.params, updates)
new_state = TrainingState(
params=new_params,
network_state=network_state,
opt_state=new_opt_state,
)
return new_state, metrics
self._sgd_step = jax.jit(sgd_step, static_argnums=0)
# Initialize networks
batch = next(self.pure_trainers[0].dataset)
index = self.enn.indexer(next(self.rng))
params, network_state = self.enn.init(next(self.rng), batch.x, index)
opt_state = optimizer.init(params)
self.state = TrainingState(params, network_state, opt_state)
self.step = 0
self.logger = logger or loggers.make_default_logger(
'experiment', time_delta=0)
self._train_log_freq = train_log_freq
def train(self, num_batches: int):
"""Train the ENN for num_batches."""
for _ in range(num_batches):
self.step += 1
for t in self.pure_trainers:
if t.should_train(self.step):
self.state, loss_metrics = self._sgd_step(
t.pure_loss, self.state, next(t.dataset), next(self.rng))
# Periodically log this performance as dataset=train.
if self.step % self._train_log_freq == 0:
loss_metrics.update({
'dataset': 'train',
'step': self.step,
'sgd': True,
'trainer': t.name,
})
self.logger.write(loss_metrics)
# Periodically evaluate the other datasets.
if self._eval_datasets and self.step % self._eval_log_freq == 0:
for name, dataset in self._eval_datasets.items():
for t in self.pure_trainers:
loss, (unused_network_state, metrics) = t.pure_loss(
self.state.params,
self.state.network_state,
next(dataset),
next(self.rng),
)
metrics.update({
'dataset': name,
'step': self.step,
'sgd': False,
'loss': loss,
'trainer': t.name,
})
self.logger.write(metrics)
def predict(self, inputs: chex.Array,
key: chex.PRNGKey) -> chex.Array:
"""Evaluate the trained model at given inputs."""
return self._forward(
self.state.params,
self.state.network_state,
inputs,
key,
)
def loss(self, batch: datasets.ArrayBatch,
key: chex.PRNGKey) -> chex.Array:
"""Evaluate the first loss for one batch of data."""
pure_loss = self.pure_trainers[0].pure_loss
loss, _ = pure_loss(self.state.params, self.state.network_state, batch, key)
return loss
@dataclasses.dataclass
class _PureTrainer:
"""An intermediate representation of MultilossTrainer with pure loss."""
pure_loss: PureLoss # Pure loss function after internalizing enn
dataset: datasets.ArrayBatchIterator # Dataset to pull batch from
should_train: Callable[[int], bool] # Whether should train on step
name: str = 'loss' # Name used for logging
def _purify_trainers(
trainers: Sequence[MultilossTrainer],
enn: networks.EnnArray) -> Sequence[_PureTrainer]:
"""Converts MultilossTrainer to have *pure* loss function including enn."""
pure_trainers = []
for t in trainers:
pure_trainer = _PureTrainer(
pure_loss=jax.jit(functools.partial(t.loss_fn, enn)),
dataset=t.dataset,
should_train=t.should_train,
name=t.name,
)
pure_trainers.append(pure_trainer)
return tuple(pure_trainers)
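# A hedged usage sketch (not part of the original library): `enn`, `loss_fn`,
# `train_data` and `prior_data` are assumed to be supplied by the caller.
# Shows how should_train schedules interleave two losses.
def _example_two_trainers(enn, loss_fn, train_data, prior_data):
  """Illustrative only: trains every step, adds a prior loss every 10th step."""
  trainers = [
      MultilossTrainer(loss_fn=loss_fn, dataset=train_data),
      MultilossTrainer(
          loss_fn=loss_fn,
          dataset=prior_data,
          should_train=lambda step: step % 10 == 0,
          name='prior',
      ),
  ]
  experiment = MultilossExperiment(
      enn=enn, trainers=trainers, optimizer=optax.adam(1e-3))
  experiment.train(100)
  return experiment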
| enn-master | enn/supervised/multiloss_experiment.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for a 'standard' supervised experiment."""
import abc
import dataclasses
import chex
from enn.datasets import base as ds_base
@dataclasses.dataclass
class BaseExperiment(abc.ABC):
"""Base interface for experiment classes."""
dataset: ds_base.ArrayBatchIterator
@abc.abstractmethod
def train(self, num_batches: int):
"""Train the ENN for num_batches."""
@abc.abstractmethod
def predict(self, inputs: chex.Array,
key: chex.PRNGKey) -> chex.Array:
"""Evaluate the trained model at given inputs."""
@abc.abstractmethod
def loss(self, batch: ds_base.ArrayBatch,
key: chex.PRNGKey) -> chex.Array:
"""Calculate the loss at a given batch."""
| enn-master | enn/supervised/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Modules for forwarding ENNs multiple times."""
import chex
from enn import base
from enn.networks import utils
import haiku as hk
import jax
import typing_extensions
# TODO(author2): clarify the expected shapes/conventions for EnnBatchFwd
class EnnBatchFwd(typing_extensions.Protocol[base.Input]):
"""Creates a sampler for *multiple* logits samples from ENN.
In most of our code applications this should output something with shape:
[num_enn_samples, num_batch, num_class]
However, we are not currently careful/clear about shape expectations, and
intend to improve on this.
"""
def __call__(
self,
params: hk.Params,
state: hk.State,
inputs: base.Input,
) -> chex.Array:
"""Forwards the ENN at given inputs for *multiple* index samples."""
def make_batch_fwd(
enn: base.EpistemicNetwork[base.Input, chex.Array],
num_enn_samples: int = 1000,
seed: int = 66,
) -> EnnBatchFwd[base.Input]:
"""Forwards ENN for num_enn_samples sample logits."""
keys = jax.random.split(jax.random.PRNGKey(seed), num_enn_samples)
def forward(params: hk.Params, state: hk.State, x: base.Input) -> chex.Array:
batch_apply = jax.vmap(enn.apply, in_axes=[None, None, None, 0])
indices = jax.vmap(enn.indexer)(keys)
net_out, unused_state = batch_apply(params, state, x, indices)
return utils.parse_net_output(net_out)
return jax.jit(forward)
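# A hedged usage sketch (not part of the original library): `enn`, `params`,
# `state` and `x` are assumed to come from a trained experiment. Shows the
# expected output shape of an EnnBatchFwd.
def _example_batch_fwd(enn, params, state, x):
  """Illustrative only: samples 100 sets of logits from the ENN."""
  batch_fwd = make_batch_fwd(enn, num_enn_samples=100, seed=0)
  logits = batch_fwd(params, state, x)
  return logits  # Expected shape [num_enn_samples, batch_size, num_classes].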
| enn-master | enn/networks/forwarders.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Gaussian ENN."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import gaussian_enn
class GaussianEnnTest(parameterized.TestCase):
@parameterized.parameters([
([], 1., True), ([10, 10], 0.1, True),
([], 1., False), ([10, 10], 0.1, False),
])
def test_ten_batches(self,
hiddens: List[int],
init_scale: float,
regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = gaussian_enn.GaussianNoiseMLP(
output_sizes=hiddens+[test_experiment.num_outputs],
init_scale=init_scale,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/gaussian_enn_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Network definitions for VGG."""
from typing import Any, Sequence, Tuple
from absl import logging
import chex
from enn.networks import base as networks_base
from enn.networks import ensembles
import haiku as hk
import jax
import jax.numpy as jnp
BatchNormIndex = Tuple[Any, bool]
_VGG_CHANNELS = (64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512)
_VGG_STRIDES = (1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1)
# PIPELINE VGG (VGG - max_pooling + batch_norm)
class VGG(hk.Module):
"""VGG Network with batchnorm and without maxpool."""
def __init__(self,
num_output_classes: int,
vgg_output_channels: Sequence[int] = _VGG_CHANNELS,
vgg_strides: Sequence[int] = _VGG_STRIDES,
name=None,):
super().__init__(name=name)
logging.info('Initializing a VGG-Net.')
self._output_channels = vgg_output_channels
self._strides = vgg_strides
num_channels = len(self._output_channels)
assert len(self._strides) == num_channels
self._kernel_shapes = [[3, 3]] * num_channels
self._conv_modules = [
hk.Conv2D( # pylint: disable=g-complex-comprehension
output_channels=self._output_channels[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
name=f'conv_2d_{i}') for i in range(num_channels)
]
# TODO(author2): Find a more robust way to exclude batchnorm params.
self._bn_modules = [
hk.BatchNorm( # pylint: disable=g-complex-comprehension
create_offset=True,
create_scale=False,
decay_rate=0.999,
name=f'batchnorm_{i}') for i in range(num_channels)
]
self._logits_module = hk.Linear(num_output_classes, name='logits')
def __call__(self,
inputs: chex.Array,
is_training: bool = True,
test_local_stats: bool = False) -> chex.Array:
net = inputs
for conv_layer, bn_layer in zip(self._conv_modules, self._bn_modules):
net = conv_layer(net)
net = bn_layer(
net, is_training=is_training, test_local_stats=test_local_stats)
net = jax.nn.relu(net)
# Avg pool along axis 1 and 2
net = jnp.mean(net, axis=[1, 2], keepdims=False, dtype=jnp.float64)
return self._logits_module(net)
class EnsembleVGGENN(networks_base.EnnArray):
"""Ensemble of VGG Networks created using einsum ensemble."""
def __init__(self,
num_output_classes: int,
num_ensemble: int = 1,
is_training: bool = True,
test_local_stats: bool = False):
def net_fn(x: chex.Array) -> chex.Array:
return VGG(num_output_classes)(
x, is_training=is_training, test_local_stats=test_local_stats)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = ensembles.EnsembleWithState(transformed, num_ensemble)
super().__init__(enn.apply, enn.init, enn.indexer)
| enn-master | enn/networks/vgg.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.networks.categorical_ensembles."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
import chex
from enn import losses
from enn import supervised
from enn import utils
from enn.networks import categorical_ensembles
import jax
import numpy as np
import optax
class CategoricalEnsemblesTest(parameterized.TestCase):
@parameterized.parameters([
[[20], np.linspace(-5, 5, 10), 3],
[[], np.linspace(-1, 1, 10), 1],
])
def test_categorical_ensemble(self, hiddens: List[int],
atoms: chex.Array, num_ensemble: int):
"""Running with the naive L2 loss."""
    # The combine_functions_choice_via_index introduces an additional dimension
    # that raises an error when added to the net output. We allow this for now.
jax.config.update('jax_numpy_rank_promotion', 'allow')
test_experiment = supervised.make_test_experiment(regression=True)
enn = categorical_ensembles.CatMLPEnsembleGpPrior(
output_sizes=hiddens + [1],
atoms=atoms,
input_dim=test_experiment.dummy_input.shape[1],
num_ensemble=num_ensemble,
num_feat=10,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
def test_categorical_2hot_regression(self):
"""Running with the categorical regression loss."""
    # The combine_functions_choice_via_index introduces an additional dimension
    # that raises an error when added to the net output. We allow this for now.
jax.config.update('jax_numpy_rank_promotion', 'allow')
dataset = utils.make_test_data()
enn = categorical_ensembles.CatMLPEnsembleMlpPrior(
output_sizes=[50, 50, 1],
atoms=np.linspace(-1, 1, 10),
dummy_input=next(dataset).x,
num_ensemble=3,
)
single_loss = losses.Cat2HotRegression()
loss_fn = losses.average_single_index_loss(single_loss, 1)
experiment = supervised.Experiment(enn, loss_fn, optax.adam(1e-3), dataset)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/categorical_ensembles_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import ensembles
import haiku as hk
import jax
import numpy as np
class EnsemblesENNTest(parameterized.TestCase):
@parameterized.product(
input_dim=[1, 2],
output_dim=[1, 3],
num_ensemble=[1, 5],
)
def test_ensemble_enn(
self,
input_dim: int,
output_dim: int,
num_ensemble: int,
):
"""Simple test to run just 10 batches."""
seed = 0
rng = hk.PRNGSequence(seed)
def model(inputs):
return hk.nets.MLP([output_dim])(inputs)
model = hk.without_apply_rng(hk.transform(model))
enn = ensembles.Ensemble(model, num_ensemble)
params = enn.init(next(rng), np.zeros((input_dim,)), 0)
self.assertEqual(params['mlp/~/linear_0']['b'].shape,
(num_ensemble, output_dim))
self.assertEqual(params['mlp/~/linear_0']['w'].shape,
(num_ensemble, input_dim, output_dim))
# overwrite random params
params = jax.tree_util.tree_map(lambda p: np.ones_like(p), params) # pylint: disable=[unnecessary-lambda]
dummy_inputs = np.ones(shape=(1, input_dim), dtype=np.float32)
expected_output = (1 + input_dim) * np.ones(shape=(1, output_dim),
dtype=np.float32)
for index in range(num_ensemble):
output = enn.apply(params, dummy_inputs, index)
self.assertTrue(
np.all(output - expected_output == 0),
f'Output: {output} \n is not equal to expected: {expected_output}')
class EnsemblesWithStateENNTest(parameterized.TestCase):
@parameterized.product(
input_dim=[1, 2],
output_dim=[1, 3],
num_ensemble=[1, 5],
)
def test_ensemble_with_state_enn(
self,
input_dim: int,
output_dim: int,
num_ensemble: int,
): # pylint:disable=[g-doc-args]
"""Test the shape and update of the model state.
    Tests that the parameters and states of the model have the right shapes.
It also tests that the output is calculated correctly and the counter in
the state is incremented.
"""
seed = 0
state_shape = (5, 7)
rng = hk.PRNGSequence(seed)
def model(inputs):
counter = hk.get_state(
'counter', shape=state_shape, dtype=np.int32, init=np.zeros)
hk.set_state('counter', counter + 1)
out = hk.nets.MLP([output_dim])(inputs)
return out
model = hk.without_apply_rng(hk.transform_with_state(model))
enn = ensembles.EnsembleWithState(model, num_ensemble)
params, state = enn.init(next(rng), np.zeros((input_dim,)), 0)
self.assertEqual(params['mlp/~/linear_0']['b'].shape,
(num_ensemble, output_dim))
self.assertEqual(params['mlp/~/linear_0']['w'].shape,
(num_ensemble, input_dim, output_dim))
self.assertEqual(state['~']['counter'].shape, (num_ensemble,) + state_shape)
# overwrite random params
params = jax.tree_util.tree_map(lambda p: np.ones_like(p), params) # pylint: disable=[unnecessary-lambda]
dummy_inputs = np.ones(shape=(1, input_dim), dtype=np.float32)
expected_output = (1 + input_dim) * np.ones(shape=(1, output_dim),
dtype=np.float32)
expected_state = np.zeros((num_ensemble,) + state_shape, dtype=np.int32)
for index in range(num_ensemble):
output, state = enn.apply(params, state, dummy_inputs, index)
expected_state[index, ...] = np.ones(state_shape, dtype=np.int32)
self.assertTrue(
np.all(output - expected_output == 0),
f'Output: {output} \n is not equal to expected: {expected_output}')
self.assertTrue(
np.all(state['~']['counter'] == expected_state),
f'State: {state} \n is not equal to expected: {expected_state}')
class MLPEnsembleTest(parameterized.TestCase):
@parameterized.parameters([
([], 1, True), ([10, 10], 5, True), ([], 1, False), ([10, 10], 5, False),
])
def test_ensemble(self,
hiddens: List[int],
num_ensemble: int,
regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = ensembles.MLPEnsembleMatchedPrior(
output_sizes=hiddens+[test_experiment.num_outputs],
dummy_input=test_experiment.dummy_input,
num_ensemble=num_ensemble,
prior_scale=1.,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/ensembles_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.networks.indexers."""
from absl.testing import absltest
from absl.testing import parameterized
from enn import base
from enn.networks import indexers
import jax
import numpy as np
class IndexersTest(parameterized.TestCase):
@parameterized.parameters([
[indexers.GaussianWithUnitIndexer(10)],
[indexers.EnsembleIndexer(5)],
[indexers.PrngIndexer()],
[indexers.ScaledGaussianIndexer(7)],
[indexers.DirichletIndexer(np.ones(3))],
])
def test_index_forward(self, indexer: base.EpistemicIndexer):
key = jax.random.PRNGKey(777)
jit_indexer = jax.jit(lambda x: indexer(x)) # pylint: disable=unnecessary-lambda
if isinstance(indexer, indexers.PrngIndexer):
assert np.all(indexer(key) == jit_indexer(key))
else:
assert np.allclose(indexer(key), jit_indexer(key))
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/indexers_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Hypermodels."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import bbb
class BBBTest(parameterized.TestCase):
@parameterized.parameters([
([3, 7], True),
([3, 7], False),
])
def test_bbb(
self, model_hiddens: List[int], regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = bbb.make_bbb_enn(
base_output_sizes=model_hiddens + [test_experiment.num_outputs],
dummy_input=test_experiment.dummy_input)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/bbb_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing some types of ENN ensembles in JAX."""
from typing import Callable, Optional, Sequence, Tuple
import chex
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import priors
import haiku as hk
import jax
import jax.numpy as jnp
class Ensemble(networks_base.EnnNoState):
"""Ensemble ENN that uses a dot product in param space.
Repeats parameters by an additional *ensemble* dimension in axis=0.
Applying the parameters selects one single component of parameters per index.
"""
def __init__(self,
model: hk.Transformed,
num_ensemble: int):
self.model = model
self.num_ensemble = num_ensemble
def init(key: chex.PRNGKey, inputs: chex.Array, index: int) -> hk.Params:
del index # Unused
batched_init = jax.vmap(model.init, in_axes=[0, None], out_axes=0)
return batched_init(jax.random.split(key, num_ensemble), inputs)
def apply(params: hk.Params, inputs: chex.Array,
index: int) -> networks_base.Output:
one_hot_index = jax.nn.one_hot(index, num_ensemble)
param_selector = lambda p: jnp.einsum('i...,i->...', p, one_hot_index)
sub_params = jax.tree_util.tree_map(param_selector, params)
return model.apply(sub_params, inputs)
indexer = indexers.EnsembleIndexer(num_ensemble)
super().__init__(apply, init, indexer)
class EnsembleWithState(networks_base.EnnArray):
"""Ensemble ENN that uses a dot product in param space.
  Same as Ensemble, but with an added state variable.
"""
def __init__(self,
model: hk.TransformedWithState,
num_ensemble: int):
self.model = model
self.num_ensemble = num_ensemble
def init(key: chex.PRNGKey,
inputs: chex.Array,
index: int) -> Tuple[hk.Params, hk.State]:
del index # Unused
batched_init = jax.vmap(model.init, in_axes=[0, None], out_axes=0)
params, states = batched_init(jax.random.split(key, num_ensemble), inputs)
return params, states
def apply(params: hk.Params, states: hk.State, inputs: chex.Array,
index: int) -> Tuple[networks_base.Output, hk.State]:
particle_selector = lambda x: jnp.take(x, index, axis=0)
sub_params = jax.tree_util.tree_map(particle_selector, params)
sub_states = jax.tree_util.tree_map(particle_selector, states)
out, new_sub_states = model.apply(sub_params, sub_states, inputs)
new_states = jax.tree_util.tree_map(
lambda s, nss: s.at[index, ...].set(nss), states, new_sub_states)
return out, new_states
indexer = indexers.EnsembleIndexer(num_ensemble)
super().__init__(apply, init, indexer)
def make_mlp_ensemble_prior_fns(
output_sizes: Sequence[int],
dummy_input: chex.Array,
num_ensemble: int,
seed: int = 0,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None
) -> Sequence[Callable[[chex.Array], chex.Array]]:
"""Factory method for creating ensemble of prior functions."""
rng = hk.PRNGSequence(seed)
def net_fn(x):
layers = [
hk.Flatten(),
hk.nets.MLP(output_sizes, w_init=w_init, b_init=b_init)
]
return hk.Sequential(layers)(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
prior_fns = []
for _ in range(num_ensemble):
params = transformed.init(next(rng), dummy_input)
prior_fns.append(lambda x, params=params: transformed.apply(params, x))
return prior_fns
def combine_functions_choice_via_index(
prior_fns: Sequence[Callable[[chex.Array], chex.Array]],
) -> priors.PriorFn:
"""Combines functions to a PriorFn(x, z), selecting fn by ensemble index."""
return lambda x, z: jax.lax.switch(z, prior_fns, x)
def combine_functions_linear_in_index(
prior_fns: Sequence[Callable[[chex.Array], chex.Array]],
) -> priors.PriorFn:
"""Combines functions to a PriorFn(x, z), linear in epistemic index."""
def enn_fn(x, z):
prior_outputs = jnp.array([prior_fn(x) for prior_fn in prior_fns])
num_index, unused_batch_size, unused_num_classes = prior_outputs.shape
chex.assert_shape(z, (num_index,))
return jnp.einsum('nbo,n->bo', prior_outputs, z)
return jax.jit(enn_fn)
def make_random_gp_ensemble_prior_fns(
input_dim: int,
output_dim: int,
num_feat: int,
gamma: priors.GpGamma,
num_ensemble: int,
seed: int = 0,
) -> Sequence[Callable[[chex.Array], chex.Array]]:
"""Factory method for creating an ensemble of random GPs."""
rng = hk.PRNGSequence(seed)
prior_fns = []
for _ in range(num_ensemble):
prior_fns.append(priors.make_random_feat_gp(
input_dim, output_dim, num_feat, next(rng), gamma, scale=1.))
return prior_fns
class MLPEnsembleMatchedPrior(networks_base.EnnArray):
"""Ensemble of MLPs with matched prior functions."""
def __init__(self,
output_sizes: Sequence[int],
dummy_input: chex.Array,
num_ensemble: int,
prior_scale: float = 1.,
seed: int = 0,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None):
"""Ensemble of MLPs with matched prior functions."""
mlp_priors = make_mlp_ensemble_prior_fns(
output_sizes, dummy_input, num_ensemble, seed)
def net_fn(x: chex.Array) -> chex.Array:
x = hk.Flatten()(x)
return hk.nets.MLP(output_sizes, w_init, b_init)(x)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
ensemble = EnsembleWithState(transformed, num_ensemble)
enn = priors.EnnWithAdditivePrior(
enn=ensemble,
prior_fn=combine_functions_choice_via_index(mlp_priors),
prior_scale=prior_scale,
)
super().__init__(enn.apply, enn.init, enn.indexer)
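# A hedged usage sketch (not part of the original library): combines a small
# ensemble of fixed MLP priors into a single prior function that is linear in
# the epistemic index. `dummy_input` is assumed to be a [batch, dim] array.
def _example_linear_prior(dummy_input: chex.Array) -> chex.Array:
  """Illustrative only: evaluates a linear-in-index combination of priors."""
  prior_fns = make_mlp_ensemble_prior_fns(
      output_sizes=[10, 3], dummy_input=dummy_input, num_ensemble=4)
  prior_fn = combine_functions_linear_in_index(prior_fns)
  index = jnp.ones(4) / 4.  # Uniform weights over the 4 prior components.
  return prior_fn(dummy_input, index)  # Shape [batch, 3].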
| enn-master | enn/networks/ensembles.py |
# pylint: disable=g-bad-file-header
# Copyright 2023 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Factory methods to combine multiple ENNs."""
import typing as tp
import chex
from enn import base as enn_base
from enn.networks import base as networks_base
from enn.networks import forwarders
from enn.networks import utils
import haiku as hk
import jax
HeadInput = tp.TypeVar('HeadInput') # Inputs to head enn
BaseOutput = tp.TypeVar('BaseOutput') # Outputs of base enn
def combine_naive_enn(
head_enn: enn_base.EpistemicNetwork[HeadInput, networks_base.Output],
base_enn: enn_base.EpistemicNetwork[enn_base.Input, BaseOutput],
parse_base_network: tp.Callable[
[BaseOutput], HeadInput
] = utils.parse_net_output,
) -> enn_base.EpistemicNetwork[enn_base.Input, networks_base.Output]:
"""Combines a base enn and a head enn naively without optimization.
Note: It is assumed that the base enn has identity indexer.
Args:
head_enn: An EnnArray which is applied to the output of the base_enn.
base_enn: An Enn with generic inputs which takes the inputs and returns the
input for the head_enn.
parse_base_network: A callable that parses the desired output from base_enn
to feed into head_enn.
Returns:
A combined Enn.
"""
def apply(
params: hk.Params,
state: hk.State,
inputs: enn_base.Input,
index: enn_base.Index,
) -> tp.Tuple[networks_base.Output, hk.State]:
"""Applies the base enn and head enn."""
# Forward the base enn
# Since indexer is PrngIndexer, index is actually a random key.
key = index
base_out, base_state = base_enn.apply(params, state, inputs, key)
base_out = parse_base_network(base_out)
# Forward the head enn
head_index = head_enn.indexer(key)
head_out, head_state = head_enn.apply(
params, state, base_out, head_index)
# Combine the state for the base and the head enns.
state = {**base_state, **head_state}
return head_out, state
def init(key: chex.PRNGKey,
inputs: enn_base.Input,
index: enn_base.Index) -> tp.Tuple[hk.Params, hk.State]:
"""Initializes the base enn and the head enn."""
base_key, head_enn_key = jax.random.split(key)
# initialize the base enn. Note that these params, state are replaced by the
# params, state of the pre-trained base in the experiment.
base_params, base_state = base_enn.init(base_key, inputs, index)
# Forward the base enn to get output and use it as a dummy input to
# initialize the head enn.
base_out, unused_base_state = base_enn.apply(
base_params, base_state, inputs, index)
base_out = parse_base_network(base_out)
# initialize the head enn.
head_index = head_enn.indexer(head_enn_key)
head_params, head_state = head_enn.init(head_enn_key, base_out, head_index)
# Combine the params, state for the base and the head enns.
params = {**head_params, **base_params}
state = {**head_state, **base_state}
return (params, state)
return enn_base.EpistemicNetwork[enn_base.Input, networks_base.Output](
apply, init, base_enn.indexer
)
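# A hedged usage sketch (illustrative only; `head_enn`, `base_enn` and
# `x_batch` are assumed to exist and are not defined in this file):
#
#   combined = combine_naive_enn(head_enn, base_enn)
#   key = jax.random.PRNGKey(0)
#   index = combined.indexer(key)  # base_enn is assumed to use a PrngIndexer
#   params, state = combined.init(key, x_batch, index)
#   out, state = combined.apply(params, state, x_batch, index)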
def make_optimized_forward(
head_enn: enn_base.EpistemicNetwork[HeadInput, networks_base.Output],
base_enn: enn_base.EpistemicNetwork[enn_base.Input, BaseOutput],
num_enn_samples: int,
key: chex.PRNGKey,
parse_base_network: tp.Callable[
[BaseOutput], HeadInput
] = utils.parse_net_output,
) -> forwarders.EnnBatchFwd[enn_base.Input]:
"""Combines base enn and head enn for multiple ENN samples.
Note: It is assumed that the base enn has identity indexer.
Args:
head_enn: An EnnArray which is applied to the output of the base_enn.
base_enn: An Enn with generic inputs which takes the inputs and returns the
input for the head_enn.
num_enn_samples: Number of enn samples to return for each input.
key: A random key.
parse_base_network: A callable that parses the desired output from base_enn
to feed into head_enn.
Returns:
An optimized forward function of combined Enns.
"""
enn_keys = jax.random.split(key, num_enn_samples)
def enn_batch_fwd(params: hk.Params,
state: hk.State,
x: enn_base.Input) -> chex.Array:
base_out, _ = base_enn.apply(params, state, x, key)
base_out = parse_base_network(base_out)
def sample_logits(sub_key: chex.PRNGKey) -> chex.Array:
index = head_enn.indexer(sub_key)
out, _ = head_enn.apply(params, state, base_out, index)
return utils.parse_net_output(out)
return jax.vmap(sample_logits)(enn_keys)
return jax.jit(enn_batch_fwd)
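# A hedged sketch of the optimized forwarder (illustrative only; `head_enn`,
# `base_enn`, `params`, `state` and `x_batch` are assumptions):
#
#   batch_fwd = make_optimized_forward(
#       head_enn, base_enn, num_enn_samples=10, key=jax.random.PRNGKey(0))
#   logits = batch_fwd(params, state, x_batch)
#   # `logits` has a leading axis of size num_enn_samples, one per index sample.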
| enn-master | enn/networks/combiners.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing Bayes-by-backprop (BBB) in JAX."""
from typing import Sequence
import chex
from enn.networks import base as networks_base
from enn.networks import hypermodels
from enn.networks import indexers
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
# TODO(author2): Current implementation will produce a bbb based on a linear
# diagonal hypermodel that can *only* work for a single index at a time.
# However, note that jax.vmap means this can easily be converted into a form
# that works with batched index.
def make_bbb_enn(
base_output_sizes: Sequence[int],
dummy_input: chex.Array,
temperature: float = 1.) -> networks_base.EnnArray:
"""Makes a Bayes-by-backprop (BBB) aganet."""
def make_transformed_base(output_sizes: Sequence[int]) -> hk.Transformed:
"""Factory method for creating base net function."""
def net_fn(x):
net_out = hk.Sequential([hk.Flatten(), hk.nets.MLP(output_sizes)])(x)
return net_out / temperature
transformed = hk.without_apply_rng(hk.transform(net_fn))
return transformed
transformed_base = make_transformed_base(base_output_sizes)
base_params = transformed_base.init(jax.random.PRNGKey(0), dummy_input)
num_base_params = sum(
jax.tree_leaves(jax.tree_util.tree_map(jnp.size, base_params)))
# VI loss computed by vi_losses.get_linear_hypermodel_elbo_fn assumes the
# index to be Gaussian with the same variance as the latent prior variance.
indexer = indexers.GaussianIndexer(index_dim=num_base_params)
enn = network_utils.epistemic_network_from_module(
enn_ctor=hypermodels.hypermodel_module(
transformed_base,
dummy_input,
hyper_torso=lambda x: x,
diagonal_linear_hyper=True,
return_generated_params=True,
scale=False),
indexer=indexer,
)
return enn
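# A minimal usage sketch (not from the original file; sizes are illustrative
# assumptions):
#
#   dummy_x = jnp.zeros([1, 10])
#   enn = make_bbb_enn(base_output_sizes=[50, 50, 2], dummy_input=dummy_x)
#   key = jax.random.PRNGKey(0)
#   index = enn.indexer(key)  # one Gaussian index = one weight sample
#   params, state = enn.init(key, dummy_x, index)
#   out, _ = enn.apply(params, state, dummy_x, index)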
| enn-master | enn/networks/bbb.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import index_mlp
class NetworkTest(parameterized.TestCase):
@parameterized.parameters([
([], True), ([10, 10], True), ([], False), ([10, 10], False)])
def test_index_mlp(self, hiddens: List[int], regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = index_mlp.IndexMLPEnn(
output_sizes=hiddens+[test_experiment.num_outputs],
index_dim=10,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/index_mlp_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing some ensembles with categorical outputs.
Next step is to integrate more with the rest of the ENN code.
"""
from typing import Sequence
import chex
from enn.networks import base as networks_base
from enn.networks import ensembles
from enn.networks import priors
import haiku as hk
import jax
import jax.numpy as jnp
class CatOutputWithPrior(networks_base.OutputWithPrior):
"""Categorical outputs with a real-valued prior."""
@property
def preds(self) -> chex.Array:
train = jnp.sum(jax.nn.softmax(self.train) * self.extra['atoms'], axis=-1)
return train + jax.lax.stop_gradient(self.prior)
class CategoricalRegressionMLP(hk.Module):
"""Categorical MLP designed for regression ala MuZero value."""
def __init__(self, output_sizes: Sequence[int], atoms: chex.Array):
"""Categorical MLP designed for regression ala MuZero value."""
super().__init__(name='categorical_regression_mlp')
self.dim_out = output_sizes[-1]
self.atoms = jnp.array(atoms)
self.output_sizes = list(output_sizes[:-1]) + [self.dim_out * len(atoms)]
def __call__(self, inputs: chex.Array) -> chex.Array:
"""Apply MLP and wrap outputs appropriately."""
out = hk.Flatten()(inputs)
out = hk.nets.MLP(self.output_sizes)(out)
return CatOutputWithPrior( # pytype: disable=bad-return-type # numpy-scalars
train=jnp.reshape(out, [-1, self.dim_out, len(self.atoms)]),
extra={'atoms': self.atoms},
)
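# A hedged sketch of the categorical-regression trick used above (values are
# illustrative assumptions): the MLP outputs logits over a fixed grid of
# `atoms`, and CatOutputWithPrior.preds recovers a scalar prediction as the
# softmax-weighted average of those atoms (plus any prior term).
#
#   atoms = jnp.linspace(-1., 1., 11)  # support of the output distribution
#   mlp = lambda x: CategoricalRegressionMLP([32, 1], atoms)(x)
#   # For a batch of size B, the output .train has shape [B, 1, len(atoms)]
#   # and .preds = sum(softmax(train) * atoms, axis=-1) has shape [B, 1].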
class CatMLPEnsemble(networks_base.EnnArray):
"""An ensemble of categorical MLP for regression."""
def __init__(self, output_sizes: Sequence[int], atoms: chex.Array,
num_ensemble: int):
"""An ensemble of categorical MLP for regression."""
def net_fn(x: chex.Array) -> chex.Array:
return CategoricalRegressionMLP(output_sizes, atoms)(x)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = ensembles.EnsembleWithState(transformed, num_ensemble)
super().__init__(enn.apply, enn.init, enn.indexer)
class CatMLPEnsembleGpPrior(networks_base.EnnArray):
"""An ensemble of categorical MLP with a real-valued GP prior."""
def __init__(self,
output_sizes: Sequence[int],
atoms: chex.Array,
input_dim: int,
num_ensemble: int,
num_feat: int,
gamma: priors.GpGamma = 1.,
prior_scale: float = 1,
seed: int = 0):
"""An ensemble of categorical MLP with a real-valued GP prior."""
gp_priors = ensembles.make_random_gp_ensemble_prior_fns(
input_dim, 1, num_feat, gamma, num_ensemble, seed)
enn = priors.EnnWithAdditivePrior(
enn=CatMLPEnsemble(output_sizes, atoms, num_ensemble),
prior_fn=ensembles.combine_functions_choice_via_index(gp_priors),
prior_scale=prior_scale,
)
super().__init__(enn.apply, enn.init, enn.indexer)
class CatMLPEnsembleMlpPrior(networks_base.EnnArray):
"""An ensemble of categorical MLP with real-valued MLP prior."""
def __init__(self,
output_sizes: Sequence[int],
atoms: chex.Array,
dummy_input: chex.Array,
num_ensemble: int,
prior_scale: float = 1,
seed: int = 0):
"""An ensemble of categorical MLP with real-valued MLP prior."""
mlp_priors = ensembles.make_mlp_ensemble_prior_fns(
output_sizes, dummy_input, num_ensemble, seed)
enn = priors.EnnWithAdditivePrior(
enn=CatMLPEnsemble(output_sizes, atoms, num_ensemble),
prior_fn=ensembles.combine_functions_choice_via_index(mlp_priors),
prior_scale=prior_scale,
)
super().__init__(enn.apply, enn.init, enn.indexer)
| enn-master | enn/networks/categorical_ensembles.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from absl.testing import absltest
from absl.testing import parameterized
from enn.networks import vgg
import haiku as hk
import jax
class NetworkTest(parameterized.TestCase):
@parameterized.product(
num_classes=[2, 10],
batch_size=[1, 10],
image_size=[2, 10],
)
def test_forward_pass(
self,
num_classes: int,
batch_size: int,
image_size: int,
):
"""Tests forward pass and output shape."""
enn = vgg.EnsembleVGGENN(
num_output_classes=num_classes,
)
rng = hk.PRNGSequence(0)
image_shape = [image_size, image_size, 3]
x = jax.random.normal(next(rng), shape=[batch_size,] + image_shape)
index = enn.indexer(next(rng))
params, state = enn.init(next(rng), x, index)
out, unused_new_state = enn.apply(params, state, x, index)
self.assertEqual(out.shape, (batch_size, num_classes))
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/vgg_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Hypermodels."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import hypermodels
from enn.networks import indexers
import haiku as hk
class MLPHypermodelTest(parameterized.TestCase):
@parameterized.parameters([
([], [], 4, True),
([3], [], 5, True),
([3, 7], [4], 3, True),
([], [], 4, False),
([3], [], 5, False),
([3, 7], [4], 3, False),
])
def test_ten_batches(self, model_hiddens: List[int], hyper_hiddens: List[int],
index_dim: int, regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
def base_net(x):
return hk.nets.MLP(model_hiddens + [test_experiment.num_outputs])(x)
transformed_base = hk.without_apply_rng(hk.transform(base_net))
indexer = indexers.ScaledGaussianIndexer(index_dim, index_scale=1.0)
enn = hypermodels.MLPHypermodel(
transformed_base=transformed_base,
dummy_input=test_experiment.dummy_input,
indexer=indexer,
hidden_sizes=hyper_hiddens,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
@parameterized.parameters([
([], [], [], [], 0.0, 4, True),
([3], [], [4], [], 1.0, 4, True),
([3, 7], [4], [4], [], 1.0, 4, True),
([3, 7], [4], [4, 6], [5], 1.0, 4, True),
([], [], [], [], 0.0, 4, False),
([3], [], [4], [], 1.0, 4, False),
([3, 7], [4], [4], [], 1.0, 4, False),
([3, 7], [4], [4, 6], [5], 1.0, 4, False),
])
def test_hyper_prior(self, model_hiddens: List[int], hyper_hiddens: List[int],
prior_model_hiddens: List[int],
prior_hyper_hiddens: List[int], prior_scale: float,
index_dim: int, regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
indexer = indexers.ScaledGaussianIndexer(index_dim, index_scale=1.0)
enn = hypermodels.MLPHypermodelWithHypermodelPrior(
base_output_sizes=model_hiddens + [test_experiment.num_outputs],
prior_scale=prior_scale,
dummy_input=test_experiment.dummy_input,
indexer=indexer,
prior_base_output_sizes=prior_model_hiddens +
[test_experiment.num_outputs],
hyper_hidden_sizes=hyper_hiddens,
prior_hyper_hidden_sizes=prior_hyper_hiddens)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
@parameterized.parameters([
([], [], [2], 0.0, 4, True),
([3], [], [2, 2], 1.0, 1, True),
([3], [], [2, 2], 1.0, 3, True),
([3], [], [2, 2], 1.0, 5, True),
([], [], [2], 0.0, 4, False),
([3], [], [2, 2], 1.0, 1, False),
([3], [], [2, 2], 1.0, 3, False),
([3], [], [2, 2], 1.0, 5, False),
])
def test_hyper_prior_independent_layers(self, model_hiddens: List[int],
hyper_hiddens: List[int],
prior_hiddens: List[int],
prior_scale: float, index_dim: int,
regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
indexer = indexers.ScaledGaussianIndexer(index_dim, index_scale=1.0)
enn = hypermodels.MLPHypermodelPriorIndependentLayers(
base_output_sizes=model_hiddens + [test_experiment.num_outputs],
prior_scale=prior_scale,
dummy_input=test_experiment.dummy_input,
indexer=indexer,
prior_base_output_sizes=prior_hiddens +
[test_experiment.num_outputs],
hyper_hidden_sizes=hyper_hiddens,)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/hypermodels_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import dropout
class NetworkTest(parameterized.TestCase):
@parameterized.product(
hiddens=[[], [10, 10]],
dropout_rate=[0.05, 0.2, 0.5],
dropout_input=[True, False],
regression=[True, False])
def test_dropout_mlp(self, hiddens: Sequence[int], dropout_rate: float,
dropout_input: bool, regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = dropout.MLPDropoutENN(
output_sizes=list(hiddens)+[test_experiment.num_outputs],
dropout_rate=dropout_rate,
dropout_input=dropout_input
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/dropout_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Prototype for linear hypermodel in JAX."""
from typing import Callable, Optional, Sequence, Type
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import priors
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
# TODO(author2): Current implementation will produce a hypermodel that can
# *only* work for a single index at a time. However, note that jax.vmap means
# this can easily be converted into a form that works with batched index.
class MLPHypermodel(networks_base.EnnArray):
"""MLP hypermodel for transformed_base as EpistemicNetwork."""
def __init__(
self,
transformed_base: hk.Transformed,
dummy_input: chex.Array,
indexer: base.EpistemicIndexer,
hidden_sizes: Optional[Sequence[int]] = None,
return_generated_params: bool = False,
scale: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
):
"""MLP hypermodel for transformed_base as EpistemicNetwork."""
if hidden_sizes is None:
hyper_torso = lambda x: x
else:
def hyper_torso(index):
return hk.nets.MLP(hidden_sizes, w_init=w_init, b_init=b_init)(index)
enn = network_utils.epistemic_network_from_module(
enn_ctor=hypermodel_module(
transformed_base,
dummy_input,
hyper_torso,
return_generated_params=return_generated_params,
scale=scale),
indexer=indexer,
)
super().__init__(enn.apply, enn.init, enn.indexer)
# pytype: disable=bad-return-type
def hypermodel_module(
transformed_base: hk.Transformed,
dummy_input: chex.Array,
hyper_torso: Callable[[base.Index], chex.Array] = lambda x: x,
diagonal_linear_hyper: bool = False,
return_generated_params: bool = False,
scale: bool = True,
) -> Type[networks_base.EpistemicModule]:
"""Generates an haiku module for a hypermodel of a transformed base network.
A hypermodel uses the index z to predict parameters for the base model defined
by transformed_base. See paper https://arxiv.org/abs/2006.07464.
For each layer of the base model, we would like the variance of inputs and
outputs to be equal (this can make SGD work better). If we were initializing
the weights of the base model, we could achieve this by "scaling" the weights
of each layer by 1/sqrt(n_i) where n_i is the fan in to the i-th layer of the
base model. Now, since the weights of the base model are generated by the
hypermodel's output, we can manually scale the generated weights. Note that
this scaling is needed only for the weight parameters and not for bias
parameters. Function `scale_fn` appropriately scales the weights generated by
the hypermodel.
Args:
    transformed_base: base model y = f_theta(x), as an hk.Transformed wrapped
      with hk.without_apply_rng.
dummy_input: example input x, needed to determine weight shapes.
hyper_torso: transformation of the index before the final layer. Defaults
to identity and a resultant linear hypermodel.
    diagonal_linear_hyper: a boolean specifying whether the final layer applied
      to the transformed index is diagonal linear (True) or dense linear.
return_generated_params: returns generated params in addition to output.
scale: a boolean specifying whether to scale the params or not.
Returns:
Hypermodel of the "base model" as ctor for EpistemicModule. Should be used
with only *one* epistemic index at a time (can vmap for batch).
"""
base_params = transformed_base.init(jax.random.PRNGKey(0), dummy_input)
base_params_flat = jax.tree_util.tree_map(jnp.ravel, base_params)
base_shapes = jax.tree_util.tree_map(lambda x: np.array(jnp.shape(x)),
base_params)
base_shapes_flat = jax.tree_util.tree_map(len, base_params_flat)
# base params as 1D array. It can be used to initialize the hyper network
base_params_array = jnp.concatenate(jax.tree_flatten(base_params_flat)[0])
def scale_fn(module_name, name, value):
"""Scales weight by 1/sqrt(fan_in) and leaves biases unchanged.
Args:
module_name: (typically) layer name. Not used but is needed for hk.map.
name: parameter name.
value: value of the parameters.
Returns:
scaled parameters suitable for use in the apply function of base network.
"""
del module_name
# The parameter name can be either 'w' (if the parameter is a weight)
# or 'b' (if the parameter is a bias)
return value / jnp.sqrt(value.shape[0]) if name == 'w' else value
def hyper_fn(inputs: chex.Array,
index: base.Index) -> chex.Array:
if diagonal_linear_hyper:
# index must be the same size as the total number of base params.
chex.assert_shape(index, (np.sum(jax.tree_leaves(base_shapes_flat)),))
hyper_index = DiagonalLinear(b_init_value=base_params_array)(index)
flat_output = jnp.split(
hyper_index, np.cumsum(jax.tree_leaves(base_shapes_flat))[:-1])
flat_output = jax.tree_unflatten(jax.tree_structure(base_shapes),
flat_output)
else:
# Apply the hyper_torso to the epistemic index
hyper_index = hyper_torso(index)
b_init = w_init = hk.initializers.VarianceScaling(
mode='fan_avg', distribution='truncated_normal')
# Generate a linear layer of size "base_shapes_flat"
final_layers = jax.tree_util.tree_map(
lambda s: hk.Linear(s, w_init=w_init, b_init=b_init),
base_shapes_flat)
# Apply this linear output to the output of the hyper_torso
flat_output = jax.tree_util.tree_map(lambda layer: layer(hyper_index),
final_layers)
# Reshape this flattened output to the original base shapes (unflatten)
generated_params = jax.tree_util.tree_map(jnp.reshape, flat_output,
base_shapes)
if scale:
# Scale the generated params such that expected variance of the raw
# generated params is O(1) for both bias and weight parameters.
generated_params_scaled = hk.data_structures.map(scale_fn,
generated_params)
else:
generated_params_scaled = generated_params
# Output the original base function(inputs) with these generated params
out = transformed_base.apply(generated_params_scaled, inputs)
if return_generated_params:
out = networks_base.OutputWithPrior(
train=out,
prior=jnp.zeros_like(out),
extra={
'hyper_net_out': generated_params,
'base_net_params': generated_params_scaled,
'hyper_index': hyper_index
})
return out
enn_module = hk.to_module(hyper_fn)
return enn_module
# pytype: enable=bad-return-type
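# A hedged usage sketch for the hypermodel wrapper above (sizes and the choice
# of indexer are illustrative assumptions; `indexers` lives in enn.networks).
# Note that the generated weights are rescaled by 1/sqrt(fan_in) via scale_fn
# unless scale=False.
#
#   from enn.networks import indexers
#
#   def base_net(x):
#     return hk.nets.MLP([16, 2])(x)
#   transformed_base = hk.without_apply_rng(hk.transform(base_net))
#   dummy_x = jnp.zeros([1, 4])
#   enn = MLPHypermodel(
#       transformed_base=transformed_base,
#       dummy_input=dummy_x,
#       indexer=indexers.ScaledGaussianIndexer(index_dim=8),
#       hidden_sizes=[10],
#   )
#   index = enn.indexer(jax.random.PRNGKey(0))
#   params, state = enn.init(jax.random.PRNGKey(1), dummy_x, index)
#   out, _ = enn.apply(params, state, dummy_x, index)  # one index at a time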
class MLPHypermodelWithHypermodelPrior(networks_base.EnnArray):
"""MLP hypermodel with hypermodel prior as EpistemicNetwork."""
def __init__(
self,
base_output_sizes: Sequence[int],
prior_scale: float,
dummy_input: chex.Array,
indexer: base.EpistemicIndexer,
prior_base_output_sizes: Sequence[int],
hyper_hidden_sizes: Optional[Sequence[int]] = None,
prior_hyper_hidden_sizes: Optional[Sequence[int]] = None,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
return_generated_params: bool = False,
seed: int = 0,
scale: bool = True,
):
"""MLP hypermodel with hypermodel prior as EpistemicNetwork."""
# Making the base model for the ENN without any prior function
def base_net(x):
return hk.nets.MLP(base_output_sizes, w_init=w_init, b_init=b_init)(x)
transformed_base = hk.without_apply_rng(hk.transform(base_net))
# Making the base model for the ENN of prior function
def prior_net(x):
return hk.nets.MLP(
prior_base_output_sizes, w_init=w_init, b_init=b_init)(x)
transformed_prior_base = hk.without_apply_rng(hk.transform(prior_net))
# Defining an ENN for the prior function
prior_enn = MLPHypermodel(
transformed_base=transformed_prior_base,
dummy_input=dummy_input,
indexer=indexer,
hidden_sizes=prior_hyper_hidden_sizes,
return_generated_params=return_generated_params,
w_init=w_init,
b_init=b_init,
scale=scale,
)
prior_fn = priors.convert_enn_to_prior_fn(
prior_enn, dummy_input, jax.random.PRNGKey(seed))
# Defining an ENN without any prior function
enn_wo_prior = MLPHypermodel(
transformed_base=transformed_base,
dummy_input=dummy_input,
indexer=indexer,
hidden_sizes=hyper_hidden_sizes,
return_generated_params=return_generated_params,
w_init=w_init,
b_init=b_init,
scale=scale)
# Defining the ENN with the prior `prior_fn`
enn = priors.EnnWithAdditivePrior(
enn_wo_prior, prior_fn, prior_scale=prior_scale)
super().__init__(enn.apply, enn.init, enn.indexer)
################################################################################
# Alternative implementation of MLP hypermodel with MLP prior where layers
# are generated by different set of indices.
class HyperLinear(hk.Module):
"""Linear hypermodel."""
def __init__(self,
output_size: int,
index_dim_per_layer: int,
weight_scaling: float = 1.,
bias_scaling: float = 1.,
fixed_bias_val: float = 0.0,
first_layer: bool = False,
name: str = 'hyper_linear'):
super().__init__(name=name)
self._output_size = output_size
self._index_dim_per_layer = index_dim_per_layer
self._weight_scaling = weight_scaling
self._bias_scaling = bias_scaling
self._fixed_bias_val = fixed_bias_val
self._first_layer = first_layer
def __call__(self, x: chex.Array,
z: base.Index) -> chex.Array:
unused_x_batch_size, hidden_size = x.shape
init = hk.initializers.RandomNormal()
w = hk.get_parameter(
'w', [self._output_size, hidden_size, self._index_dim_per_layer],
init=init)
b = hk.get_parameter(
'b', [self._output_size, self._index_dim_per_layer], init=init)
w /= jnp.linalg.norm(w, axis=-1, keepdims=True)
b /= jnp.linalg.norm(b, axis=-1, keepdims=True)
w *= jnp.sqrt(self._weight_scaling)
if not self._first_layer:
w *= jnp.sqrt(1 / hidden_size)
b = b * jnp.sqrt(self._bias_scaling) + self._fixed_bias_val
weights = jnp.einsum('ohi,i->oh', w, z)
bias = jnp.einsum('oi,i->o', b, z)
broadcasted_bias = jnp.broadcast_to(bias, (x.shape[0],) + bias.shape)
return jnp.einsum('oh,bh->bo', weights, x) + broadcasted_bias
class PriorMLPIndependentLayers(hk.Module):
"""Prior MLP with each layer generated by an independent index."""
def __init__(self,
output_sizes: Sequence[int],
index_dim: int,
weight_scaling: float = 1.,
bias_scaling: float = 1.,
fixed_bias_val: float = 0.0,
name: str = 'prior_independent_layers'):
super().__init__(name=name)
self._output_sizes = output_sizes
self._num_layers = len(self._output_sizes)
self._index_dim = index_dim
self._weight_scaling = weight_scaling
self._bias_scaling = bias_scaling
self._fixed_bias_val = fixed_bias_val
if self._index_dim < self._num_layers:
# Assigning all index dimensions to all layers
self._layers_indices = [jnp.arange(self._index_dim)] * self._num_layers
else:
      # Splitting index dimension into num_layers chunks
self._layers_indices = jnp.array_split(
jnp.arange(self._index_dim), self._num_layers)
# Defining layers of the prior MLP and associating each layer with a set of
# indices
self._layers = []
first_layer = True
for layer_indices, output_size in zip(self._layers_indices,
self._output_sizes):
index_dim_per_layer = len(layer_indices)
layer = HyperLinear(output_size, index_dim_per_layer,
self._weight_scaling, self._bias_scaling,
self._fixed_bias_val, first_layer=first_layer)
first_layer = False
self._layers.append(layer)
def __call__(self, x: chex.Array,
z: base.Index) -> chex.Array:
if self._index_dim < self._num_layers:
# Assigning all index dimensions to all layers
index_layers = [z] * self._num_layers
else:
      # Splitting index dimension into num_layers chunks
index_layers = jnp.array_split(z, self._num_layers)
out = x
for i, layer in enumerate(self._layers):
index_layer = index_layers[i]
out = layer(out, index_layer)
if i < self._num_layers - 1:
out = jax.nn.relu(out)
return out
class MLPHypermodelPriorIndependentLayers(networks_base.EnnArray):
"""MLP hypermodel with hypermodel prior as EpistemicNetwork."""
def __init__(self,
base_output_sizes: Sequence[int],
prior_scale: float,
dummy_input: chex.Array,
indexer: base.EpistemicIndexer,
prior_base_output_sizes: Sequence[int],
hyper_hidden_sizes: Optional[Sequence[int]] = None,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
return_generated_params: bool = False,
prior_weight_scaling: float = 1.,
prior_bias_scaling: float = 1.,
prior_fixed_bias_val: float = 0.0,
seed: int = 0,
scale: bool = True,
problem_temperature: Optional[float] = None):
"""MLP hypermodel with hypermodel prior as EpistemicNetwork."""
# Making the base model for the ENN without any prior function
def base_net(x):
net_out = hk.nets.MLP(base_output_sizes, w_init=w_init, b_init=b_init)(x)
if problem_temperature:
net_out /= problem_temperature
return net_out
transformed_base = hk.without_apply_rng(hk.transform(base_net))
# Defining an ENN for the prior function based on an MLP with independent
# layers which divides index dimension among the MLP layers. To this end, we
# need to find the index dimension.
rng = hk.PRNGSequence(seed)
index = indexer(next(rng))
index_dim, = index.shape
def prior_net(x, z):
net_out = PriorMLPIndependentLayers(
output_sizes=prior_base_output_sizes,
index_dim=index_dim,
weight_scaling=prior_weight_scaling,
bias_scaling=prior_bias_scaling,
fixed_bias_val=prior_fixed_bias_val)(x, z)
if problem_temperature:
net_out /= problem_temperature
return net_out
prior_enn = hk.without_apply_rng(hk.transform(prior_net))
# Initializing prior ENN to get `prior_fn(x, z)` which forwards prior ENN
rng = hk.PRNGSequence(seed)
prior_params = prior_enn.init(next(rng), dummy_input, index)
def prior_fn(x, z):
return prior_enn.apply(prior_params, x, z)
# Defining an ENN without any prior function
enn_wo_prior = MLPHypermodel(
transformed_base=transformed_base,
dummy_input=dummy_input,
indexer=indexer,
hidden_sizes=hyper_hidden_sizes,
return_generated_params=return_generated_params,
w_init=w_init,
b_init=b_init,
scale=scale)
# Defining the ENN with the prior `prior_fn`
enn = priors.EnnWithAdditivePrior(
enn_wo_prior, prior_fn, prior_scale=prior_scale)
super().__init__(enn.apply, enn.init, enn.indexer)
class DiagonalLinear(hk.Module):
"""Diagonal Linear module."""
def __init__(
self,
with_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init_value: Optional[chex.Array] = None,
name: Optional[str] = None,
):
"""Constructs the diagonal linear module.
Args:
with_bias: Whether to add a bias to the output.
w_init: Optional initializer for weights. By default, uses random values
from truncated normal, with stddev 1.
      b_init_value: Initial values for the bias. If None, biases are
        initialized from a normal with stddev 0.1.
name: Name of the module.
"""
super().__init__(name=name)
self.input_size = None
self.with_bias = with_bias
self.w_init = w_init
self.b_init_value = b_init_value
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
"""Computes a linear transform of the input."""
if not inputs.shape:
raise ValueError('Input must not be scalar.')
self.input_size = inputs.shape[-1]
dtype = inputs.dtype
w_init = self.w_init
inv_soft_plus_fn = lambda x: jnp.log(jnp.exp(x) - 1)
if w_init is None:
w_init = hk.initializers.Constant(inv_soft_plus_fn(0.01))
w = hk.get_parameter('w', [self.input_size], dtype, init=w_init)
out = inputs * jnp.log(1 + jnp.exp(w))
if self.with_bias:
if self.b_init_value is not None:
b_init = lambda x, y: jnp.array(self.b_init_value)
else:
b_init = hk.initializers.RandomNormal(stddev=0.1)
b = hk.get_parameter('b', [self.input_size], dtype, init=b_init)
out = out + b
return out
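# A hedged note on DiagonalLinear (a sketch, not part of the original file):
# the layer computes an elementwise `inputs * softplus(w) + b`, with `w`
# initialised at inverse-softplus(0.01) so the initial multiplicative scale is
# roughly 0.01, and `b` optionally initialised to a supplied vector (above:
# the flattened base-network parameters).
#
#   def net(z):
#     return DiagonalLinear(b_init_value=jnp.zeros(4))(z)
#   transformed = hk.without_apply_rng(hk.transform(net))
#   params = transformed.init(jax.random.PRNGKey(0), jnp.ones(4))
#   out = transformed.apply(params, jnp.ones(4))  # approx 0.01 * ones(4) + b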
| enn-master | enn/networks/hypermodels.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Exposing the public methods of the networks."""
from enn.networks import epinet
# Base
from enn.networks.base import ApplyArray
from enn.networks.base import ApplyNoState
from enn.networks.base import EnnArray
from enn.networks.base import EnnNoState
from enn.networks.base import EpistemicModule
from enn.networks.base import InitArray
from enn.networks.base import InitNoState
from enn.networks.base import Output
from enn.networks.base import OutputWithPrior
# BBB
from enn.networks.bbb import make_bbb_enn
# Categorical regression ensemble
from enn.networks.categorical_ensembles import CategoricalRegressionMLP
from enn.networks.categorical_ensembles import CatMLPEnsembleGpPrior
from enn.networks.categorical_ensembles import CatMLPEnsembleMlpPrior
from enn.networks.categorical_ensembles import CatOutputWithPrior
# Combiners
from enn.networks.combiners import combine_naive_enn
from enn.networks.combiners import make_optimized_forward
# Dropout
from enn.networks.dropout import MLPDropoutENN
# Einsum MLP
from enn.networks.einsum_mlp import EnsembleMLP
from enn.networks.einsum_mlp import make_einsum_ensemble_mlp_enn
from enn.networks.einsum_mlp import make_ensemble_mlp_with_prior_enn
from enn.networks.einsum_mlp import make_ensemble_prior
# Ensemble
from enn.networks.ensembles import combine_functions_choice_via_index
from enn.networks.ensembles import combine_functions_linear_in_index
from enn.networks.ensembles import Ensemble
from enn.networks.ensembles import EnsembleWithState
from enn.networks.ensembles import make_mlp_ensemble_prior_fns
from enn.networks.ensembles import MLPEnsembleMatchedPrior
# Forwarders
from enn.networks.forwarders import EnnBatchFwd
from enn.networks.forwarders import make_batch_fwd
# Gaussian ENN
from enn.networks.gaussian_enn import GaussianNoiseEnn
from enn.networks.gaussian_enn import GaussianNoiseMLP
# Hypermodels
from enn.networks.hypermodels import hypermodel_module
from enn.networks.hypermodels import MLPHypermodel
from enn.networks.hypermodels import MLPHypermodelPriorIndependentLayers
from enn.networks.hypermodels import MLPHypermodelWithHypermodelPrior
from enn.networks.hypermodels import PriorMLPIndependentLayers
# Index MLP
from enn.networks.index_mlp import ConcatIndexMLP
from enn.networks.index_mlp import IndexMLPEnn
from enn.networks.index_mlp import IndexMLPWithGpPrior
# Indexers
from enn.networks.indexers import DirichletIndexer
from enn.networks.indexers import EnsembleIndexer
from enn.networks.indexers import GaussianIndexer
from enn.networks.indexers import GaussianWithUnitIndexer
from enn.networks.indexers import PrngIndexer
from enn.networks.indexers import ScaledGaussianIndexer
# LeNet (MNIST)
from enn.networks.lenet import EnsembleLeNet5ENN
from enn.networks.lenet import LeNet5
# MLP
from enn.networks.mlp import ExposedMLP
from enn.networks.mlp import ProjectedMLP
# Priors
from enn.networks.priors import convert_enn_to_prior_fn
from enn.networks.priors import EnnNoStateWithAdditivePrior
from enn.networks.priors import EnnWithAdditivePrior
from enn.networks.priors import get_random_mlp_with_index
from enn.networks.priors import make_null_prior
from enn.networks.priors import make_random_feat_gp
from enn.networks.priors import NetworkWithAdditivePrior
from enn.networks.priors import PriorFn
# ResNet (Imagenet)
from enn.networks.resnet.base import EnsembleResNetENN
from enn.networks.resnet.base import resnet_model
# ResNet Configs (Imagenet)
from enn.networks.resnet.lib import CanonicalResNets
from enn.networks.resnet.lib import ForwardFn
from enn.networks.resnet.lib import ResBlockV1
from enn.networks.resnet.lib import ResBlockV2
from enn.networks.resnet.lib import ResNet
from enn.networks.resnet.lib import ResNetConfig
# ResNet (Imagenet)
from enn.networks.resnet.priors import ResnetCnnPrior
from enn.networks.resnet.priors import ResnetMlpPrior
# Utils
from enn.networks.utils import epistemic_network_from_module
from enn.networks.utils import make_centered_enn
from enn.networks.utils import make_centered_enn_no_state
from enn.networks.utils import parse_net_output
from enn.networks.utils import parse_to_output_with_prior
from enn.networks.utils import scale_enn_output
from enn.networks.utils import wrap_apply_no_state_as_apply
from enn.networks.utils import wrap_enn_as_enn_no_state
from enn.networks.utils import wrap_enn_no_state_as_enn
from enn.networks.utils import wrap_init_no_state_as_init
from enn.networks.utils import wrap_transformed_as_enn
from enn.networks.utils import wrap_transformed_as_enn_no_state
# VGG (Cifar10)
from enn.networks.vgg import EnsembleVGGENN
from enn.networks.vgg import VGG
| enn-master | enn/networks/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from absl.testing import absltest
from absl.testing import parameterized
from enn.networks import lenet
import haiku as hk
import jax
class NetworkTest(parameterized.TestCase):
@parameterized.product(
num_classes=[2, 10],
batch_size=[1, 10],
image_size=[2, 10],
)
def test_forward_pass(
self,
num_classes: int,
batch_size: int,
image_size: int,
):
"""Tests forward pass and output shape."""
enn = lenet.EnsembleLeNet5ENN(
num_output_classes=num_classes,
)
rng = hk.PRNGSequence(0)
image_shape = [image_size, image_size, 3]
x = jax.random.normal(next(rng), shape=[batch_size,] + image_shape)
index = enn.indexer(next(rng))
params, state = enn.init(next(rng), x, index)
out, unused_new_state = enn.apply(params, state, x, index)
self.assertEqual(out.shape, (batch_size, num_classes))
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/lenet_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for enn.priors."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import priors
from enn.networks import utils as network_utils
import haiku as hk
import jax
class PriorsTest(parameterized.TestCase):
@parameterized.parameters([[[50, 50], True], [[20, 20], False]])
def test_mlp_prior_module(self, hiddens: List[int], regression: bool):
"""Test MLP with prior from hk.Module."""
test_experiment = supervised.make_test_experiment(regression)
def net_fn(x):
net = priors.NetworkWithAdditivePrior(
net=hk.nets.MLP(hiddens + [test_experiment.num_outputs]),
prior_net=hk.nets.MLP(hiddens + [test_experiment.num_outputs]),
prior_scale=1.,
)
return net(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
enn = network_utils.wrap_transformed_as_enn(transformed)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
@parameterized.parameters([[[50, 50], True], [[20, 20], False]])
def test_mlp_prior_transformed(self, hiddens: List[int], regression: bool):
"""Test MLP with prior from EpistemicNetwork."""
test_experiment = supervised.make_test_experiment(regression)
def net_fn(x):
net = hk.nets.MLP(hiddens + [test_experiment.num_outputs])
return net(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
train_enn = network_utils.wrap_transformed_as_enn(transformed)
prior_params = transformed.init(
jax.random.PRNGKey(0), test_experiment.dummy_input)
prior_fn = lambda x, z: transformed.apply(prior_params, x)
enn = priors.EnnWithAdditivePrior(
enn=train_enn,
prior_fn=prior_fn,
prior_scale=1.,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
@parameterized.parameters([
[1, 3, 10, 10],
[2, 5, 1, 10],
[5, 1, 5, 10],
[5, 5, 5, 1],
])
def test_random_gp_forward(
self, input_dim: int, output_dim: int, num_feat: int, batch_size: int):
"""Test random gp can forward data correcly."""
rng = hk.PRNGSequence(0)
fake_data = jax.random.normal(next(rng), shape=[batch_size, input_dim])
gp_instance = priors.make_random_feat_gp(
input_dim, output_dim, num_feat, next(rng))
output = gp_instance(fake_data)
assert output.shape == (batch_size, output_dim)
@parameterized.parameters([[[50, 50], True], [[20, 20], False]])
def test_get_random_mlp_prior_fn(self, hiddens: List[int], regression: bool):
"""Test MLP with prior from EpistemicNetwork."""
test_experiment = supervised.make_test_experiment(regression)
output_sizes = hiddens + [test_experiment.num_outputs]
def net_fn(x):
net = hk.nets.MLP(output_sizes)
return net(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
train_enn = network_utils.wrap_transformed_as_enn(transformed)
dummy_x = test_experiment.dummy_input
dummy_z = train_enn.indexer(jax.random.PRNGKey(0))
rng_seq = hk.PRNGSequence(0)
prior_fn = priors.get_random_mlp_with_index(
x_sample=dummy_x, z_sample=dummy_z, rng=next(rng_seq),
prior_output_sizes=output_sizes)
enn = priors.EnnWithAdditivePrior(
enn=train_enn,
prior_fn=prior_fn,
prior_scale=1.,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/priors_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MLP variants for ENN."""
from typing import Optional, Sequence
import chex
from enn import base
from enn.networks import base as networks_base
import haiku as hk
import jax
import jax.numpy as jnp
class ExposedMLP(hk.Module):
"""MLP module that exposes internal layers in output."""
def __init__(self,
output_sizes: Sequence[int],
expose_layers: Optional[Sequence[bool]] = None,
stop_gradient: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None):
"""ReLU MLP that also exposes the internals as output."""
super().__init__(name=name)
layers = []
for index, output_size in enumerate(output_sizes):
linear = hk.Linear(output_size, w_init=w_init, name=f'linear_{index}')
layers.append(linear)
self.layers = tuple(layers)
self.num_layers = len(self.layers)
self.output_size = output_sizes[-1]
self.stop_gradient = stop_gradient
self.expose_layers = expose_layers
# if expose_layers is None, we expose all layers
if self.expose_layers is None:
self.expose_layers = [True] * len(output_sizes)
assert len(self.expose_layers) == len(self.layers)
def __call__(self, inputs: chex.Array) -> networks_base.OutputWithPrior:
"""Standard MLP but exposes 'exposed_features' in .extra output."""
layers_features = []
out = inputs
for i, layer in enumerate(self.layers):
out = layer(out)
if i < (self.num_layers - 1):
out = jax.nn.relu(out)
layers_features.append(out)
exposed_features = [inputs]
for i, layer_feature in enumerate(layers_features):
# Add this layer feature if the expose flag for this layer is True
if self.expose_layers[i]:
exposed_features.append(layer_feature)
exposed_features = jnp.concatenate(exposed_features, axis=1)
if self.stop_gradient:
exposed_features = jax.lax.stop_gradient(exposed_features)
extra = {'exposed_features': exposed_features}
return networks_base.OutputWithPrior(
train=out,
prior=jnp.zeros_like(out),
extra=extra
)
class ProjectedMLP(networks_base.EpistemicModule):
"""MLP whose output in the final layer is then dot-product with Z-index."""
def __init__(self,
hidden_sizes: Sequence[int],
final_out: int,
index_dim: int,
concat_index: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None):
super().__init__(name=name)
self.hidden_sizes = hidden_sizes
self.final_out = final_out
self.index_dim = index_dim
self.concat_index = concat_index
output_sizes = list(self.hidden_sizes) + [self.final_out * index_dim]
self.mlp = hk.nets.MLP(output_sizes, w_init=w_init)
def __call__(self,
inputs: chex.Array,
index: base.Index) -> chex.Array:
chex.assert_shape(index, [self.index_dim])
if self.concat_index:
# Concatenate the index z to the *inputs* as well.
batched_z = jnp.repeat(jnp.expand_dims(index, 0), inputs.shape[0], axis=0)
inputs = jnp.concatenate([batched_z, inputs], axis=1)
reshaped_output = jnp.reshape(
self.mlp(inputs), [inputs.shape[0], self.final_out, self.index_dim])
return jnp.dot(reshaped_output, index)
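# A hedged usage sketch (illustrative sizes): ProjectedMLP maps inputs to a
# [batch, final_out, index_dim] tensor and contracts the last axis with the
# epistemic index z, so different z select different heads over shared
# features.
#
#   def net(x, z):
#     return ProjectedMLP(hidden_sizes=[16], final_out=2, index_dim=5)(x, z)
#   transformed = hk.without_apply_rng(hk.transform(net))
#   x = jnp.zeros([3, 4])
#   z = jnp.ones([5]) / jnp.sqrt(5)
#   params = transformed.init(jax.random.PRNGKey(0), x, z)
#   out = transformed.apply(params, x, z)  # shape [3, 2]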
| enn-master | enn/networks/mlp.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Efficient ensemble implementations for JAX/Haiku via einsum."""
from typing import Callable, Optional, Sequence, Tuple
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import priors
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
# TODO(author2): Delete this implementation, base ensemble is fast enough.
def make_einsum_ensemble_mlp_enn(
output_sizes: Sequence[int],
num_ensemble: int,
nonzero_bias: bool = True,
activation: Callable[[chex.Array], chex.Array] = jax.nn.relu,
) -> networks_base.EnnArray:
"""Factory method to create fast einsum MLP ensemble ENN.
This is a specialized implementation for ReLU MLP without a prior network.
Args:
output_sizes: Sequence of integer sizes for the MLPs.
num_ensemble: Integer number of elements in the ensemble.
nonzero_bias: Whether to make the initial layer bias nonzero.
activation: Jax callable defining activation per layer.
Returns:
EpistemicNetwork as an ensemble of MLP.
"""
def ensemble_forward(x: chex.Array) -> networks_base.OutputWithPrior:
"""Forwards the entire ensemble at given input x."""
model = EnsembleMLP(output_sizes, num_ensemble, nonzero_bias, activation)
return model(x) # pytype: disable=bad-return-type # jax-ndarray
transformed = hk.without_apply_rng(hk.transform(ensemble_forward))
# Apply function selects the appropriate index of the ensemble output.
def apply(params: hk.Params, x: chex.Array,
z: base.Index) -> networks_base.OutputWithPrior:
net_out = transformed.apply(params, x)
one_hot_index = jax.nn.one_hot(z, num_ensemble)
return jnp.dot(net_out, one_hot_index)
def init(key: chex.PRNGKey, x: chex.Array,
z: base.Index) -> hk.Params:
del z
return transformed.init(key, x)
indexer = indexers.EnsembleIndexer(num_ensemble)
# TODO(author3): Change apply and init fns above to work with state.
apply = network_utils.wrap_apply_no_state_as_apply(apply)
init = network_utils.wrap_init_no_state_as_init(init)
return networks_base.EnnArray(apply, init, indexer)
def make_ensemble_mlp_with_prior_enn(
output_sizes: Sequence[int],
dummy_input: chex.Array,
num_ensemble: int,
prior_scale: float = 1.,
nonzero_bias: bool = True,
seed: int = 999,
) -> networks_base.EnnArray:
"""Factory method to create fast einsum MLP ensemble with matched prior.
Args:
output_sizes: Sequence of integer sizes for the MLPs.
dummy_input: Example x input for prior initialization.
num_ensemble: Integer number of elements in the ensemble.
prior_scale: Float rescaling of the prior MLP.
nonzero_bias: Whether to make the initial layer bias nonzero.
seed: integer seed for prior init.
Returns:
    EpistemicNetwork ENN of the ensemble of MLPs with matched prior.
"""
enn = make_einsum_ensemble_mlp_enn(output_sizes, num_ensemble, nonzero_bias)
init_key, _ = jax.random.split(jax.random.PRNGKey(seed))
prior_params, prior_state = enn.init(init_key, dummy_input, jnp.array([]))
# Apply function selects the appropriate index of the ensemble output.
def apply_with_prior(
params: hk.Params,
state: hk.State,
x: chex.Array,
z: base.Index,
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
ensemble_train, state = enn.apply(params, state, x, z)
ensemble_prior, _ = enn.apply(prior_params, prior_state, x, z)
output = networks_base.OutputWithPrior(
train=ensemble_train, prior=ensemble_prior * prior_scale)
return output, state
return networks_base.EnnArray(apply_with_prior, enn.init, enn.indexer)
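# A minimal usage sketch (sizes are illustrative assumptions):
#
#   dummy_x = jnp.zeros([1, 6])
#   enn = make_ensemble_mlp_with_prior_enn(
#       output_sizes=[20, 2], dummy_input=dummy_x,
#       num_ensemble=10, prior_scale=1.)
#   key = jax.random.PRNGKey(0)
#   index = enn.indexer(key)  # integer in [0, num_ensemble)
#   params, state = enn.init(key, dummy_x, index)
#   out, _ = enn.apply(params, state, dummy_x, index)
#   # out.train is the trainable head; out.prior is the fixed prior head.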
# TODO(author3): Come up with a better name and use ensembles.py instead.
def make_ensemble_prior(output_sizes: Sequence[int],
num_ensemble: int,
dummy_input: chex.Array,
seed: int = 999,) -> priors.PriorFn:
"""Combining a few ensemble elements as prior function."""
def net_fn(x):
model = EnsembleMLP(output_sizes, num_ensemble)
return model(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
rng = hk.PRNGSequence(seed)
params = transformed.init(next(rng), dummy_input)
prior_fn = lambda x, z: jnp.dot(transformed.apply(params, x), z)
return jax.jit(prior_fn)
################################################################################
# Einsum implementation of MLP
class EnsembleBranch(hk.Module):
"""Branches a single linear layer to num_ensemble, output_size."""
def __init__(self,
num_ensemble: int,
output_size: int,
nonzero_bias: bool,
w_init: Optional[hk.initializers.Initializer] = None,
name: str = 'ensemble_branch'):
super().__init__(name=name)
self.num_ensemble = num_ensemble
self.output_size = output_size
self.nonzero_bias = nonzero_bias
self.w_init = w_init
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray: # [B, H] -> [B, D, K]
assert inputs.ndim == 2
unused_batch, input_size = inputs.shape
if self.nonzero_bias:
b_init = hk.initializers.TruncatedNormal(
stddev=(1. / jnp.sqrt(input_size)))
else:
b_init = jnp.zeros
if self.w_init is not None:
w_init = self.w_init
else:
w_init = hk.initializers.TruncatedNormal(
stddev=(1. / jnp.sqrt(input_size)))
w = hk.get_parameter(
'w', [input_size, self.output_size, self.num_ensemble], init=w_init)
b = hk.get_parameter(
'b', [self.output_size, self.num_ensemble], init=b_init)
return jnp.einsum('bi,ijk->bjk', inputs, w) + jnp.expand_dims(b, axis=0)
class EnsembleLinear(hk.Module):
"""Keeps num_ensemble linear layers in parallel without interactions."""
def __init__(self,
output_size: int,
w_init: Optional[hk.initializers.Initializer] = None,
name: str = 'linear'):
super().__init__(name=name)
self.output_size = output_size
self.w_init = w_init
def __call__(self,
               inputs: jnp.ndarray) -> jnp.ndarray:  # [B, H, K] -> [B, D, K]
assert inputs.ndim == 3
unused_batch, input_size, self.num_ensemble = inputs.shape
if self.w_init is not None:
w_init = self.w_init
else:
w_init = hk.initializers.TruncatedNormal(
stddev=(1. / jnp.sqrt(input_size)))
w = hk.get_parameter(
'w', [input_size, self.output_size, self.num_ensemble], init=w_init)
b = hk.get_parameter(
'b', [self.output_size, self.num_ensemble], init=jnp.zeros)
return jnp.einsum('bik,ijk->bjk', inputs, w) + jnp.expand_dims(b, axis=0)
class EnsembleMLP(hk.Module):
"""Parallel num_ensemble MLPs all with same output_sizes.
In the first layer, the input is 'branched' to num_ensemble linear layers.
Then, in subsequent layers it is purely parallel EnsembleLinear.
"""
def __init__(self,
output_sizes: Sequence[int],
num_ensemble: int,
nonzero_bias: bool = True,
activation: Callable[[chex.Array], chex.Array] = jax.nn.relu,
w_init: Optional[hk.initializers.Initializer] = None,
name: str = 'ensemble_mlp'):
super().__init__(name=name)
self.num_ensemble = num_ensemble
self.activation = activation
layers = []
for index, output_size in enumerate(output_sizes):
if index == 0:
layers.append(
EnsembleBranch(num_ensemble, output_size, nonzero_bias, w_init))
else:
layers.append(EnsembleLinear(output_size, w_init))
self.layers = tuple(layers)
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray: # [B, H] -> [B, D, K]
num_layers = len(self.layers)
out = hk.Flatten()(inputs)
for i, layer in enumerate(self.layers):
out = layer(out)
if i < num_layers - 1:
out = self.activation(out)
return out # pytype: disable=bad-return-type # numpy-scalars
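# A hedged shape walkthrough (a sketch, not part of the original file): with
# batch B, fan-in H, layer width D and K ensemble members,
#   EnsembleBranch:  [B, H]    with w[H, D, K] -> [B, D, K]  ('bi,ijk->bjk')
#   EnsembleLinear:  [B, H, K] with w[H, D, K] -> [B, D, K]  ('bik,ijk->bjk')
# so every member sees the same input but keeps its own parameters.
#
#   model_fn = lambda x: EnsembleMLP([32, 2], num_ensemble=4)(x)
#   transformed = hk.without_apply_rng(hk.transform(model_fn))
#   x = jnp.zeros([8, 16])
#   params = transformed.init(jax.random.PRNGKey(0), x)
#   out = transformed.apply(params, x)  # shape [8, 2, 4]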
| enn-master | enn/networks/einsum_mlp.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ENN Networks."""
from typing import List
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import einsum_mlp
from jax.config import config
config.update('jax_numpy_rank_promotion', 'raise')
class EinsumMlpTest(parameterized.TestCase):
@parameterized.parameters([
([], 1, True), ([10, 10], 5, True), ([], 1, False), ([10, 10], 5, False),
])
def test_ensemble(self,
hiddens: List[int],
num_ensemble: int,
regression: bool):
"""Simple test to run just 10 batches."""
test_experiment = supervised.make_test_experiment(regression)
enn = einsum_mlp.make_ensemble_mlp_with_prior_enn(
output_sizes=hiddens+[test_experiment.num_outputs],
dummy_input=test_experiment.dummy_input,
num_ensemble=num_ensemble,
prior_scale=1.,
)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/einsum_mlp_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Epistemic indexers for ENNs."""
import dataclasses
import chex
from enn import base
import jax
import jax.numpy as jnp
class PrngIndexer(base.EpistemicIndexer):
"""Index by JAX PRNG sequence."""
def __call__(self, key: chex.PRNGKey) -> base.Index:
return key
@dataclasses.dataclass
class EnsembleIndexer(base.EpistemicIndexer):
"""Index into an ensemble by integer."""
num_ensemble: int
def __call__(self, key: chex.PRNGKey) -> base.Index:
return jax.random.randint(key, [], 0, self.num_ensemble)
@dataclasses.dataclass
class GaussianIndexer(base.EpistemicIndexer):
"""A Gaussian indexer ~ N(0, I)."""
index_dim: int
def __call__(self, key: chex.PRNGKey) -> base.Index:
return jax.random.normal(key, shape=[self.index_dim])
@dataclasses.dataclass
class ScaledGaussianIndexer(base.EpistemicIndexer):
"""A scaled Gaussian indexer."""
index_dim: int
# When index_scale is 1.0 the returned random variable has expected norm = 1.
index_scale: float = 1.0
def __call__(self, key: chex.PRNGKey) -> base.Index:
return self.index_scale / jnp.sqrt(self.index_dim) * jax.random.normal(
key, shape=[self.index_dim])
@dataclasses.dataclass
class GaussianWithUnitIndexer(base.EpistemicIndexer):
"""Produces index (1, z) for z dimension=index_dim-1 unit ball."""
index_dim: int
@property
def mean_index(self) -> chex.Array:
return jnp.append(1, jnp.zeros(self.index_dim - 1))
def __call__(self, key: chex.PRNGKey) -> base.Index:
return jnp.append(1, jax.random.normal(
key, shape=[self.index_dim - 1]) / jnp.sqrt(self.index_dim - 1))
@dataclasses.dataclass
class DirichletIndexer(base.EpistemicIndexer):
"""Samples a Dirichlet index with parameter alpha."""
alpha: chex.Array
def __call__(self, key: chex.PRNGKey) -> base.Index:
return jax.random.dirichlet(key, self.alpha)
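# Illustrative usage sketch: each indexer maps a PRNG key to an epistemic index.
# The dimensions below are arbitrary example values.
def _example_indexer_usage():
  key = jax.random.PRNGKey(0)
  ensemble_index = EnsembleIndexer(num_ensemble=10)(key)  # scalar int in [0, 10)
  gaussian_index = ScaledGaussianIndexer(index_dim=5)(key)  # shape [5]
  dirichlet_index = DirichletIndexer(alpha=jnp.ones(3))(key)  # shape [3], sums to 1
  return ensemble_index, gaussian_index, dirichlet_index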
| enn-master | enn/networks/indexers.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Network definitions for LeNet5."""
from typing import Sequence
from absl import logging
import chex
from enn.networks import base as networks_base
from enn.networks import ensembles
import haiku as hk
import jax
_LeNet5_CHANNELS = (6, 16, 120)
class LeNet5(hk.Module):
"""VGG Network with batchnorm and without maxpool."""
def __init__(self,
num_output_classes: int,
lenet_output_channels: Sequence[int] = _LeNet5_CHANNELS,):
super().__init__()
logging.info('Initializing a LeNet5.')
self._output_channels = lenet_output_channels
num_channels = len(self._output_channels)
self._conv_modules = [
hk.Conv2D( # pylint: disable=g-complex-comprehension
output_channels=self._output_channels[i],
kernel_shape=5,
padding='SAME',
name=f'conv_2d_{i}') for i in range(num_channels)
]
self._mp_modules = [
hk.MaxPool( # pylint: disable=g-complex-comprehension
window_shape=2, strides=2, padding='SAME',
name=f'max_pool_{i}') for i in range(num_channels)
]
self._flatten_module = hk.Flatten()
self._linear_module = hk.Linear(84, name='linear')
self._logits_module = hk.Linear(num_output_classes, name='logits')
def __call__(self, inputs: chex.Array) -> chex.Array:
net = inputs
for conv_layer, mp_layer in zip(self._conv_modules, self._mp_modules):
net = conv_layer(net)
net = jax.nn.relu(net)
net = mp_layer(net)
net = self._flatten_module(net)
net = self._linear_module(net)
net = jax.nn.relu(net)
return self._logits_module(net)
class EnsembleLeNet5ENN(networks_base.EnnArray):
"""Ensemble of LeNet5 Networks created using einsum ensemble."""
def __init__(self,
num_output_classes: int,
num_ensemble: int = 1,):
def net_fn(x: chex.Array) -> chex.Array:
return LeNet5(num_output_classes)(x)
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
enn = ensembles.EnsembleWithState(transformed, num_ensemble)
super().__init__(enn.apply, enn.init, enn.indexer)
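# Illustrative usage sketch, assuming the standard EnnArray interface of
# apply/init/indexer; the image shape and class count are arbitrary examples.
def _example_lenet_enn_usage():
  enn = EnsembleLeNet5ENN(num_output_classes=10, num_ensemble=3)
  key = jax.random.PRNGKey(0)
  index = enn.indexer(key)  # selects one ensemble member
  dummy_images = jax.numpy.ones([2, 32, 32, 3])  # NHWC image batch
  params, state = enn.init(key, dummy_images, index)
  logits, state = enn.apply(params, state, dummy_images, index)
  return logits  # logits for the selected member, shape [2, 10]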
| enn-master | enn/networks/lenet.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for networks."""
from typing import Callable, Optional, Tuple
import chex
from enn import base
from enn.networks import base as networks_base
import haiku as hk
import jax.numpy as jnp
def parse_net_output(net_out: networks_base.Output) -> chex.Array:
"""Convert network output to scalar prediction value."""
if isinstance(net_out, networks_base.OutputWithPrior):
return net_out.preds
else:
return net_out
def parse_to_output_with_prior(
net_out: networks_base.Output) -> networks_base.OutputWithPrior:
"""Convert network output to networks_base.OutputWithPrior."""
if isinstance(net_out, networks_base.OutputWithPrior):
return net_out
else:
return networks_base.OutputWithPrior(
train=net_out, prior=jnp.zeros_like(net_out))
def epistemic_network_from_module(
enn_ctor: Callable[[], networks_base.EpistemicModule],
indexer: base.EpistemicIndexer,
) -> networks_base.EnnArray:
"""Convert an Enn module to epistemic network with paired index."""
def enn_fn(inputs: chex.Array,
index: base.Index) -> networks_base.Output:
return enn_ctor()(inputs, index)
transformed = hk.without_apply_rng(hk.transform_with_state(enn_fn))
return networks_base.EnnArray(transformed.apply, transformed.init, indexer)
# TODO(author4): Sort out issues with importing the function in networks/__init__.
def wrap_net_fn_as_enn(
net_fn: Callable[[base.Input], base.Output], # pre-transformed
) -> base.EpistemicNetwork[base.Input, base.Output]:
"""Wrap a pre-transformed function as an ENN with dummy index.
Args:
net_fn: A pre-transformed net function y = f(x). We assume that the network
doesn't use rng during apply internally.
Returns:
An ENN that wraps around f(x) with a dummy indexer.
"""
transformed = hk.without_apply_rng(hk.transform_with_state(net_fn))
def apply(
params: hk.Params,
state: hk.State,
inputs: base.Input,
index: base.Index,
) -> Tuple[base.Output, hk.State]:
del index
return transformed.apply(params, state, inputs)
return base.EpistemicNetwork[base.Input, base.Output](
apply=apply,
init=lambda k, x, z: transformed.init(k, x),
indexer=lambda k: k,
)
def wrap_transformed_as_enn_no_state(
transformed: hk.Transformed) -> networks_base.EnnNoState:
"""Wraps a simple transformed function y = f(x) as an ENN."""
return networks_base.EnnNoState(
apply=lambda params, x, z: transformed.apply(params, x),
init=lambda key, x, z: transformed.init(key, x),
indexer=lambda key: key,
)
def wrap_transformed_as_enn(
transformed: hk.Transformed
) -> networks_base.EnnArray:
"""Wraps a simple transformed function y = f(x) as an ENN."""
apply = lambda params, x, z: transformed.apply(params, x)
apply = wrap_apply_no_state_as_apply(apply)
init = lambda key, x, z: transformed.init(key, x)
init = wrap_init_no_state_as_init(init)
return networks_base.EnnArray(
apply=apply,
init=init,
indexer=lambda key: key,
)
def wrap_enn_no_state_as_enn(
enn: networks_base.EnnNoState
) -> networks_base.EnnArray:
"""Wraps a standard ENN as an ENN with a dummy network state."""
return networks_base.EnnArray(
apply=wrap_apply_no_state_as_apply(enn.apply),
init=wrap_init_no_state_as_init(enn.init),
indexer=enn.indexer,
)
def wrap_enn_as_enn_no_state(
enn: networks_base.EnnArray,
constant_state: Optional[hk.State] = None,
) -> networks_base.EnnNoState:
"""Passes a dummy state to ENN with state as an ENN."""
if constant_state is None:
constant_state = {}
def init(key: chex.PRNGKey, x: chex.Array, z: base.Index) -> hk.Params:
params, unused_state = enn.init(key, x, z)
return params
def apply(
params: hk.Params, x: chex.Array, z: base.Index) -> networks_base.Output:
output, unused_state = enn.apply(params, constant_state, x, z)
return output
return networks_base.EnnNoState(
apply=apply,
init=init,
indexer=enn.indexer,
)
def wrap_apply_no_state_as_apply(
apply: networks_base.ApplyNoState,) -> networks_base.ApplyArray:
"""Wraps a legacy enn apply as an apply for enn with state."""
def new_apply(
params: hk.Params,
unused_state: hk.State,
inputs: chex.Array,
index: base.Index,
) -> Tuple[networks_base.Output, hk.State]:
return (apply(params, inputs, index), {})
return new_apply
def wrap_init_no_state_as_init(
init: networks_base.InitNoState) -> networks_base.InitArray:
"""Wraps a legacy enn init as an init for enn with state."""
def new_init(
key: chex.PRNGKey,
inputs: chex.Array,
index: base.Index,
) -> Tuple[hk.Params, hk.State]:
return (init(key, inputs, index), {})
return new_init
def scale_enn_output(
enn: networks_base.EnnArray,
scale: float,
) -> networks_base.EnnArray:
"""Returns an ENN with output scaled by a scaling factor."""
def scaled_apply(
params: hk.Params, state: hk.State, inputs: chex.Array,
index: base.Index) -> Tuple[networks_base.Output, hk.State]:
out, state = enn.apply(params, state, inputs, index)
if isinstance(out, networks_base.OutputWithPrior):
scaled_out = networks_base.OutputWithPrior(
train=out.train * scale,
prior=out.prior * scale,
extra=out.extra,
)
else:
scaled_out = out * scale
return scaled_out, state
return networks_base.EnnArray(
apply=scaled_apply,
init=enn.init,
indexer=enn.indexer,
)
def make_centered_enn_no_state(
enn: networks_base.EnnNoState,
x_train: chex.Array) -> networks_base.EnnNoState:
"""Returns an ENN that centers input according to x_train."""
assert x_train.ndim > 1 # need to include a batch dimension
x_mean = jnp.mean(x_train, axis=0)
x_std = jnp.std(x_train, axis=0)
def centered_apply(params: hk.Params, x: chex.Array,
z: base.Index) -> networks_base.Output:
normalized_x = (x - x_mean) / (x_std + 1e-9)
return enn.apply(params, normalized_x, z)
return networks_base.EnnNoState(centered_apply, enn.init, enn.indexer)
def make_centered_enn(
enn: networks_base.EnnArray,
x_train: chex.Array) -> networks_base.EnnArray:
"""Returns an ENN that centers input according to x_train."""
assert x_train.ndim > 1 # need to include a batch dimension
x_mean = jnp.mean(x_train, axis=0)
x_std = jnp.std(x_train, axis=0)
def centered_apply(params: hk.Params, state: hk.State, x: chex.Array,
z: base.Index) -> networks_base.Output:
normalized_x = (x - x_mean) / (x_std + 1e-9)
return enn.apply(params, state, normalized_x, z) # pytype: disable=bad-return-type # numpy-scalars
return networks_base.EnnArray(centered_apply, enn.init, enn.indexer) # pytype: disable=wrong-arg-types # numpy-scalars
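# Illustrative usage sketch: wrap a plain Haiku MLP as an ENN with a dummy
# index. The layer sizes and batch shape are arbitrary example values.
def _example_wrap_transformed_as_enn():
  net = hk.without_apply_rng(hk.transform(lambda x: hk.nets.MLP([10, 3])(x)))
  enn = wrap_transformed_as_enn(net)
  key = next(hk.PRNGSequence(0))
  index = enn.indexer(key)  # dummy indexer: the index is just the key
  dummy_x = jnp.ones([4, 7])
  params, state = enn.init(key, dummy_x, index)
  out, state = enn.apply(params, state, dummy_x, index)
  return parse_net_output(out)  # array of shape [4, 3]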
| enn-master | enn/networks/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing some types of epistemic neural networks in JAX."""
from typing import Sequence
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import priors
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
class ConcatIndexMLP(networks_base.EpistemicModule):
"""An MLP that has an d-dimensional index concatenated to every layer."""
# TODO(author2): Rationalize the type behaviour of network outputs.
def __init__(self,
output_sizes: Sequence[int],
index_dim: int,
variance_dim: int,
name: str = 'concat_index_mlp'):
super().__init__(name=name)
self.index_dim = index_dim
self.hiddens = output_sizes[:-1]
self.output_dim = output_sizes[-1]
self.variance_dim = variance_dim
def __call__(self, inputs: chex.Array,
index: base.Index) -> networks_base.OutputWithPrior:
"""Index must be of shape (index_dim,) intended to be Gaussian."""
batch_size = inputs.shape[0]
batched_index = jnp.repeat(jnp.expand_dims(index, 0), batch_size, axis=0)
flat_inputs = hk.Flatten()(inputs)
out = flat_inputs
out_no_index = out
for hidden in self.hiddens:
out_no_index = out
out = hk.Linear(hidden)(jnp.concatenate([out, batched_index], axis=1))
out = jax.nn.relu(out)
# Make the variance predictor
input_projection = hk.nets.MLP(
[self.variance_dim], activate_final=True)(flat_inputs)
var_embedding = jnp.concatenate([out_no_index, input_projection], axis=1)
var_pred = hk.nets.MLP([self.variance_dim, self.output_dim])(var_embedding)
out = hk.Linear(self.output_dim)(out)
return networks_base.OutputWithPrior(
train=out,
prior=jnp.zeros_like(out),
extra={'log_var': var_pred},
)
class IndexMLPWithGpPrior(networks_base.EnnArray):
"""An Index MLP with GP prior as an ENN."""
def __init__(self,
output_sizes: Sequence[int],
input_dim: int,
num_prior: int,
num_feat: int,
variance_dim: int = 20,
gamma: priors.GpGamma = 1.,
prior_scale: float = 1,
seed: int = 0):
"""An Index MLP with GP prior as an ENN."""
rng = hk.PRNGSequence(seed)
def net_fn(x, z):
net = ConcatIndexMLP(
output_sizes=output_sizes,
index_dim=num_prior+1,
variance_dim=variance_dim,
)
return net(x, z)
transformed = hk.without_apply_rng(hk.transform(net_fn))
output_dim = output_sizes[-1]
prior_fns = [priors.make_null_prior(output_dim)]
for _ in range(num_prior):
prior_fns.append(priors.make_random_feat_gp(
input_dim, output_dim, num_feat, next(rng), gamma))
def apply(params: hk.Params, inputs: chex.Array,
index: base.Index) -> networks_base.OutputWithPrior:
"""Forward the SpecialMLP and also the prior network with index."""
net_out = transformed.apply(params, inputs, index)
all_priors = [prior(inputs) for prior in prior_fns]
prior_fn = jnp.sum(jnp.stack(all_priors, axis=-1) * index, axis=-1)
return net_out._replace(prior=prior_scale * prior_fn)
# TODO(author3): Change transformed above to work with state.
apply = network_utils.wrap_apply_no_state_as_apply(apply)
init = network_utils.wrap_init_no_state_as_init(transformed.init)
super().__init__(
apply=apply,
init=init,
indexer=indexers.GaussianWithUnitIndexer(num_prior + 1),
)
class IndexMLPEnn(networks_base.EnnArray):
"""An MLP with index appended to each layer as ENN."""
def __init__(self,
output_sizes: Sequence[int],
index_dim: int,
variance_dim: int = 20):
"""An MLP with index appended to each layer as ENN."""
enn = network_utils.epistemic_network_from_module(
enn_ctor=lambda: ConcatIndexMLP(output_sizes, index_dim, variance_dim),
indexer=indexers.GaussianWithUnitIndexer(index_dim),
)
super().__init__(enn.apply, enn.init, enn.indexer)
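# Illustrative usage sketch: forward the index MLP with a Gaussian-with-unit
# index. The layer sizes, index dimension and batch shape are arbitrary examples.
def _example_index_mlp_usage():
  enn = IndexMLPEnn(output_sizes=[16, 3], index_dim=5)
  key = jax.random.PRNGKey(0)
  index = enn.indexer(key)  # shape [5]; first entry fixed to 1
  dummy_x = jnp.ones([2, 8])
  params, state = enn.init(key, dummy_x, index)
  out, state = enn.apply(params, state, dummy_x, index)
  return out.preds  # shape [2, 3]; out.extra['log_var'] holds the variance head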
| enn-master | enn/networks/index_mlp.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing some mechanisms for prior functions with ENNs in JAX."""
import dataclasses
from typing import Callable, Iterable, Optional, Tuple, Union
from absl import logging
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
PriorFn = Callable[[chex.Array, base.Index], chex.Array]
class EnnNoStateWithAdditivePrior(networks_base.EnnNoState):
"""Create an ENN with additive prior_fn applied to outputs."""
def __init__(self,
enn: networks_base.EnnNoState,
prior_fn: PriorFn,
prior_scale: float = 1.):
"""Create an ENN with additive prior_fn applied to outputs."""
enn_state = network_utils.wrap_enn_no_state_as_enn(enn)
enn_state_p = EnnWithAdditivePrior(enn_state, prior_fn, prior_scale)
enn_prior = network_utils.wrap_enn_as_enn_no_state(enn_state_p)
super().__init__(enn_prior.apply, enn_prior.init, enn_prior.indexer)
class EnnWithAdditivePrior(networks_base.EnnArray):
"""Create an ENN with additive prior_fn applied to outputs."""
def __init__(self,
enn: networks_base.EnnArray,
prior_fn: PriorFn,
prior_scale: float = 1.):
"""Create an ENN with additive prior_fn applied to outputs."""
def apply_fn(
params: hk.Params,
state: hk.State,
inputs: chex.Array,
index: base.Index
) -> Tuple[networks_base.OutputWithPrior, hk.State]:
net_out, state_out = enn.apply(params, state, inputs, index)
net_out = network_utils.parse_to_output_with_prior(net_out)
prior = prior_scale * prior_fn(inputs, index)
out = net_out._replace(prior=net_out.prior + prior)
return out, state_out
super().__init__(
apply=apply_fn,
init=enn.init,
indexer=enn.indexer,
)
def convert_enn_to_prior_fn(enn: networks_base.EnnArray,
dummy_input: chex.Array,
key: chex.PRNGKey) -> PriorFn:
"""Converts an ENN to prior function for fixed prior params."""
index_key, init_key, _ = jax.random.split(key, 3)
index = enn.indexer(index_key)
prior_params, prior_state = enn.init(init_key, dummy_input, index)
def prior_fn(x: chex.Array, z: base.Index) -> chex.Array:
output, unused_state = enn.apply(prior_params, prior_state, x, z)
return network_utils.parse_net_output(output)
return prior_fn
def make_null_prior(
output_dim: int) -> Callable[[chex.Array], chex.Array]:
def null_prior(inputs: chex.Array) -> chex.Array:
return jnp.zeros([inputs.shape[0], output_dim], dtype=jnp.float32)
return null_prior
# Configure GP kernel via float=gamma or uniform between gamma_min, gamma_max.
GpGamma = Union[float, Tuple[float, float]]
def _parse_gamma(gamma: GpGamma, num_feat: int,
key: chex.PRNGKey) -> Union[float, chex.Array]:
if isinstance(gamma, float) or isinstance(gamma, int):
return float(gamma)
else:
gamma_min, gamma_max = gamma
return gamma_min + (gamma_max - gamma_min) * jax.random.uniform(
key, shape=[1, num_feat, 1])
def make_random_feat_gp(
input_dim: int,
output_dim: int,
num_feat: int,
key: chex.PRNGKey,
gamma: GpGamma = 1.,
scale: float = 1.,
) -> Callable[[chex.Array], chex.Array]:
"""Generate a random features GP realization via random features.
This is based on the "random kitchen sink" approximation from Rahimi,Recht.
See blog post/paper: http://www.argmin.net/2017/12/05/kitchen-sinks/.
Args:
input_dim: dimension of input.
output_dim: dimension of output.
num_feat: number of random features used to approximate GP.
key: jax random number key.
gamma: gaussian kernel variance = gamma^2 (higher = more wiggle).
If you pass a tuple we generate uniform between gamma_min, gamma_max.
scale: scale of the output in each dimension.
Returns:
A callable gp_instance: inputs -> outputs in jax.
"""
weights_key, bias_key, alpha_key, gamma_key = jax.random.split(key, num=4)
weights = jax.random.normal(
weights_key, shape=[num_feat, input_dim, output_dim])
bias = 2 * jnp.pi * jax.random.uniform(
bias_key, shape=[1, num_feat, output_dim])
alpha = jax.random.normal(alpha_key, shape=[num_feat]) / jnp.sqrt(num_feat)
gamma = _parse_gamma(gamma, num_feat, gamma_key)
def gp_instance(inputs: chex.Array) -> chex.Array:
"""Assumes one batch dimension and flattens input to match that."""
flat_inputs = jax.vmap(jnp.ravel)(inputs)
input_embedding = jnp.einsum('bi,kio->bko', flat_inputs, weights)
random_feats = jnp.cos(gamma * input_embedding + bias)
return scale * jnp.einsum('bko,k->bo', random_feats, alpha)
return gp_instance
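# Illustrative usage sketch: draw one random-feature GP realization and evaluate
# it on a batch. The dimensions below are arbitrary example values.
def _example_random_feat_gp():
  key = jax.random.PRNGKey(0)
  gp = make_random_feat_gp(input_dim=4, output_dim=2, num_feat=32, key=key)
  prior_values = gp(jnp.ones([8, 4]))  # deterministic given key, shape [8, 2]
  return prior_values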
def get_random_mlp_with_index(x_sample: chex.Array,
z_sample: chex.Array,
rng: chex.PRNGKey,
prior_output_sizes: Optional[
Iterable[int]] = None,
prior_weight_std: float = 3,
prior_bias_std: float = 0.1) -> PriorFn:
"""Construct a prior func f(x, z) based on a random MLP.
The returned function assumes the data input, x, to include a batch dimension
but the index input, z, to not include a batch dimension.
Args:
x_sample: a sample data input.
z_sample: a sample index input.
rng: PRNG key.
prior_output_sizes: output sizes for the MLP.
prior_weight_std: unscaled std of the random weights. The actual std is
      scaled by 1/sqrt{n} where n is the fan-in (truncated normal at 2 sigma).
    prior_bias_std: std of the random biases (truncated normal at 2 sigma).
Returns:
a random function of two inputs x, and z.
"""
if prior_output_sizes is None:
prior_output_sizes = [10, 10, 1]
def net_fn(x: chex.Array, z: chex.Array):
# repeat z along the batch dimension of x.
z = jnp.tile(z, reps=(x.shape[0], 1))
xz = jnp.concatenate([x, z], axis=1)
mlp = hk.nets.MLP(
prior_output_sizes,
w_init=hk.initializers.VarianceScaling(scale=prior_weight_std),
b_init=hk.initializers.TruncatedNormal(stddev=prior_bias_std))
return mlp(xz)
transformed_fn = hk.without_apply_rng(hk.transform(net_fn))
params = transformed_fn.init(rng, x_sample, z_sample)
return lambda x, z: transformed_fn.apply(params, x, z)
# TODO(author2): Forming Modules with Prior is prone to bugs as the "prior"
# parameters will get returned in the init. In general, you should try and
# use the EnnNoStateWithAdditivePrior above instead.
WARN = ('WARNING: prior parameters will be included as hk.Params for module. '
'If possible, you should use EnnNoStateWithAdditivePrior instead.')
@dataclasses.dataclass
class NetworkWithAdditivePrior(hk.Module):
"""Combines network and a prior using a specified function."""
net: hk.Module
prior_net: hk.Module
prior_scale: float = 1.
def __call__(self, *args, **kwargs) -> chex.Array:
logging.warning(WARN)
prior = jax.lax.stop_gradient(self.prior_net(*args, **kwargs)) # pytype:disable=not-callable
net_out = self.net(*args, **kwargs) # pytype:disable=not-callable
return net_out + prior * self.prior_scale
| enn-master | enn/networks/priors.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing Dropout as an ENN in JAX."""
from typing import List, Optional, Sequence
import chex
from enn import base
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import utils as network_utils
import haiku as hk
import jax
import jax.numpy as jnp
def cummulative_sum(x):
"""Returns the cummulative sum of elements of a list."""
# Create a copy of list x.
output = x[:]
for i in range(len(output) - 1):
output[i + 1] += output[i]
return output
def generate_masks(key: chex.PRNGKey, input_size: int,
output_sizes: Sequence[int],
dropout_rate: float) -> List[jnp.ndarray]:
"""Generates masks for nodes of the network excluding nodes of the last layer."""
# Create a list of num nodes per layer, ignoring the output layer.
num_nodes_layers = [input_size,] + list(output_sizes[:-1])
keep_rate = 1.0 - dropout_rate
# Create an array of masks, one mask for each nodes of the network, excluding
# nodes of the last layer.
masks = jax.random.bernoulli(
key, keep_rate, shape=[sum(num_nodes_layers)])
masks /= keep_rate
# Create a list where each element is an array of masks for one layer.
# TODO(author3): Use jnp.cumsum instead of cummulative_sum
masks = jnp.split(masks, cummulative_sum(num_nodes_layers))
return masks
class MLPDropoutENN(networks_base.EnnArray):
"""MLP with dropout as an ENN."""
def __init__(
self,
output_sizes: Sequence[int],
dropout_rate: float = 0.05,
dropout_input: bool = True,
seed: int = 0,
nonzero_bias: bool = True,
w_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None
):
"""MLP with dropout as an ENN."""
def enn_fn(inputs: chex.Array,
z: base.Index) -> networks_base.Output:
assert inputs.ndim == 2
unused_batch, input_size = inputs.shape
x = hk.Flatten()(inputs)
      # Generate a list of masks, one for each network layer, excluding the
      # output layer.
masks = generate_masks(
key=z,
input_size=input_size,
output_sizes=output_sizes,
dropout_rate=dropout_rate)
# Note that we consider a dropout layer after the input to be
# consistent with the paper "Dropout as a Bayesian Approximation:
# Representing Model Uncertainty in Deep Learning" (2015),
# https://github.com/yaringal/DropoutUncertaintyExps/blob/master/net/net.py
if dropout_input:
mask = masks[0]
x = jnp.einsum('bi,i->bi', x, mask)
for layer_index, output_size in enumerate(output_sizes):
if layer_index == 0:
if nonzero_bias:
b_init_0 = hk.initializers.TruncatedNormal(
stddev=(1. / jnp.sqrt(input_size)))
else:
b_init_0 = b_init
x = hk.Linear(output_size, w_init=w_init, b_init=b_init_0)(x)
else:
x = hk.Linear(output_size, w_init=w_init, b_init=b_init)(x)
if layer_index < len(output_sizes) - 1:
mask = masks[layer_index + 1]
x = jnp.einsum('bi,i->bi', x, mask)
x = jax.nn.relu(x)
return x
# Note that our enn_fn is stochastic because we generate masks in it. But,
# since we pass a chex.PRNGKey directly to it, we can still wrap transformed
# function with hk.without_apply_rng.
transformed = hk.without_apply_rng(hk.transform(enn_fn))
# We use a simple indexer which is basically an identity map.
indexer = indexers.PrngIndexer()
# Apply function for enn_fn requires a rng key to generate masks. We use
# the index z in f(x,z) as the rng key.
def apply(params: hk.Params, x: chex.Array,
z: base.Index) -> networks_base.Output:
net_out = transformed.apply(params, x, z)
return net_out
apply = network_utils.wrap_apply_no_state_as_apply(apply)
init = network_utils.wrap_init_no_state_as_init(transformed.init)
super().__init__(apply, init, indexer)
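# Illustrative usage sketch: the epistemic index is a PRNG key, and each key
# induces a different dropout mask. Sizes below are arbitrary example values.
def _example_dropout_enn_usage():
  enn = MLPDropoutENN(output_sizes=[16, 3], dropout_rate=0.1)
  key = jax.random.PRNGKey(0)
  index = enn.indexer(key)  # PrngIndexer simply passes the key through
  dummy_x = jnp.ones([2, 8])
  params, state = enn.init(key, dummy_x, index)
  out, state = enn.apply(params, state, dummy_x, index)
  return out  # shape [2, 3]; a new key/index resamples the masks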
| enn-master | enn/networks/dropout.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing a Gaussian ENN in JAX.
This model came out of brainstorming with who/benvanroy, who/iosband.
Each linear unit in the neural net is augmented:
Wx + b --> Wx + b + c Z, for Z ~ N(0, 1)
You are adding a learnable bias for each segment of
This is an implementation framing that network as an ENN.
"""
from typing import Callable, Sequence
import chex
from enn.networks import base as networks_base
from enn.networks import indexers
from enn.networks import utils as network_utils
import haiku as hk
import haiku.experimental as hke
import jax
import jax.numpy as jnp
def _is_linear_bias(context: hke.ParamContext):
return (context.full_name.endswith('/b')
and isinstance(context.module, hk.Linear))
def make_enn_creator(init_scale: float = 1.):
"""Make enn_creator initializing c unit to init_scale."""
custom_init = lambda shape, dtype: init_scale * jnp.ones(shape, dtype)
def enn_creator(next_creator, shape, dtype, init, context):
"""Create gaussian enn linear layer."""
# TODO(author2): How to import hk._src.base types correctly?
if _is_linear_bias(context): # Append gaussian bias term
standard_bias = next_creator(shape, dtype, init)
gaussian_bias = next_creator(shape, dtype, custom_init)
return standard_bias, gaussian_bias
else: # Return the usual creator
return next_creator(shape, dtype, init)
return enn_creator
def enn_getter(next_getter, value, context):
"""Get variables for gaussian enn linear layer."""
# TODO(author2): How to import hk._src.base types correctly?
if _is_linear_bias(context):
standard_bias = next_getter(value[0])
gaussian_bias = next_getter(value[1])
noise = jax.random.normal(hk.next_rng_key(), standard_bias.shape)
return standard_bias + gaussian_bias * noise
else:
return next_getter(value)
class GaussianNoiseEnn(networks_base.EnnArray):
"""GaussianNoiseEnn from callable module."""
def __init__(self,
module_ctor: Callable[[], hk.Module],
init_scale: float = 1.):
"""GaussianNoiseEnn from callable module."""
enn_creator = make_enn_creator(init_scale=init_scale)
def net_fn(inputs: chex.Array) -> chex.Array:
with hke.custom_getter(enn_getter), hke.custom_creator(enn_creator):
output = module_ctor()(inputs) # pytype: disable=not-callable
return output
# TODO(author2): Note that the GaussianENN requires a rng_key in place of an
# index. Therefore we do *not* hk.without_apply_rng.
transformed = hk.transform(net_fn)
apply = lambda params, x, z: transformed.apply(params, z, x)
init = lambda rng, x, z: transformed.init(rng, x)
# TODO(author3): Change apply and init fns above to work with state.
apply = network_utils.wrap_apply_no_state_as_apply(apply)
init = network_utils.wrap_init_no_state_as_init(init)
super().__init__(apply, init, indexer=indexers.PrngIndexer(),)
class GaussianNoiseMLP(networks_base.EnnArray):
"""Gaussian Enn on a standard MLP."""
def __init__(self, output_sizes: Sequence[int], init_scale: float = 1.):
"""Gaussian Enn on a standard MLP."""
enn = GaussianNoiseEnn(lambda: hk.nets.MLP(output_sizes), init_scale)
super().__init__(enn.apply, enn.init, enn.indexer)
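# Illustrative usage sketch: each index (a PRNG key) resamples the Gaussian bias
# noise Z, yielding a different function sample. Sizes are arbitrary examples.
def _example_gaussian_noise_mlp_usage():
  enn = GaussianNoiseMLP(output_sizes=[16, 3], init_scale=0.5)
  key = jax.random.PRNGKey(0)
  index = enn.indexer(key)  # PrngIndexer simply passes the key through
  dummy_x = jnp.ones([2, 8])
  params, state = enn.init(key, dummy_x, index)
  out, state = enn.apply(params, state, dummy_x, index)
  return out  # shape [2, 3]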
| enn-master | enn/networks/gaussian_enn.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Epinet ENN."""
from typing import Sequence
from absl.testing import absltest
from absl.testing import parameterized
from enn import supervised
from enn.networks import indexers
from enn.networks import mlp
from enn.networks import utils as network_utils
import haiku as hk
class EpinetTest(parameterized.TestCase):
@parameterized.product(
hiddens=[[], [10], [10, 10]],
regression=[True, False]
)
def test_exposed_mlp(self, hiddens: Sequence[int], regression: bool):
"""Test that the exposed MLP runs."""
test_experiment = supervised.make_test_experiment(regression)
output_sizes = list(hiddens) + [test_experiment.num_outputs,]
def net_fn(x):
return mlp.ExposedMLP(output_sizes)(x)
transformed = hk.without_apply_rng(hk.transform(net_fn))
enn = network_utils.wrap_transformed_as_enn(transformed)
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
@parameterized.product(
hiddens=[[], [10, 10]],
index_dim=[1, 10],
regression=[True, False]
)
def test_projected_mlp(self,
hiddens: Sequence[int],
index_dim: int,
regression: bool):
"""Test that the projected MLP runs."""
test_experiment = supervised.make_test_experiment(regression)
def enn_ctor():
return mlp.ProjectedMLP(
hidden_sizes=hiddens,
final_out=test_experiment.num_outputs,
index_dim=index_dim,
)
enn = network_utils.epistemic_network_from_module(
enn_ctor, indexers.GaussianIndexer(index_dim))
experiment = test_experiment.experiment_ctor(enn)
experiment.train(10)
if __name__ == '__main__':
absltest.main()
| enn-master | enn/networks/mlp_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base classes for networks."""
import abc
import dataclasses
import typing as tp
import chex
from enn import base
import haiku as hk
import jax
import numpy as np
import typing_extensions
# Special Enn output with prior
class OutputWithPrior(tp.NamedTuple):
"""Output wrapper for networks with prior functions."""
train: chex.Array
prior: chex.Array = np.zeros(1)
extra: tp.Dict[str, chex.Array] = {}
@property
def preds(self) -> chex.Array:
return self.train + jax.lax.stop_gradient(self.prior)
Output = tp.Union[chex.Array, OutputWithPrior]
# Specializing the network definitions from base.py to work with
# 1. Array inputs
# 2. Output with priors defined above
ApplyArray = base.ApplyFn[chex.Array, Output]
InitArray = base.InitFn[chex.Array]
EnnArray = base.EpistemicNetwork[chex.Array, Output]
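# Illustrative sketch of the OutputWithPrior semantics: gradients flow through
# `train` only, while the additive `prior` is wrapped in stop_gradient.
def _example_output_with_prior() -> chex.Array:
  out = OutputWithPrior(train=np.ones(3), prior=2 * np.ones(3))
  return out.preds  # array of [3., 3., 3.]; no gradient flows through the prior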
################################################################################
# The default network definitions above assume that the network has a state.
# Since a network might not have a state, below we provide definitions for
# Epistemic Networks without state, specialized to work with Array inputs.
class ApplyNoState(typing_extensions.Protocol):
"""Applies the ENN at given parameters, inputs, index."""
def __call__(self,
params: hk.Params,
inputs: chex.Array,
index: base.Index) -> Output:
"""Applies the ENN at given parameters, inputs, index."""
class InitNoState(typing_extensions.Protocol):
"""Initializes the ENN at given rng_key, inputs, index."""
def __call__(self,
rng_key: chex.PRNGKey,
inputs: chex.Array,
index: base.Index) -> hk.Params:
"""Initializes the ENN at given rng_key, inputs, index."""
@dataclasses.dataclass
class EnnNoState:
"""Convenient pairing of Haiku transformed function and index sampler."""
apply: ApplyNoState
init: InitNoState
indexer: base.EpistemicIndexer
class EpistemicModule(abc.ABC, hk.Module):
"""Epistemic neural network abstract base class as Haiku module."""
@abc.abstractmethod
def __call__(self, inputs: chex.Array, index: base.Index) -> Output:
"""Forwards the epsitemic network y = f(x,z)."""
| enn-master | enn/networks/base.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Refactored ResNet for easier research."""
import abc
import dataclasses
import enum
import functools
from typing import Any, Optional, Sequence
import chex
from enn.networks import base as networks_base
import haiku as hk
import jax
import jax.numpy as jnp
from typing_extensions import Protocol
class ForwardFn(Protocol):
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool = False) -> Any:
"""Forwards a ResNet block with appropriate defaults."""
class ResBlock(abc.ABC, hk.Module):
"""ResNet Block."""
@abc.abstractmethod
def __call__(self, # pytype: disable=signature-mismatch # overriding-default-value-checks
inputs: chex.Array,
is_training: bool,
test_local_stats: bool = False) -> Any:
"""Forwards a ResNet block."""
class ResBlockV1(ResBlock):
"""ResNet block for datasets like CIFAR10/100 with smaller input image sizes."""
def __init__(
self,
output_channels: int,
stride: int,
name: Optional[str] = None,
):
super().__init__(name=name)
# Custom constructors for batchnorm and conv
bn_ctor = functools.partial(
hk.BatchNorm, create_scale=True, create_offset=True, decay_rate=0.9)
conv_ctor = functools.partial(hk.Conv2D, padding='SAME', with_bias=False)
# Blocks of batchnorm and convolutions, with shortcut.
self.conv0 = conv_ctor(
output_channels, kernel_shape=3, stride=stride, name='conv0')
self.bn0 = bn_ctor(name='batchnorm_0')
self.conv1 = conv_ctor(
output_channels, kernel_shape=3, stride=1, name='conv1')
self.bn1 = bn_ctor(name='batchnorm_1')
if stride != 1:
width = output_channels // 4
self.shortcut = lambda x: jnp.pad(x[:, ::2, ::2, :], ( # pylint: disable=g-long-lambda
(0, 0), (0, 0), (0, 0), (width, width)), 'constant')
else:
self.shortcut = lambda x: x
def __call__(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
inputs: chex.Array,
is_training: bool,
test_local_stats: bool) -> chex.Array:
out = shortcut = inputs
out = self.conv0(out)
out = self.bn0(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = self.conv1(out)
out = self.bn1(out, is_training, test_local_stats)
shortcut = self.shortcut(shortcut)
return jax.nn.relu(out + shortcut)
class ResBlockV2(ResBlock):
"""ResNet preactivation block, 1x1->3x3->1x1 plus shortcut.
  This block is designed to be a maximally simplistic and readable starting point
for variations on ResNet50. Fix bottleneck=True compared to the reference
haiku implementation.
"""
def __init__(
self,
output_channels: int,
stride: int,
use_projection: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self._use_projection = use_projection
width = output_channels // 4
# Custom constructors for batchnorm and conv
bn_ctor = functools.partial(
hk.BatchNorm, create_scale=True, create_offset=True, decay_rate=0.9)
conv_ctor = functools.partial(hk.Conv2D, padding='SAME', with_bias=False)
# Blocks of batchnorm and convolutions, with shortcut.
self.bn0 = bn_ctor(name='batchnorm_0')
self.conv0 = conv_ctor(width, kernel_shape=1, name='conv0')
self.bn1 = bn_ctor(name='batchnorm_1')
self.conv1 = conv_ctor(width, stride=stride, kernel_shape=3, name='conv1')
self.bn2 = bn_ctor(name='batchnorm_2')
self.conv2 = conv_ctor(output_channels, kernel_shape=1, name='conv2')
self.conv_shortcut = conv_ctor(
output_channels, stride=stride, kernel_shape=1, name='conv_shortcut')
def __call__(self, # pytype: disable=signature-mismatch # overriding-parameter-count-checks
inputs: chex.Array,
is_training: bool,
test_local_stats: bool) -> chex.Array:
# First layer requires special handling due to projection.
norm = jax.nn.relu(self.bn0(inputs, is_training, test_local_stats))
layer_1 = self.conv0(norm)
if self._use_projection:
shortcut = self.conv_shortcut(norm)
else:
shortcut = inputs
layer_2 = self.conv1(
jax.nn.relu(self.bn1(layer_1, is_training, test_local_stats)))
layer_3 = self.conv2(
jax.nn.relu(self.bn2(layer_2, is_training, test_local_stats)))
return layer_3 + shortcut
@dataclasses.dataclass
class ResNetConfig:
"""Configuration options for ResNet."""
channels_per_group: Sequence[int]
blocks_per_group: Sequence[int]
strides_per_group: Sequence[int]
resnet_block_version: str
def __post_init__(self):
assert len(self.channels_per_group) == self.num_groups
assert len(self.blocks_per_group) == self.num_groups
assert len(self.strides_per_group) == self.num_groups
assert self.resnet_block_version in ['V1', 'V2']
@property
def num_groups(self) -> int:
return len(self.channels_per_group)
class ResNet(hk.Module):
"""ResNet implementation designed for maximal clarity/simplicity.
Now exposes the core hidden units for easier access with epinet training.
"""
def __init__(self,
num_classes: int,
config: ResNetConfig,
name: Optional[str] = None):
super().__init__(name=name)
self.is_resnet_block_v1 = config.resnet_block_version == 'V1'
self.is_resnet_block_v2 = not self.is_resnet_block_v1
# Custom constructors for batchnorm and conv
bn_ctor = functools.partial(
hk.BatchNorm, create_scale=True, create_offset=True, decay_rate=0.9)
conv_ctor = functools.partial(hk.Conv2D, padding='SAME', with_bias=False)
if self.is_resnet_block_v1:
self.initial_conv = conv_ctor(
output_channels=16,
kernel_shape=3,
stride=1,
name='initial_conv',
)
self.initial_bn = bn_ctor(name='initial_batchnorm')
if self.is_resnet_block_v2:
self.initial_conv = conv_ctor(
output_channels=64,
kernel_shape=7,
stride=2,
name='initial_conv',
)
self.final_bn = bn_ctor(name='final_batchnorm')
# ResNet body
self.blocks = _make_resnet_blocks(config)
# ResNet head
self.final_fc = hk.Linear(num_classes, w_init=jnp.zeros, name='final_fc')
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool = False) -> networks_base.OutputWithPrior:
# Holds the output of hidden layers.
extra = {}
# Stem
out = self.initial_conv(inputs)
if self.is_resnet_block_v1:
out = self.initial_bn(out, is_training, test_local_stats)
out = jax.nn.relu(out)
if self.is_resnet_block_v2:
out = hk.max_pool(
out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
# Body
for i, block in enumerate(self.blocks):
out = block(out, is_training, test_local_stats)
extra[f'hidden_{i}'] = out
# Head
if self.is_resnet_block_v2:
out = self.final_bn(out, is_training, test_local_stats)
out = jax.nn.relu(out)
pool = jnp.mean(out, axis=[1, 2])
extra['final_out'] = pool
logits = self.final_fc(pool)
return networks_base.OutputWithPrior(
train=logits, prior=jnp.zeros_like(logits), extra=extra)
def _make_resnet_blocks(config: ResNetConfig) -> Sequence[ResBlock]:
"""Makes a sequence of ResNet blocks based on config."""
blocks = []
for group_idx in range(config.num_groups):
for block_idx in range(config.blocks_per_group[group_idx]):
if config.resnet_block_version == 'V1':
block = ResBlockV1(
output_channels=config.channels_per_group[group_idx],
stride=config.strides_per_group[group_idx] if block_idx == 0 else 1,
)
else:
block = ResBlockV2(
output_channels=config.channels_per_group[group_idx],
stride=config.strides_per_group[group_idx] if block_idx == 0 else 1,
use_projection=block_idx == 0,
)
blocks.append(block)
return blocks
class CanonicalResNets(enum.Enum):
"""Canonical ResNet configs."""
RESNET_18: ResNetConfig = ResNetConfig(
channels_per_group=(16, 32, 64),
blocks_per_group=(2, 2, 2),
strides_per_group=(1, 2, 2),
resnet_block_version='V1',
)
RESNET_32: ResNetConfig = ResNetConfig(
channels_per_group=(16, 32, 64),
blocks_per_group=(5, 5, 5),
strides_per_group=(1, 2, 2),
resnet_block_version='V1',
)
RESNET_44: ResNetConfig = ResNetConfig(
channels_per_group=(16, 32, 64),
blocks_per_group=(7, 7, 7),
strides_per_group=(1, 2, 2),
resnet_block_version='V1',
)
RESNET_56: ResNetConfig = ResNetConfig(
channels_per_group=(16, 32, 64),
blocks_per_group=(9, 9, 9),
strides_per_group=(1, 2, 2),
resnet_block_version='V1',
)
RESNET_110: ResNetConfig = ResNetConfig(
channels_per_group=(16, 32, 64),
blocks_per_group=(18, 18, 18),
strides_per_group=(1, 2, 2),
resnet_block_version='V1',
)
RESNET_50: ResNetConfig = ResNetConfig(
channels_per_group=(256, 512, 1024, 2048),
blocks_per_group=(3, 4, 6, 3),
strides_per_group=(1, 2, 2, 2),
resnet_block_version='V2',
)
RESNET_101: ResNetConfig = ResNetConfig(
channels_per_group=(256, 512, 1024, 2048),
blocks_per_group=(3, 4, 23, 3),
strides_per_group=(1, 2, 2, 2),
resnet_block_version='V2',
)
RESNET_152: ResNetConfig = ResNetConfig(
channels_per_group=(256, 512, 1024, 2048),
blocks_per_group=(3, 8, 36, 3),
strides_per_group=(1, 2, 2, 2),
resnet_block_version='V2',
)
RESNET_200: ResNetConfig = ResNetConfig(
channels_per_group=(256, 512, 1024, 2048),
blocks_per_group=(3, 24, 36, 3),
strides_per_group=(1, 2, 2, 2),
resnet_block_version='V2',
)
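# Illustrative usage sketch: build a canonical ResNet inside
# hk.transform_with_state, since batchnorm keeps moving statistics in Haiku
# state. The image shape and class count are arbitrary example values.
def _example_resnet_usage():
  def forward(images: chex.Array, is_training: bool):
    config = CanonicalResNets.RESNET_18.value
    return ResNet(num_classes=10, config=config)(images, is_training)
  transformed = hk.transform_with_state(forward)
  key = jax.random.PRNGKey(0)
  dummy_images = jnp.ones([2, 32, 32, 3])
  params, state = transformed.init(key, dummy_images, is_training=True)
  out, state = transformed.apply(params, state, key, dummy_images,
                                 is_training=True)
  return out.train  # logits of shape [2, 10]; out.extra holds hidden activations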
| enn-master | enn/networks/resnet/lib.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResNet."""
| enn-master | enn/networks/resnet/__init__.py |