Columns: python_code (string, 0 to 780k chars), repo_name (string, 7 to 38 chars), file_path (string, 5 to 103 chars)
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Variational Dropout."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sonnet.python.modules import base as snt_base
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

from tensorflow.contrib import util as contrib_util


class Dropout(snt_base.AbstractModule):
  """Possibly variational dropout."""

  def __init__(self, keep_prob, share_mask=True, scaler=1.0, name='dropout'):
    super(Dropout, self).__init__(name=name)
    self._keep_prob = keep_prob
    self._keep_mask = None
    self._share_mask = share_mask
    self._scaler = scaler

  def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      noise = tf.random_uniform(shape, dtype=x.dtype)
      self._keep_mask = (tf.floor(self._keep_prob + noise)
                         * (self._scaler / self._keep_prob))
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask

  def _build(self, x):
    if contrib_util.constant_value(self._keep_prob) == 1:
      return x
    else:
      return x * self._ensure_keep_mask(x)


class GaussianDropout(snt_base.AbstractModule):
  """Possibly variational dropout."""

  def __init__(self, keep_prob, share_mask=True, scaler=1.0, name='dropout'):
    super(GaussianDropout, self).__init__(name=name)
    self._keep_prob = keep_prob
    self._keep_mask = None
    self._share_mask = share_mask
    self._scaler = scaler

  def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      # Calculate the stddev for the normal distribution that
      # matches the stddev of the bernoulli with p=keep_prob.
      stddev = tf.sqrt((1 - self._keep_prob) / self._keep_prob)
      self._keep_mask = tf.random_normal(shape, mean=1.0, stddev=stddev,
                                         dtype=x.dtype)
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask

  def _build(self, x):
    if contrib_util.constant_value(self._keep_prob) == 1:
      return x
    else:
      return x * self._ensure_keep_mask(x)


class DirichletDropout(snt_base.AbstractModule):
  """Possibly variational dropout."""

  def __init__(self, keep_prob, share_mask=True, scaler=1.0, name='dropout'):
    super(DirichletDropout, self).__init__(name=name)
    self._keep_prob = keep_prob
    self._keep_mask = None
    self._share_mask = share_mask
    self._scaler = scaler

  def _ensure_keep_mask(self, x):
    if self._keep_mask is None or not self._share_mask:
      shape = tf.shape(x)
      k = shape[1]
      # To make this class a drop-in replacement for bernoulli dropout we
      # parameterize it with keep_prob. Set alpha of the dirichlet so that the
      # variance is equal to the variance of the bernoulli with p=keep_prob
      # divided by keep_prob.
      # Now the variance of the dirichlet with k equal alphas is
      # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
      kf = tf.cast(k, tf.float32)
      alpha = self._keep_prob * (kf - 1.0) / ((1-self._keep_prob)*kf) - 1.0/kf
      dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
      assert (dist.reparameterization_type ==
              tfp.distributions.FULLY_REPARAMETERIZED)
      # E[Dir(alpha)] is 1/k for all elements, but we want the expectation to
      # be keep_prob, hence the multiplication.
      self._keep_mask = kf * dist.sample(shape[0])
      self._keep_mask.set_shape(x.get_shape())
    return self._keep_mask

  def _build(self, x):
    if contrib_util.constant_value(self._keep_prob) == 1:
      return x
    else:
      return tf.cond(tf.equal(self._keep_prob, 1.0),
                     lambda: x,
                     lambda: x * self._ensure_keep_mask(x))


class DriftingDropout(snt_base.AbstractModule):
  """Dropout with gradually changing mask."""

  def __init__(self, keep_prob, flip_prob=0.0, scaler=1.0, name='dropout'):
    super(DriftingDropout, self).__init__(name=name)
    self._keep_prob = keep_prob
    self._flip_prob = flip_prob
    self._scaler = scaler
    self._time_step = 0

  def _build(self, x, state):
    prev_keep_mask = state
    shape = tf.shape(x)
    noise = tf.random_uniform(shape, dtype=x.dtype)
    other_mask = tf.floor(self._keep_prob + noise)
    choice_noise = tf.random_uniform(shape, dtype=x.dtype)
    choice = tf.less(choice_noise, self._flip_prob)
    # KLUDGE(melisgl): The client has to pass the last keep_mask from
    # a batch to the next so the mask may end up next to some
    # recurrent cell state. This state is often zero at the beginning
    # and may be periodically zeroed (per example) during training.
    # While zeroing LSTM state is okay, zeroing the dropout mask is
    # not. So instead of forcing every client to deal with this common
    # (?) case, if an all zero mask is detected, then regenerate a
    # fresh mask. This is of course a major hack and won't help with
    # learnt initial states, for example.
    sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
    is_initializing = tf.equal(sum_, 0.0)
    self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                               other_mask,
                               prev_keep_mask)
    self._time_step += 1
    return x * self._keep_mask / self._keep_prob * self._scaler
lamb-master
lamb/dropout.py
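The alpha chosen in DirichletDropout above can be sanity-checked numerically. The snippet below is not part of the repo; it only verifies that with this alpha the scaled Dirichlet mask k*Dir(alpha) has mean 1 and variance (1-keep_prob)/keep_prob, matching the inverted Bernoulli mask used by the plain Dropout class.

import numpy as np

p, k = 0.8, 512
alpha = p * (k - 1.0) / ((1 - p) * k) - 1.0 / k
rng = np.random.default_rng(0)
# Draw scaled Dirichlet masks: each row sums to k, so the elementwise mean is 1.
masks = k * rng.dirichlet(np.full(k, alpha), size=20000)
print(masks.mean(), masks.var())  # ~1.0 and ~0.25
print((1 - p) / p)                # target variance: 0.25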
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Dynamic evaluation.""" # pylint: disable=missing-docstring # pylint: disable=g-complex-comprehension from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf class Dyneval(object): def __init__(self, grads_and_vars, learning_rate, decay_rate, epsilon): with tf.variable_scope('dyneval'): # convert_to_tensor densifies IndexedSlices self._grads = [tf.convert_to_tensor(grad) for grad, _ in grads_and_vars] self._vars = [var for _, var in grads_and_vars] self._learning_rate = learning_rate self._decay_rate = decay_rate def shadow_vars(): return [ tf.get_variable( var.name.replace('/', '-').replace(':', '-'), var.get_shape(), initializer=tf.zeros_initializer(), trainable=False) for var in self._vars] with tf.variable_scope('save'): self._saves = shadow_vars() with tf.variable_scope('sum_squared_grads'): self._sum_squared_grads = shadow_vars() self._save = self._make_save() self._restore = self._make_restore() # These are for computing an RMSProplike estimate of the variance of # minibatch gradients. Here, this quantity is estimated on the training # set once, while gradient descent happens on validation/test. 
self._num_squared_grads = tf.get_variable( 'num_squared_grads', [], initializer=tf.zeros_initializer(), trainable=False) self._zero_sum_squared_grads = self._make_zero_sum_squared_grads() self._add_squared_grads = self._make_add_squared_grads() self._epsilon = epsilon self._update = self._make_update() def _make_save(self): assignments = [] for save, var in zip(self._saves, self._vars): assignments.append(save.assign(var)) return tf.group(assignments) def _make_restore(self): assignments = [] for save, var in zip(self._saves, self._vars): assignments.append(var.assign(save)) return tf.group(assignments) def _make_update(self): mss = [] gsum = 0.0 count = 0 for sum_squared_grads in self._sum_squared_grads: ms = tf.sqrt(sum_squared_grads / self._num_squared_grads) gsum += tf.reduce_sum(ms) count += tf.reduce_sum(tf.ones_like(ms)) mss.append(ms) gsum = gsum / count assignments = [] for grad, var, save, sum_squared_grads, ms in zip( self._grads, self._vars, self._saves, self._sum_squared_grads, mss): decay_rate = tf.minimum(1.0, self._decay_rate*(ms/gsum)) delta = (-self._learning_rate*grad / (ms + self._epsilon) + decay_rate*(save-var)) assignments.append(var.assign_add(delta)) return tf.group(assignments) def _make_add_squared_grads(self): assignments = [] for sum_squared_grads, grads in zip(self._sum_squared_grads, self._grads): assignments.append(sum_squared_grads.assign_add(tf.square(grads))) return tf.group(assignments + [self._num_squared_grads.assign_add(1)]) def _make_zero_sum_squared_grads(self): assignments = [] for sum_squared_grads in self._sum_squared_grads: assignments.append(sum_squared_grads.assign( tf.zeros_like(sum_squared_grads))) return tf.group(assignments + [self._num_squared_grads.assign(0)]) def save(self): tf.get_default_session().run(self._save) def restore(self): tf.get_default_session().run(self._restore) def update_op(self): return self._update def zero_sum_squared_grads(self): tf.get_default_session().run(self._zero_sum_squared_grads) def add_squared_grads_op(self): return self._add_squared_grads def __enter__(self): self.save() def __exit__(self, type_, value, traceback): self.restore()
lamb-master
lamb/dyneval.py
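A rough usage sketch for the Dyneval class above (not taken from the repo): `model`, `training_feeds`, and `eval_feeds` are hypothetical stand-ins and the hyperparameter values are illustrative; only the Dyneval methods themselves come from the code.

import tensorflow.compat.v1 as tf
from lamb.dyneval import Dyneval

dyneval = Dyneval(model.clipped_grads_and_vars,  # hypothetical model object
                  learning_rate=0.001, decay_rate=0.02, epsilon=1e-5)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # Estimate RMSProp-like gradient statistics on the training set once.
  dyneval.zero_sum_squared_grads()
  for feed in training_feeds:  # hypothetical iterable of feed dicts
    sess.run(dyneval.add_squared_grads_op(), feed_dict=feed)
  # Evaluate with dynamic updates; __enter__ saves the weights, __exit__ restores them.
  with dyneval:
    for feed in eval_feeds:  # hypothetical
      sess.run(dyneval.update_op(), feed_dict=feed)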
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Runner for the lamb model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os import random import traceback from absl import app from absl import flags from absl import logging from lamb import corpus from lamb import lamb_flags from lamb import training from lamb import utils from lamb.vocab import Vocab import numpy as np import six import tensorflow.compat.v1 as tf FLAGS = flags.FLAGS class Experiment(object): """Experiment class.""" def __init__(self, config, experiment_dir, tuner): self._experiment_dir = experiment_dir self._tuner = tuner self._config = config self._training_state = None def _finalize_config(self, config, data, vocab): """Apply data dependent defaults to config.""" config.vocab_size = vocab.size() # TODO(melisgl): Separate the vocabs. config.conditioning_vocab_size = vocab.size() config.eos_index = vocab.eos_index() # Set up max_time_steps. if FLAGS.episodic: # Calculate maximum number of time steps. Add 1 for the end token. config.max_time_steps = min( config.max_time_steps, max([data['training'].max_sentence_length(), data['valid'].max_sentence_length(), data['test'].max_sentence_length()]) + 1) return config def average_metrics(self, metrics_list): n = len(metrics_list) if n > 0: average = {} for key in ['best_xe']: average[key] = sum([metrics[key] for metrics in metrics_list]) / n return average else: return None def valid_metrics(self, metrics): for value in metrics.values(): if math.isnan(value): return False return True def final_measure(self, fold_metrics): average = self.average_metrics(fold_metrics) if not self.valid_metrics(average): return None else: return average['best_xe'] def run_training(self, folds): if self._tuner: self._run_training_with_tuner(folds) else: self._run_training_without_tuner(folds) def _run_training_without_tuner(self, folds): """Simply train. 
Don't modify the configuration settings.""" tf.gfile.MakeDirs(self._experiment_dir) fold_metrics = [] for i, (data, vocab) in enumerate(folds): logging.info('Training on fold %d/%d', i+1, len(folds)) config = self._finalize_config(self._config, data, vocab) metrics, _ = training.train(None, data, vocab, config, self._experiment_dir, seed=FLAGS.seed + i) logging.info('Training on fold %d/%d measure: %s', i+1, len(folds), metrics) fold_metrics.append(metrics) average_metrics = self.average_metrics(fold_metrics) logging.info('Average after %d folds: %s', len(fold_metrics), average_metrics) logging.info('Crossvalidation results:') for i, metrics in enumerate(fold_metrics): logging.info('Fold %i: %s', i, metrics) def _run_training_with_tuner(self, folds): """Train and evaluate based on parameters provided from a tuner.""" try: fold_metrics = [] sum_turns = 0 for i, (data, vocab) in enumerate(folds): logging.info('Training on fold %d/%d', i+1, len(folds)) config = self._finalize_config(self._config, data, vocab) # Setup the experiment directory. exp_actual_dir = self._experiment_dir if len(folds) > 1: exp_actual_dir = os.path.join(exp_actual_dir, '_fold{}'.format(i), '') tf.gfile.MakeDirs(exp_actual_dir) # Train. metrics, turn = training.train( self._tuner, data, vocab, config, exp_actual_dir, seed=FLAGS.seed + i) logging.info('Training on fold %d/%d metrics: %s', i+1, len(folds), metrics) fold_metrics.append(metrics) sum_turns += turn # Report average measure across folds up to now to the tuner. average_metrics = self.average_metrics(fold_metrics) logging.info('Average after %d folds: %s', len(fold_metrics), average_metrics) measure = self.final_measure(fold_metrics) if measure is None: self._tuner.report_done(infeasible=True, infeasible_reason='nan') return if self._tuner.report_measure(measure, global_step=sum_turns+1, metrics=average_metrics): logging.info('Stopping due to tuner request.') break except (tf.errors.ResourceExhaustedError, # Some OOM conditions turn into internal errors. tf.errors.InternalError, # Ignore NaNs detected by clip_by_global_norm. tf.errors.InvalidArgumentError): stack_trace = traceback.format_exc() logging.warning('Reporting trial infeasible because of:\n%s', stack_trace) self._tuner.report_done(infeasible=True, infeasible_reason=stack_trace) return logging.info('Crossvalidation results:') for i, metrics in enumerate(fold_metrics): logging.info('Fold %i: %s', i, metrics) self._tuner.report_done() def _make_fold(training_corpus, valid_corpus, test_corpus): """Create a data, vocab pair.""" data = { 'training': training_corpus, 'valid': valid_corpus, 'test': test_corpus, } vocab = Vocab(data['training'].tokens()) return data, vocab def read_corpus(filename): if FLAGS.word_based: return corpus.read_word_based_corpus( filename, encoding=FLAGS.file_encoding) else: return corpus.read_character_based_corpus( filename, encoding=FLAGS.file_encoding) def main(argv, tuner=None): """Main function.""" assert argv is None or len(argv) == 1, ( 'This program expects no non-option arguments. Got {}.'.format(argv)) tf.enable_resource_variables() lamb_flags.initialize() if FLAGS.use_old_linear_names: utils._BIAS_VARIABLE_NAME = 'biases' # pylint: disable=protected-access utils._WEIGHTS_VARIABLE_NAME = 'weights' # pylint: disable=protected-access # Set seeds. The tensorflow seed is set later. random.seed(FLAGS.seed) np.random.seed(FLAGS.seed) # Load the files. assert FLAGS.training_file, 'No training file specified.' 
training_file_data = read_corpus(FLAGS.training_file) if FLAGS.test_file and FLAGS.eval_on_test: test_file_data = read_corpus(FLAGS.test_file) else: test_file_data = corpus.Corpus(data=[]) # Let's assemble the 'folds': training and eval set combinations, # plus the vocabulary. folds = [] def add_fold(training_corpus, eval_corpus, test_corpus): fold = _make_fold(training_corpus, eval_corpus, test_corpus) logging.info('number of examples in fold %d', len(folds)) logging.info(' training: %d', fold[0]['training'].size()) logging.info(' valid: %d', fold[0]['valid'].size()) logging.info(' test: %d', fold[0]['test'].size()) folds.append(fold) if FLAGS.crossvalidate: logging.info('Doing cross-validation.') assert FLAGS.validation_file == '' # pylint: disable=g-explicit-bool-comparison for _ in six.moves.range(FLAGS.crossvalidation_rounds): for training_set, validation_set in utils.cv_splits( training_file_data.data(), FLAGS.crossvalidation_folds): add_fold(corpus.Corpus(data=training_set), corpus.Corpus(data=validation_set), test_file_data) else: logging.info('Using dedicated eval data.') assert FLAGS.validation_file, 'No eval file specified.' validation_file_data = read_corpus(FLAGS.validation_file) add_fold(training_file_data, validation_file_data, test_file_data) experiment = Experiment(lamb_flags.get_config(), FLAGS.experiment_dir, tuner) experiment.run_training(folds=folds) if __name__ == '__main__': app.run(main)
lamb-master
lamb/main.py
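For reference, this is the shape of a single "fold" consumed by Experiment above, built from tiny in-memory corpora; the example strings are illustrative only, the structure mirrors _make_fold.

from lamb import corpus
from lamb.vocab import Vocab

data = {
    'training': corpus.Corpus(data=['the cat sat', 'on the mat']),
    'valid': corpus.Corpus(data=['held out text']),
    'test': corpus.Corpus(data=[]),
}
vocab = Vocab(data['training'].tokens())
folds = [(data, vocab)]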
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """The training loop.""" # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import random import time from absl import flags from absl import logging from lamb import corpus from lamb import evaluation from lamb import lamb_flags from lamb import lm from lamb import monitoring from lamb import utils from lamb.averaged import Averaged from lamb.dyneval import Dyneval import numpy as np import six import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework nest = contrib_framework.nest FLAGS = flags.FLAGS def _load_checkpoint(checkpoint_filename, extra_vars, trainable_only=False): if tf.gfile.IsDirectory(checkpoint_filename): checkpoint_filename = tf.train.latest_checkpoint(checkpoint_filename) logging.info('Loading checkpoint %s', checkpoint_filename) saveables = (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) + tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS)) if trainable_only: saveables = list(set(saveables) & set(tf.trainable_variables())) # Try to restore all saveables, if that fails try without extra_vars. try: saver = tf.train.Saver(var_list=saveables) saver.restore(tf.get_default_session(), checkpoint_filename) except (ValueError, tf.errors.NotFoundError): logging.info('Missing key in checkpoint. Trying old checkpoint format.') saver = tf.train.Saver(var_list=list(set(saveables) - set(extra_vars))) saver.restore(tf.get_default_session(), checkpoint_filename) def train(tuner, data, vocab, config, experiment_dir, seed=None): """Main training loop. Args: tuner: . data: . vocab: . config: A config object (see get_config()). experiment_dir: Path of a directory where to log training events. seed: suitable for tf.set_random_seed Returns: The second return value of _maybe_report_measure. """ if FLAGS.save_config: config.save(os.path.join(experiment_dir, 'config')) session_config = tf.ConfigProto( log_device_placement=FLAGS.log_device_placement) with tf.Graph().as_default(): tf.set_random_seed(seed) logging.info('Creating the model.') config = lamb_flags.handle_config_defaults(config, lm.LM.num_params) model = lm.LM(config) logging.info('Model created.') if FLAGS.trigger_averaging_turns >= 0: averaged = Averaged(tf.trainable_variables()) else: averaged = None # The monitor and the lr scheduler have some state that we need to # checkpoint in case of preemption. We do that by serializing them into the # graph. 
training_state = utils.TFSerializer('training_state') def sync_training_state_from_graph(): state = training_state.retrieve() logging.info('Loaded training state: %s', state) if state.get('monitor_state', None): monitor.set_state(state['monitor_state']) if state.get('learning_rate_state', None): lr_scheduler.set_state(state['learning_rate_state']) def sync_training_state_to_graph(): state = { # To help maintain backwards compatibility. 'state_version': 1, 'monitor_state': monitor.state(), 'learning_rate_state': lr_scheduler.state() } training_state.store(state) # Checkpoint saving. logging.info('Creating savers.') best_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True) last_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True) best_checkpoint_dir = os.path.join(experiment_dir, 'best/') last_checkpoint_dir = os.path.join(experiment_dir, 'last/') best_checkpoint_filename = os.path.join(best_checkpoint_dir, 'model.ckpt') last_checkpoint_filename = os.path.join(last_checkpoint_dir, 'model.ckpt') # Upon resuming from a checkpoint the saver won't count the old checkpoints # against max_to_keep. Recover its state. best_checkpoint_states = tf.train.get_checkpoint_state(best_checkpoint_dir) last_checkpoint_states = tf.train.get_checkpoint_state(last_checkpoint_dir) if best_checkpoint_states is not None: logging.info('Previous best checkpoint paths: %s', best_checkpoint_states.all_model_checkpoint_paths) best_turn_saver.recover_last_checkpoints( best_checkpoint_states.all_model_checkpoint_paths) if last_checkpoint_states is not None: logging.info('Previous last checkpoint paths: %s', last_checkpoint_states.all_model_checkpoint_paths) last_turn_saver.recover_last_checkpoints( last_checkpoint_states.all_model_checkpoint_paths) def maybe_save_checkpoint(saver, filename): if FLAGS.save_checkpoints: logging.info('Saving checkpoint %s', filename) sync_training_state_to_graph() saver.save(tf.get_default_session(), filename, global_step=model.global_step()) # Callback for monitor. def save_best_checkpoint(): maybe_save_checkpoint(best_turn_saver, best_checkpoint_filename) # Callback for train_loop. def save_last_checkpoint(): maybe_save_checkpoint(last_turn_saver, last_checkpoint_filename) # The monitor keeps track of the best result so far, does early stopping. monitor = monitoring.TrainingMonitor( max_turns=config.turns, tuner=tuner, new_best_fn=save_best_checkpoint, es_turns=FLAGS.early_stopping_turns, es_rampup_turns=FLAGS.early_stopping_rampup_turns, es_slowest_rate=FLAGS.early_stopping_slowest_rate) # Set up the learning rate scheduler lr_scheduler = monitoring.LearningRateScheduler( base_learning_rate=config.learning_rate, monitor=monitor, drop_multiplier=config.drop_learning_rate_multiplier, drop_turns=config.drop_learning_rate_turns, drop_at_turn_at_the_latest=config.drop_learning_rate_at_the_latest) with tf.Session(config=session_config) as sess: logging.info('Initializing model.') sess.run(tf.global_variables_initializer()) # Load the checkpoint specified by the user or try to resume from last. 
if FLAGS.load_checkpoint: checkpoint_filename = os.path.join(experiment_dir, FLAGS.load_checkpoint) _load_checkpoint(checkpoint_filename, training_state.variables(), not FLAGS.load_optimizer_state) if FLAGS.load_optimizer_state: sync_training_state_from_graph() if averaged and FLAGS.load_averaged: averaged.switch_to_average() averaged.reset() else: try: _load_checkpoint(last_checkpoint_dir, training_state.variables()) sync_training_state_from_graph() # TODO(melisgl): The training iterator state and last_state are not # saved currently. They should be, of course, but failing that random # initialization of dataset iterators ensures that there is no bias # introduced if training is repeatedly interrupted and continued from # a checkpoint. So use a random seed in this case. random.seed() np.random.seed() except (ValueError, tf.errors.NotFoundError): logging.info('Last checkpoint file %s does not exist.', last_checkpoint_filename) # Takes a lot of space. Disabled for now. # summary_writer = tf.summary.FileWriter( # experiment_dir, graph=sess.graph, # flush_secs=FLAGS.summary_flush_secs) summary_writer = None if FLAGS.dyneval: dyneval = Dyneval(model.clipped_grads_and_vars, learning_rate=FLAGS.dyneval_learning_rate, decay_rate=FLAGS.dyneval_decay_rate, epsilon=FLAGS.dyneval_epsilon) else: dyneval = None if config.turns > 0: logging.info('Starting training.') else: logging.info('Starting testing.') metrics = _train_loop( monitor, lr_scheduler, averaged, dyneval, model, data, vocab, config, summary_writer, save_last_checkpoint) logging.info('Training finished.') return metrics, monitor.turn() def _train_loop(monitor, lr_scheduler, averaged, dyneval, model, data, vocab, config, summary_writer, save_last_checkpoint_fn): source_iterator = corpus.get_batches( data['training'], vocab, config.batch_size, config.max_time_steps, num_samples=config.num_training_samples, episodic=FLAGS.episodic, deterministic=False, conditioning_separator=config.conditioning_separator) last_state = None steps_per_sec = 0.0 def munge_max_batches_flag_value(max_batches): if max_batches == -1: return None else: return max_batches def evaluate0(): # KLUDGE: This depends on monitor calling this function before using the # worst target. 
monitor.set_es_worst_target(es_worst_target()) global_step = model.global_step() logging.info('turn: %s (eval), step: %d (opt) (%.2f/s)', monitor.turn(), global_step, steps_per_sec) if config.accum_batch_size == -1: eval_batch_size = config.batch_size else: eval_batch_size = config.accum_batch_size training_xe, valid_xe, test_xe = evaluation.evaluate_all( model, data, vocab, eval_batch_size, config.max_time_steps, FLAGS.min_non_episodic_eval_examples_per_stripe, munge_max_batches_flag_value(FLAGS.max_training_eval_batches), munge_max_batches_flag_value(FLAGS.max_eval_eval_batches), munge_max_batches_flag_value(FLAGS.max_test_eval_batches), FLAGS.episodic, config.eval_softmax_temperature, config.eval_softmax_temperature_estimation_num_tokens, config.eval_method, config.num_eval_samples, config.eval_power_mean_power, config.eval_dropout_multiplier, config.validation_prediction_file, dyneval, conditioning_separator=config.conditioning_separator) return valid_xe, {'training_xe': training_xe, 'test_xe': test_xe, 'global_step': global_step} def evaluate(): if monitor.averaging_triggered(): with averaged: logging.info('Evaluating with averaged parameters.') return evaluate0() else: return evaluate0() def add_summary(summary_str): if summary_writer is not None: summary_writer.add_summary(summary_str, model.global_step()) def add_summaries_for_metrics(): metrics = monitor.metrics() summary = tf.Summary() for key in metrics: summary.value.add(tag=key, simple_value=metrics[key]) add_summary(summary) # Compute the early stopping worst target. It may change when the learning # rate is dropped. def es_worst_target(): if FLAGS.early_stopping_worst_xe_target is None: return -1.0 else: targets_for_lr_drops = [ float(string) for string in FLAGS.early_stopping_worst_xe_target.split(',') if string ] num_drops = lr_scheduler.num_drops() if targets_for_lr_drops: return targets_for_lr_drops[min(num_drops, len(targets_for_lr_drops)-1)] else: return None def log_summaries(summary): utils.log_scalar_summaries(summary) add_summary(summary) while monitor.next_turn(evaluate): logging.info('metrics: %r', monitor.metrics()) logging.info( 'early stopping: turns: %s, worst xe target: %s, best expected xe: %s', monitor.effective_es_turns(), monitor.es_worst_target(), monitor.best_expected_xe()) add_summaries_for_metrics() # If enough turns passed without improvement, turn on averaging. best_turn = monitor.best_xe_turn() or 0 num_tuns_since_best = monitor.turn() - best_turn if (averaged and ((monitor.turn() > 0 and num_tuns_since_best >= FLAGS.trigger_averaging_turns) or (FLAGS.trigger_averaging_at_the_latest >= 0 and monitor.turn() >= FLAGS.trigger_averaging_at_the_latest))): monitor.set_averaging_triggered(True) start_time = time.time() sum_cost = 0.0 sum_tokens = 0 for _ in range(FLAGS.steps_per_turn): cost, summary, last_state, num_tokens = train_1( model, source_iterator, last_state, learning_rate=lr_scheduler.learning_rate(), accum_batch_size=model.config.accum_batch_size) if monitor.averaging_triggered(): averaged.take_sample() sum_cost += cost sum_tokens += num_tokens # Log summaries at the very beginning of training to make it easier to # debug initialization problems. 
if (model.global_step() == 1 or (model.global_step()+1) % FLAGS.print_training_stats_every_num_steps == 1): log_summaries(summary) logging.info('avg training cost at step %d: %.5f', model.global_step(), sum_cost / sum_tokens) sum_cost = 0.0 sum_tokens = 0 steps_per_sec = FLAGS.steps_per_turn / (time.time()-start_time) # TODO(melisgl): Is this the right frequency for saving? save_last_checkpoint_fn() metrics = monitor.metrics() logging.info('Finished at turn %d for reason: %s', monitor.turn(), monitor.finished_reason()) logging.info('Best XE was %5.5f at turn %d', metrics['best_xe'], metrics['best_xe_turn']) return metrics def train_1(model, source_iterator, last_state, learning_rate, extra_feed=None, accum_batch_size=-1): """Trains model for a a single iteration.""" if accum_batch_size == -1: cond, cond_len, source, source_len, target = next(source_iterator) feed = _make_train_feed(model, cond, cond_len, source, source_len, target, last_state, learning_rate, extra_feed) batch_size = feed[model.source_len].shape[0] num_tokens = feed[model.source_len].sum() cost, summary, last_state = model.fit(feed) return cost*batch_size, summary, last_state, num_tokens else: return _train_1_with_accum(model, source_iterator, last_state, learning_rate, extra_feed, accum_batch_size) def _train_1_with_accum(model, source_iterator, last_state, learning_rate, extra_feed, accum_batch_size): """Trains model for a a single iteration.""" cond, cond_len, source, source_len, target = next(source_iterator) (conds, cond_lens, sources, source_lens, targets, last_states) = _maybe_split_batch( cond, cond_len, source, source_len, target, last_state, accum_batch_size) num_accum_batches = len(sources) cost = 0.0 new_last_states = [] batch_size = 0 num_tokens = 0 for i in six.moves.range(num_accum_batches): cond = conds[i] if cond is not None else None cond_len = cond_lens[i] if cond_len is not None else None source = sources[i] source_len = source_lens[i] target = targets[i] if last_states is not None: last_state = last_states[i] else: last_state = None feed = _make_train_feed(model, cond, cond_len, source, source_len, target, last_state, learning_rate, extra_feed) batch_size1 = feed[model.source_len].shape[0] batch_size += batch_size1 num_tokens += feed[model.source_len].sum() cost1, summary1, last_state1 = model.accumulate_gradients(feed) cost += cost1*batch_size1 new_last_states.append(last_state1) model.fit_accumulated(feed) last_state = _concat_last_states(new_last_states) return cost, summary1, last_state, num_tokens def _make_train_feed(model, cond, cond_len, source, source_len, target, last_state, learning_rate, extra_feed=None): feed = {} model.add_input_to_feed(feed, cond, cond_len, source, source_len, target) model.add_dropout_to_feed(feed) feed.update({ model.num_samples: model.config.num_training_samples, model.learning_rate: learning_rate }) if extra_feed: feed.update(extra_feed) if not FLAGS.episodic and last_state is not None: # At test time we start from zero state, so let's forget the # current state during training too. Simply not feeding the # previous state back would be simpler, but it distorts the # objective too much. 
if model.config.drop_state_probability > 0.0: mask = [None] def ensure_mask(x): if mask[0] is None: mask[0] = np.random.binomial( 1, 1.0-model.config.drop_state_probability, size=[x.shape[0]*model.config.num_training_samples, 1]) return mask[0] last_state = utils.map_nested(lambda x: ensure_mask(x)*x, last_state) feed.update({model.initial_state: last_state}) return feed def _maybe_split_batch(cond, cond_len, source, source_len, target, last_state, accum_batch_size): batch_size = source_len.shape[0] assert batch_size % accum_batch_size == 0 n = batch_size // accum_batch_size return (np.split(cond, n, axis=1) if cond is not None else None, np.split(cond_len, n, axis=0) if cond_len is not None else None, np.split(source, n, axis=1), np.split(source_len, n, axis=0), np.split(target, n, axis=1), _split_last_state(last_state, n) if last_state is not None else None) def _split_last_state(last_state, n): list_of_split_arrays = [np.split(array, n) for array in nest.flatten(last_state)] list_of_split_states = zip(*list_of_split_arrays) return [nest.pack_sequence_as(last_state, split_state) for split_state in list_of_split_states] def _concat_last_states(last_states): list_of_flat_states = [nest.flatten(last_state) for last_state in last_states] flat_list_of_states = zip(*list_of_flat_states) flat_state = [np.concatenate(list_of_states, axis=0) for list_of_states in flat_list_of_states] return nest.pack_sequence_as(last_states[0], flat_state)
lamb-master
lamb/training.py
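The batch-axis splitting convention used for gradient accumulation in _maybe_split_batch above can be illustrated with plain numpy; the shapes below are made up.

import numpy as np

num_steps, batch_size, accum_batch_size = 35, 64, 16
source = np.zeros([num_steps, batch_size], dtype=np.int32)    # [time, batch]
source_len = np.full([batch_size], num_steps, dtype=np.int64)

n = batch_size // accum_batch_size         # number of accumulation micro-batches
sources = np.split(source, n, axis=1)      # each piece is [35, 16]
source_lens = np.split(source_len, n, axis=0)
print(len(sources), sources[0].shape)      # 4 (35, 16)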
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """An LSTM cell.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from lamb import tiled_linear from lamb import utils import six import tensorflow.compat.v1 as tf class TiledLSTMCell(tf.nn.rnn_cell.RNNCell): """An LSTM cell with tiled connections. Supports various connectivity patterns such as the vanilla, dense TiledLinear, and also SparseTiledLinear, LayerNormedTiledLinear. """ def __init__( self, num_units, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, feature_mask_rounds=0, feature_mask_rank=0, tie_gates=False, cap_input_gate=True, layer_norm=False, activation=tf.tanh, input_transform=None, state_transform=None, update_transform=None, tiled_linear_class=None, tiled_linear_var_init_params=None): """Initialize the parameters of a single LSTM layer. Args: num_units: int, The number of hidden units in the layer. use_peepholes: bool, set True to enable diagonal/peephole connections (non implemented). cell_clip: (optional) A float value, if provided the cell state is clipped to be in the [-cell_clip, cell_clip] range prior to the cell output activation. initializer: (optional) The default initializer to use for the weight and projection matrices. num_proj: (optional) int, The output size of the non-linear transformation (usually `h`) of the cell state (`c`). If None, no projection is performed and `h=tanh(c)`. If provided, then `h` is `tanh(c)` projected to `num_proj` dimensions. feature_mask_rounds: Gate the input and the state before they are used for calculating all the other stuff (i.e. i, j, o, f). This allows input features to be reweighted based on the state, and state features to be reweighted based on the input. When feature_mask_rounds is 0, there is no extra gating in the LSTM. When 1<=, the input is gated: x *= 2*sigmoid(affine(h))). When 2<=, the state is gated: h *= 2*sigmoid(affine(x))). For higher number of rounds, the alternating gating continues. feature_mask_rank: If 0, the linear transforms are full rank, dense matrices. If >0, then the matrix representing the linear transform is factorized as the product of two low rank matrices ([*, rank] and [rank, *]). This reduces the number of parameters greatly. tie_gates: Whether to make the input gate one minus the forget gate. cap_input_gate: Whether to cap the input gate at one minus the forget gate (if they are not tied, of course). This ensures 'c' is in [-1,1] and makes training easier especially in the early stages. layer_norm: Whether to use Layer Normalization. activation: Activation function of the inner states. input_transform: None, or a function of one argument that massages the input in some way. For example, variational dropout can be implemted by passing a Dropout object here. 
state_transform: Similar to input_transform, this is applied to the recurrent state. update_transform: Similar to input_transform, this is applied to the proposed update ('j'). tiled_linear_class: A class such as tiled_linear.TiledLinear that's instantiated an unspecified number of times with the same tiled_linear_var_init_params but with possibly different inputs and outputs. If layer_norm is false, the default is tiled_linear.TiledLinear else it's tiled_linear.LayerNormedTiledLinear. tiled_linear_var_init_params: Passed right on to `tiled_linear_class` as the `var_init_params` argument. """ assert not use_peepholes, 'Peepholes are not implemented in LSTMCell.' self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_proj = num_proj self._feature_mask_rounds = feature_mask_rounds self._feature_mask_rank = feature_mask_rank self._tie_gates = tie_gates self._cap_input_gate = cap_input_gate self._layer_norm = layer_norm self._activation = activation self._input_transform = input_transform self._state_transform = state_transform self._update_transform = update_transform if tiled_linear_class is None: if layer_norm: tiled_linear_class = tiled_linear.LayerNormedTiledLinear else: tiled_linear_class = tiled_linear.TiledLinear self._tiled_linear_class = tiled_linear_class self._tiled_linear_var_init_params = tiled_linear_var_init_params self._tiled_linear_mod = None if num_proj: self._output_size = num_proj else: self._output_size = num_units self._state_size = tf.nn.rnn_cell.LSTMStateTuple( num_units, self._output_size) @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size @staticmethod def _do_feature_masking(x, y, num_x, num_y, rounds, rank): for round_ in six.moves.range(rounds): # Even rounds correspond to input transforms. Odd rounds to state # transforms. Implemented this way because feature_mask_rounds=1 with a # single round of transforming the state does not seem to improve things # much. Concurrent updates were also tested, but were not an improvement # either. transforming_x = (round_ % 2 == 0) fm_name = 'fm_' + str(round_) if rank == 0: # full rank case if transforming_x: x *= 2*tf.sigmoid(utils.linear(y, num_x, bias=True, scope=fm_name)) else: y *= 2*tf.sigmoid(utils.linear(x, num_y, bias=True, scope=fm_name)) else: # low-rank factorization case if transforming_x: shape = [num_y, num_x] else: shape = [num_x, num_y] a, b = utils.low_rank_factorization(fm_name + '_weight', shape, rank) bias = tf.get_variable(fm_name + '_bias', shape[1], initializer=tf.zeros_initializer()) if transforming_x: x *= 2*tf.sigmoid(tf.matmul(tf.matmul(y, a), b) + bias) else: y *= 2*tf.sigmoid(tf.matmul(tf.matmul(x, a), b) + bias) return x, y def __call__(self, input_, state, scope=None): """Run one step of LSTM. All tensor arguments are shaped [batch_size, *]. Args: input_: A tensor. state: An LSTMStateTuple. scope: VariableScope for the created subgraph; defaults to `LSTMCell`. Returns: A tuple containing: - A `2-D, [batch, output_dim]`, Tensor representing the output of the LSTM after one time step. Here output_dim is: - num_proj if num_proj was set, - num_units otherwise. - An LSTMStateTuple of Tensors representing the new state of the LSTM after one time step. Raises: ValueError: If input size cannot be inferred from `input_` via static shape inference. 
""" num_units = self._num_units num_proj = num_units if self._num_proj is None else self._num_proj num_inputs = input_.get_shape().with_rank(2)[1] def maybe_transform(transform, x): if transform is None: return x else: return transform(x) with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer): # Apply transformations to the input and the recurrent state. transformed_input = maybe_transform(self._input_transform, input_) transformed_state = maybe_transform(self._state_transform, state.h) # Let's transform the input and state further with 'feature masking'. transformed_input, transformed_state = self._do_feature_masking( transformed_input, transformed_state, num_inputs, num_units, self._feature_mask_rounds, self._feature_mask_rank) inputs = [transformed_input, transformed_state] input_name_and_sizes = [('x', num_inputs), ('h', num_proj)] output_name_and_sizes = [('j', num_units), ('o', num_units), ('f', num_units)] if not self._tie_gates: output_name_and_sizes.append(('i', num_units)) if self._tiled_linear_mod is None: self._tiled_linear_mod = self._tiled_linear_class( input_name_and_sizes, output_name_and_sizes, self._tiled_linear_var_init_params) if self._tie_gates: j_pre, o_pre, f_pre = self._tiled_linear_mod(inputs) else: j_pre, o_pre, f_pre, i_pre = self._tiled_linear_mod(inputs) # Compute the cell state c. f = tf.sigmoid(f_pre) j = self._activation(j_pre) j = maybe_transform(self._update_transform, j) o = tf.sigmoid(o_pre) if self._tie_gates: c = f * state.c + (1-f) * j else: i = tf.sigmoid(i_pre) if self._cap_input_gate: c = f * state.c + tf.minimum(1-f, i) * j else: c = f * state.c + i * j if self._layer_norm: c2 = utils.layer_norm(c, [1], scope='ln_c') else: c2 = c if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type h = o * self._activation(c2) if self._num_proj is not None: h = utils.linear(h, self._num_proj, bias=False, scope='projection') return h, tf.nn.rnn_cell.LSTMStateTuple(c, h)
lamb-master
lamb/tiled_lstm.py
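A minimal sketch of how the cell above might be driven in TF1 graph mode. The hyperparameters are illustrative, and whether the default tiled_linear initialization parameters suffice depends on tiled_linear.TiledLinear, so treat this as an assumption rather than a recipe from the repo.

import tensorflow.compat.v1 as tf
from lamb.tiled_lstm import TiledLSTMCell

inputs = tf.placeholder(tf.float32, [None, 35, 128])  # [batch, time, features]
cell = TiledLSTMCell(num_units=512, feature_mask_rounds=2, feature_mask_rank=32)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)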
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Corpus and simple corpus loaders.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import codecs import random import sys from lamb import utils import numpy as np import six # TODO(melisgl): Just for tf.gfile, beh. import tensorflow.compat.v1 as tf # pylint: disable=missing-docstring class Corpus(object): """An immutable dataset of instances.""" def __init__(self, data): self._data = data def data(self): return self._data def size(self): return len(self._data) # Make `dataset + dataset2` and `dataset + sequence` work. def __add__(self, other): if isinstance(other, Corpus): return type(self)(data=self._data + other.data()) else: return type(self)(data=self._data + other) def instance_iterator(self, shuffle=False, start=None, max_instances=None, max_epochs=None): """Return an iterator over data in the corpus.""" max_instances = max_instances or sys.maxsize max_epochs = max_epochs or sys.maxsize data = self._data n = len(data) permutation = None for _ in six.moves.range(max_epochs): if shuffle: if permutation is None: permutation = list(range(n)) random.shuffle(permutation) if start is None: offset = 0 elif start == 'random': offset = random.randrange(n) elif callable(start): offset = start() else: assert isinstance(start, int) offset = start num_instances_in_this_epoch = min(n, max_instances) for i in six.moves.range(num_instances_in_this_epoch): if permutation is None: yield data[(offset+i)%n] else: yield data[permutation[(offset+i)%n]] max_instances -= num_instances_in_this_epoch if max_instances <= 0: break # Return the more restrictive of max_instances and max_epochs in # units of instances or None if there is no limit. def _effective_max_instances(self, max_instances, max_epochs): if max_instances is not None and max_epochs is not None: return min(max_instances, max_epochs*self.size()) elif max_instances: return max_instances elif max_epochs: return max_epochs*self.size() else: return None # `n` is either the corpus size (i.e. the number of examples in the dataset), # or the number of examples to iterate over with num_iterators, return the # share of the `i`th iterator. def _share_of_iterator(self, n, num_iterators, i): assert 0 <= i and i < num_iterators # TODO(melisgl): Equidistant spacing should take length of # individual examples into account. if n is not None: return (int(((i+1) * n) // num_iterators) - int((i * n) // num_iterators)) else: # `n` is infinity (i.e. None). This is typically the case for training # iterators (max_instances is None). return None def ordered_iterators(self, num_iterators, max_instances=None, max_epochs=None): """Return a number of iterators as multiple read heads into a corpus. 
In a non-episodic setting, where training examples form a sequence (such as the sequence of sentences in a document), one often wants to iterate over the dataset in the original order. So far this can be done with a simple `instance_iterator`. However, when batches of examples are needed, using a single iterator would likely lead to consecutive examples being assigned to the same batch which is bad for training because the examples are then highly correlated, and it also makes techniques that rely on carrying over state such as truncated backpropagation impossible. This function is intended for this batched, non-episodic mode. Create one iterator for each stripe in the batch, and let `max_instances` and `max_epochs` be automatically distributed evenly between all iterators. The iterators' starting offset will be drawn randomly and independently from each other once at the beginning. Args: num_iterators: The number of iterators to return. max_instances: The total number of examples to iterate over (summed over all returned iterators). None means no limit. max_epochs: The number of times to iterator over the corpus. If both `max_instances` and `max_epochs` are specified, the more restrictive is in effect. Returns: `num_iterators` number of iterators. """ max_instances = self._effective_max_instances(max_instances, max_epochs) iterators = [] for i in six.moves.range(num_iterators): iterator_max_instances = self._share_of_iterator( max_instances, num_iterators, i) if iterator_max_instances is None or iterator_max_instances > 0: iterators.append(self.instance_iterator( start='random', max_instances=iterator_max_instances)) return iterators def equidistant_iterators(self, num_iterators, random_starts=False, start_jitter=None, max_instances=None, max_epochs=None): """Like ordered_iterators but keeps the heads approximately equidistant. Args: num_iterators: The number of iterators to return. random_starts: If True, the starting offset of iterators is shifted by the same random number. This is done once when the iterators are created. start_jitter: If not None, then after each epoch the starting offsets are randomized by adding a random integer from (-start_jitter, start_jitter) to them. These jitter offsets are independent from each other. max_instances: The total number of examples to iterate over (summed over all returned iterators). None means no limit. max_epochs: The number of times to iterator over the corpus. If both `max_instances` and `max_epochs` are specified, the more restrictive is in effect. Returns: `num_iterators` number of iterators. """ max_instances = self._effective_max_instances(max_instances, max_epochs) if max_instances is None: n = self.size() else: n = min(self.size(), max_instances) iterators = [] if random_starts: start = random.randrange(n) else: start = 0 for i in six.moves.range(num_iterators): # Note that if `n` (the corpus size) is equal to max_instances, # and max_epochs=1 then all examples will be generated exactly # once, because both `start` and `num_instances` are computed by # _share_of_iterator. 
iterator_max_instances = self._share_of_iterator( max_instances, num_iterators, i) if iterator_max_instances is None or iterator_max_instances > 0: if start_jitter: def start_fn(start=start): # pylint: disable=invalid-unary-operand-type return (start + random.randint(-start_jitter, start_jitter)) % n iterators.append(self.instance_iterator( start=start_fn, max_instances=iterator_max_instances)) else: iterators.append(self.instance_iterator( start=(start % n), max_instances=iterator_max_instances)) # Spread the start positions equidistantly. start += self._share_of_iterator(n, num_iterators, i) return iterators def max_sentence_length(self): """Returns the maximum number of tokens in the longest sentence.""" return max([len(datapoint) for datapoint in self.instance_iterator(max_epochs=1)] # In case the corpus is empty. + [0]) def tokens(self): for instance in self.instance_iterator(max_epochs=1): for token in instance: yield token yield u'\u25bc' def maybe_truncate(seq, n): if n is not None and len(seq) > n: return seq[:n] else: return seq def read_character_based_corpus(filename, encoding='utf-8'): with codecs.getreader(encoding)(tf.gfile.GFile(filename, mode='rb')) as f: return Corpus([line.rstrip('\n') for line in f]) def read_word_based_corpus(filename, encoding='utf-8'): with codecs.getreader(encoding)(tf.gfile.GFile(filename, mode='rb')) as f: return Corpus([line.split() for line in f]) def get_episodic_batches(instance_generator, max_batch_size, vocab, num_steps, num_samples=1, max_batches=None, conditioning_separator=None): instance_generator = utils.repeat(instance_generator, num_samples) max_batch_size *= num_samples is_exhausted = False while not is_exhausted and (max_batches is None or max_batches > 0): if max_batches is not None: max_batches -= 1 instances = [] for _ in six.moves.range(max_batch_size): try: instances.append(next(instance_generator)) except StopIteration: is_exhausted = True break batch_size = len(instances) if batch_size == 0: break if conditioning_separator: cond = np.zeros(shape=[num_steps, batch_size], dtype=np.int32) cond_len = np.zeros(shape=[batch_size], dtype=np.int64) else: cond = None cond_len = None source = np.zeros(shape=[num_steps, batch_size], dtype=np.int32) source_len = np.zeros(shape=[batch_size], dtype=np.int64) target = np.zeros(shape=[num_steps, batch_size], dtype=np.int32) if conditioning_separator: # TODO(melisgl): Separate the vocabs. 
conditioning_vocab = vocab conditioning_eos = [conditioning_vocab.eos_index()] def break_at_separator(seq): if conditioning_separator not in seq: assert False, 'Conditioning separator {} not found in {}.'.format( conditioning_separator, seq) pos = seq.index(conditioning_separator) return seq[:pos], seq[pos+1:] eos = [vocab.eos_index()] def emit(batch_index): instance_text = instances[batch_index] if conditioning_separator: conditioning_text, instance_text = break_at_separator(instance_text) encoded_conditioning = conditioning_vocab.encode( conditioning_text, add_eos=False) encoded_conditioning = maybe_truncate(encoded_conditioning, num_steps-1) cond_n = len(encoded_conditioning) + 1 cond[:cond_n, batch_index] = encoded_conditioning + conditioning_eos cond_len[batch_index] = cond_n encoded_source = vocab.encode(instance_text, add_eos=False) encoded_source = maybe_truncate(encoded_source, num_steps-1) n = len(encoded_source) + 1 source[:n, batch_index] = eos + encoded_source source_len[batch_index] = n target[:n, batch_index] = encoded_source + eos for batch_index in range(batch_size): emit(batch_index) yield (cond, cond_len, source, source_len, target) def get_non_episodic_batches(instance_generators, vocab, num_steps, num_samples=1, max_batches=None, add_eos=True): """Non-episodic.""" num_generators = len(instance_generators) batch_size = num_generators*num_samples # Every element produced by an instance generator is a sequence # that may be shorter or longer than `num_steps`. We use `sources` # as temporary storage for suffixes of those sequences that didn't # fit in the previous batch and also as a general staging area. # # The invariant is that the first element of `sources` is the last # element from the previous batch, so these start out effectively # empty. sources = [[vocab.eos_index()] for _ in six.moves.range(num_generators)] # Whether the corresponding iterator is exhausted. Note that # `texts` may still be non-empty even if the iterator is # exhausted. is_exhausteds = [False] * num_generators def ensure_window(i): # pylint: disable=g-doc-args """Move data from the generator to texts[i]. Ensure that sources[i] has at least num_steps elements available or their iterator is exhausted. """ # To produce a source and target sequence of num_steps we need # num_steps+1 elements due to the source being shifted. while not is_exhausteds[i] and len(sources[i]) <= num_steps: try: text = next(instance_generators[i]) encoded_text = vocab.encode(text, add_eos=add_eos) sources[i].extend(encoded_text) except StopIteration: is_exhausteds[i] = True break def pop_window(i): """Extract num_steps (if available).""" ensure_window(i) # The number of available elements accounting for the special, # first one that's the last element from the previous batch. n = min(num_steps, len(sources[i]) - 1) if n > 0: # Extract data. encoded_source = sources[i][0:n] encoded_target = sources[i][1:n+1] # Remove the extracted data, keeping around the last element # of the target which will be the first of the next source. sources[i] = sources[i][n:] return encoded_source, encoded_target else: return [], [] def emit_batch(): source = np.zeros(shape=[num_steps, batch_size], dtype=np.int32) source_len = np.zeros(shape=[batch_size], dtype=np.int64) target = np.zeros(shape=[num_steps, batch_size], dtype=np.int32) emitted_some = False for j in six.moves.range(num_generators): encoded_source, encoded_target = pop_window(j) n = len(encoded_source) if n > 0: emitted_some = True # Repeat it num_samples times. 
for i in six.moves.range(j*num_samples, (j+1)*num_samples): source_len[i] = n source[:n, i] = encoded_source target[:n, i] = encoded_target return (emitted_some, source, source_len, target) while max_batches is None or max_batches > 0: if max_batches is not None: max_batches -= 1 (emitted_some, source, source_len, target) = emit_batch() if not emitted_some: break # No conditioning for non-episodic. Add Nones for the conditioning part. yield (None, None, source, source_len, target) def get_batches(corpus_, vocab, batch_size, num_steps, num_samples=1, episodic=None, deterministic=None, equidistant=True, max_instances=None, max_epochs=None, max_batches=None, conditioning_separator=None): if episodic: return get_episodic_batches( corpus_.instance_iterator( shuffle=not deterministic, max_instances=max_instances, max_epochs=max_epochs), batch_size, vocab, num_steps, num_samples=num_samples, max_batches=max_batches, conditioning_separator=conditioning_separator) else: if deterministic: iterators = corpus_.equidistant_iterators( batch_size, random_starts=False, start_jitter=None, max_instances=max_instances, max_epochs=max_epochs) elif equidistant: iterators = corpus_.equidistant_iterators( batch_size, random_starts=True, start_jitter=100, max_instances=max_instances, max_epochs=max_epochs) else: iterators = corpus_.ordered_iterators( batch_size, max_instances=max_instances, max_epochs=max_epochs) return get_non_episodic_batches( iterators, vocab, num_steps, num_samples=num_samples, max_batches=max_batches)
lamb-master
lamb/corpus.py
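A small usage sketch for the corpus module above; 'train.txt' is a hypothetical path, and Vocab comes from lamb.vocab as in main.py.

from lamb import corpus
from lamb.vocab import Vocab

train = corpus.read_character_based_corpus('train.txt')  # hypothetical file
vocab = Vocab(train.tokens())
batches = corpus.get_batches(train, vocab, batch_size=32, num_steps=35,
                             episodic=False, deterministic=False)
cond, cond_len, source, source_len, target = next(batches)
print(source.shape)  # (35, 32): [num_steps, batch_size]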
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import tensorflow.compat.v1 as tf


class DummyTest(tf.test.TestCase):

  def testCompilation(self):
    pass


if __name__ == "__main__":
  tf.test.main()
lamb-master
lamb/test/dummy_test.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""

from distutils import cmd
import imp
import os

import pkg_resources
from setuptools import find_namespace_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.command.build_py import build_py

_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))

# Tuple of proto message definitions to build Python bindings for. Paths must
# be relative to root directory.
_DM_ALCHEMY_PROTOS = (
    'dm_alchemy/protos/alchemy.proto',
    'dm_alchemy/protos/trial.proto',
    'dm_alchemy/protos/color_info.proto',
    'dm_alchemy/protos/unity_types.proto',
    'dm_alchemy/protos/episode_info.proto',
    'dm_alchemy/protos/events.proto',
    'dm_alchemy/protos/hypercube.proto',
    'dm_alchemy/encode/chemistries.proto',
    'dm_alchemy/encode/symbolic_actions.proto',
    'dm_alchemy/encode/precomputed_maps.proto')


class _GenerateProtoFiles(cmd.Command):
  """Command to generate protobuf bindings for dm_alchemy."""

  descriptions = 'Generates Python protobuf bindings.'
  user_options = []

  def initialize_options(self):
    pass

  def finalize_options(self):
    pass

  def run(self):
    # Import grpc_tools here, after setuptools has installed setup_requires
    # dependencies.
    from grpc_tools import protoc  # pylint: disable=g-import-not-at-top

    grpc_protos_include = pkg_resources.resource_filename(
        'grpc_tools', '_proto')

    for proto_path in _DM_ALCHEMY_PROTOS:
      proto_args = [
          'grpc_tools.protoc',
          '--proto_path={}'.format(grpc_protos_include),
          '--proto_path={}'.format(_ROOT_DIR),
          '--python_out={}'.format(_ROOT_DIR),
          '--grpc_python_out={}'.format(_ROOT_DIR),
          os.path.join(_ROOT_DIR, proto_path),
      ]
      if protoc.main(proto_args) != 0:
        raise RuntimeError('ERROR: {}'.format(proto_args))


class _BuildExt(build_ext):
  """Generate protobuf bindings in build_ext stage."""

  def run(self):
    self.run_command('generate_protos')
    build_ext.run(self)


class _BuildPy(build_py):
  """Generate protobuf bindings in build_py stage."""

  def run(self):
    self.run_command('generate_protos')
    build_py.run(self)


setup(
    name='dm-alchemy',
    version=imp.load_source('_version', 'dm_alchemy/_version.py').__version__,
    description=('DeepMind Alchemy environment, a meta-reinforcement learning'
                 'benchmark environment for deep RL agents.'),
    author='DeepMind',
    license='Apache License, Version 2.0',
    keywords='reinforcement-learning python machine learning',
    packages=find_namespace_packages(exclude=['examples']),
    package_data={
        'dm_alchemy.encode': ['*.proto'],
        'dm_alchemy.protos': ['*.proto'],
        'dm_alchemy.chemistries': ['**/**'],
        'dm_alchemy.ideal_observer.data': ['**/**'],
        'dm_alchemy.agent_events': ['**'],
    },
    install_requires=[
        'absl-py',
        'dataclasses',
        'dm-env',
        'dm-env-rpc>=1.0.4',
        'dm-tree',
        'docker',
        'grpcio',
        'numpy',
        'scipy>=1.4.0',
        'portpicker',
        'frozendict',
    ],
    tests_require=['nose'],
    python_requires='>=3.6.1',
    setup_requires=['grpcio-tools'],
    extras_require={
        'examples': [
            'pygame',
            'ipykernel',
            'matplotlib',
            'seaborn',
        ]},
    cmdclass={
        'build_ext': _BuildExt,
        'build_py': _BuildPy,
        'generate_protos': _GenerateProtoFiles,
    },
    test_suite='nose.collector',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX :: Linux',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
)
dm_alchemy-master
setup.py
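A rough sketch of how the custom commands registered in the setup.py above might be driven from Python; the working directory (the repository root) and the availability of grpcio-tools and pip are assumptions, not guarantees from the source.

import subprocess

# 'generate_protos' is mapped to _GenerateProtoFiles via cmdclass; build_py and
# build_ext also invoke it, so a plain install regenerates the bindings too.
subprocess.check_call(['python', 'setup.py', 'generate_protos'])
subprocess.check_call(['pip', 'install', '.'])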
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Python utility functions for loading DeepMind Alchemy.""" import codecs import json import math import os import re import subprocess import time from absl import logging import dataclasses from dm_alchemy import partial_array_specs import dm_env from dm_env import specs as array_specs import docker import grpc import numpy as np import portpicker import tree from dm_alchemy.protos import events_pb2 from dm_env_rpc.v1 import connection as dm_env_rpc_connection from dm_env_rpc.v1 import dm_env_adaptor from dm_env_rpc.v1 import dm_env_rpc_pb2 from dm_env_rpc.v1 import error from dm_env_rpc.v1 import tensor_utils # Maximum number of times to attempt gRPC connection. _MAX_CONNECTION_ATTEMPTS = 10 # Port to expect the docker environment to internally listen on. _DOCKER_INTERNAL_GRPC_PORT = 10000 _DEFAULT_DOCKER_IMAGE_NAME = 'gcr.io/deepmind-environments/alchemy:v1.0.0' _ALCHEMY_OBSERVATIONS = ('RGB_INTERLEAVED', 'ACCELERATION', 'HAND_FORCE', 'HAND_IS_HOLDING', 'HAND_DISTANCE', 'Score', 'events') ALCHEMY_LEVEL_NAMES = frozenset(( 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck', 'alchemy/all_fixed', 'alchemy/all_fixed_w_shaping', 'alchemy/perceptual_mapping_randomized_with_random_bottleneck', ) + tuple(f'alchemy/evaluation_episodes/{i}' for i in range(1000))) @dataclasses.dataclass class _ConnectionDetails: channel: grpc.Channel connection: dm_env_rpc_connection.Connection specs: dm_env_rpc_pb2.ActionObservationSpecs def _maybe_as_partial_spec(spec: array_specs.Array): if -1 not in spec.shape: return spec if isinstance(spec, array_specs.BoundedArray): raise ValueError('Partial bounded arrays are not yet handled.') return partial_array_specs.PartialArray(spec.shape, spec.dtype, spec.name) def _unpack_world_event(event): decoded = events_pb2.WorldEvent() if not event.Unpack(decoded): raise ValueError('Event could not be decoded to WorldEvent. {event}'.format( event=str(event))) return decoded class _AlchemyEnv(dm_env_adaptor.DmEnvAdaptor): """An implementation of dm_env_rpc.DmEnvAdaptor for Alchemy env.""" def __init__(self, connection_details, requested_observations, num_action_repeats): super().__init__(connection_details.connection, connection_details.specs, requested_observations) self._channel = connection_details.channel self._num_action_repeats = num_action_repeats self._events = [] def close(self): super().close() self._channel.close() def step(self, action): """Implementation of dm_env.step that supports repeated actions.""" discount = None reward = None self._events = [] for _ in range(self._num_action_repeats): next_timestep = super().step(action) # Accumulate reward per timestep. if next_timestep.reward is not None: reward = (reward or 0.) + next_timestep.reward # Calculate the product for discount. 
if next_timestep.discount is not None: discount = discount if discount else [] discount.append(next_timestep.discount) timestep = dm_env.TimeStep(next_timestep.step_type, reward, # Note: np.product(None) returns None. np.product(discount), next_timestep.observation) self._events.extend([_unpack_world_event(event) for event in timestep.observation['events']]) if timestep.last(): return timestep return timestep def observation_spec(self): return tree.map_structure( _maybe_as_partial_spec, super().observation_spec()) def events(self): return self._events class _AlchemyContainerEnv(_AlchemyEnv): """An implementation of _AlchemyEnv. Ensures that the provided Docker container is closed on exit. """ def __init__(self, container, **base_kwargs): super().__init__(**base_kwargs) self._container = container def close(self): super().close() try: self._container.kill() except docker.errors.NotFound: pass # Ignore, container has already been closed. class _AlchemyProcessEnv(_AlchemyEnv): """An implementation of _AlchemyEnv. Ensures that the provided running process is closed on exit. """ def __init__(self, connection_details, requested_observations, num_action_repeats, process): super().__init__(connection_details, requested_observations, num_action_repeats) self._process = process def close(self): super().close() self._process.terminate() self._process.wait() def _check_grpc_channel_ready(channel): """Helper function to check the gRPC channel is ready N times.""" for _ in range(_MAX_CONNECTION_ATTEMPTS - 1): try: return grpc.channel_ready_future(channel).result(timeout=1) except grpc.FutureTimeoutError: pass return grpc.channel_ready_future(channel).result(timeout=1) def _can_send_message(connection): """Returns if `connection` is healthy and able to process requests.""" try: # This should return a response with an error unless the server isn't yet # receiving requests. connection.send(dm_env_rpc_pb2.StepRequest()) except error.DmEnvRpcError: return True except grpc.RpcError: return False return True def _create_channel_and_connection(port): """Returns a tuple of `(channel, connection)`.""" for i in range(_MAX_CONNECTION_ATTEMPTS): channel = grpc.secure_channel('localhost:{}'.format(port), grpc.local_channel_credentials()) _check_grpc_channel_ready(channel) connection = dm_env_rpc_connection.Connection(channel) if _can_send_message(connection): break else: # A gRPC server running within Docker sometimes reports that the channel # is ready but transitively returns an error (status code 14) on first # use. Giving the server some time to breath and retrying often fixes the # problem. 
connection.close() channel.close() time.sleep(math.pow(1.4, i)) return channel, connection def _parse_exception_message(message): """Returns a human-readable version of a dm_env_rpc json error message.""" try: match = re.match(r'^message\:\ \"(.*)\"$', message) json_data = codecs.decode(match.group(1), 'unicode-escape') parsed_json_data = json.loads(json_data) return ValueError(json.dumps(parsed_json_data, indent=4)) except: # pylint: disable=bare-except return message def _wrap_send(send): """Wraps `send` in order to reformat exceptions.""" try: return send() except ValueError as e: e.args = [_parse_exception_message(e.args[0])] raise def _connect_to_environment(port, settings): """Helper function for connecting to a running Alchemy environment.""" if settings.level_name not in ALCHEMY_LEVEL_NAMES: raise ValueError( 'Level named "{}" is not a valid dm_alchemy level.'.format( settings.level_name)) channel, connection = _create_channel_and_connection(port) original_send = connection.send connection.send = lambda request: _wrap_send(lambda: original_send(request)) world_name = connection.send( dm_env_rpc_pb2.CreateWorldRequest( settings={ 'seed': tensor_utils.pack_tensor(settings.seed), 'episodeId': tensor_utils.pack_tensor(0), 'levelName': tensor_utils.pack_tensor(settings.level_name), 'EventSubscriptionRegex': tensor_utils.pack_tensor('DeepMind/.*'), })).world_name join_world_settings = { 'width': tensor_utils.pack_tensor(settings.width), 'height': tensor_utils.pack_tensor(settings.height), } specs = connection.send( dm_env_rpc_pb2.JoinWorldRequest( world_name=world_name, settings=join_world_settings)).specs return _ConnectionDetails(channel=channel, connection=connection, specs=specs) @dataclasses.dataclass class EnvironmentSettings: """Collection of settings used to start a specific Alchemy level. Required attributes: seed: Seed to initialize the environment's RNG. level_name: Name of the level to load. Optional attributes: width: Width (in pixels) of the desired RGB observation; defaults to 96. height: Height (in pixels) of the desired RGB observation; defaults to 72. num_action_repeats: Number of times to step the environment with the provided action in calls to `step()`. """ seed: int level_name: str width: int = 96 height: int = 72 num_action_repeats: int = 1 def _validate_environment_settings(settings): """Helper function to validate the provided environment settings.""" if settings.num_action_repeats <= 0: raise ValueError('num_action_repeats must have a positive value.') if settings.width <= 0 or settings.height <= 0: raise ValueError('width and height must have a positive value.') def load_from_disk(path, settings, environment_variables=None): """Loads Alchemy from disk. Args: path: Directory containing dm_alchemy environment. settings: EnvironmentSettings required to start the environment. environment_variables: Optional dictionary containing the system environment variables to be set for the new process. The dictionary maps variable names to their string values. Returns: An implementation of dm_env.Environment. Raises: RuntimeError: If unable to start environment process. 
""" environment_variables = environment_variables or {} _validate_environment_settings(settings) executable_path = os.path.join(path, 'Linux64Player') libosmesa_path = os.path.join(path, 'external_libosmesa_llvmpipe.so') if not os.path.exists(executable_path) or not os.path.exists(libosmesa_path): raise RuntimeError( 'Cannot find dm_alchemy executable or dependent files at path: {}' .format(path)) port = portpicker.pick_unused_port() process_flags = [ executable_path, # Unity command-line flags. '-logfile', '-batchmode', '-noaudio', # Other command-line flags. '--logtostderr', '--server_type=GRPC', '--uri_address=[::]:{}'.format(port), ] os.environ.update({ 'UNITY_RENDERER': 'software', 'UNITY_OSMESA_PATH': libosmesa_path, }) os.environ.update(environment_variables) process = subprocess.Popen( process_flags, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) if process.poll() is not None: raise RuntimeError('Failed to start dm_alchemy process correctly.') return _AlchemyProcessEnv( _connect_to_environment(port, settings), _ALCHEMY_OBSERVATIONS, settings.num_action_repeats, process) def load_from_docker(settings, environment_variables=None, name=None): """Loads Alchemy env from docker container. Args: settings: EnvironmentSettings required to start the environment. environment_variables: Optional dictionary containing the system environment variables to be set inside the container. The dictionary maps variable names to their string values. name: Optional name of Docker image that contains the Alchemy environment. If left unset, uses the default name. Returns: An implementation of dm_env.Environment """ _validate_environment_settings(settings) environment_variables = environment_variables or {} name = name or _DEFAULT_DOCKER_IMAGE_NAME client = docker.from_env() port = portpicker.pick_unused_port() try: client.images.get(name) except docker.errors.ImageNotFound: logging.info('Downloading docker image "%s"...', name) client.images.pull(name) logging.info('Download finished.') container = client.containers.run( name, auto_remove=True, detach=True, ports={_DOCKER_INTERNAL_GRPC_PORT: port}, environment=environment_variables) kwargs = dict( connection_details=_connect_to_environment(port, settings), requested_observations=_ALCHEMY_OBSERVATIONS, num_action_repeats=settings.num_action_repeats, container=container) return _AlchemyContainerEnv(**kwargs)
dm_alchemy-master
dm_alchemy/_load_environment.py
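A small sketch of how the loaders above might be used through the public dm_alchemy API. The level name is one of ALCHEMY_LEVEL_NAMES and the settings mirror EnvironmentSettings; the random-action loop and the assumption that action_spec() returns a dict of dm_env specs are illustrative, not prescribed by the source.

import dm_alchemy

settings = dm_alchemy.EnvironmentSettings(
    seed=123,
    level_name='alchemy/perceptual_mapping_randomized_with_random_bottleneck')
# Pulls the default docker image if necessary and connects over gRPC.
env = dm_alchemy.load_from_docker(settings)
try:
  timestep = env.reset()
  while not timestep.last():
    # Placeholder actions; a real agent would choose values per the spec.
    action = {name: spec.generate_value()
              for name, spec in env.action_spec().items()}
    timestep = env.step(action)
finally:
  env.close()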
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the symbolic alchemy wrapper.""" from absl.testing import absltest from dm_alchemy import symbolic_alchemy_wrapper from dm_alchemy.protos import alchemy_pb2 from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import unity_python_conversion from dm_alchemy.types import utils import dm_env import numpy as np from google.protobuf import any_pb2 from dm_alchemy.protos import events_pb2 from dm_alchemy.protos import trial_pb2 Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion AlignedStone = stones_and_potions.AlignedStone PerceivedPotion = stones_and_potions.PerceivedPotion CAULDRON = stones_and_potions.CAULDRON def encode_event(name, event): any_proto = any_pb2.Any() any_proto.Pack(event) world_event = events_pb2.WorldEvent( name=name + ':deepmind.dmworlds.WorldEvent', detail=any_proto) return world_event class Mock3DEnv: """A mock 3d environment which we can set events on.""" def __init__(self): self._new_trial = False self._next_step_new_trial = False self._last_step = True self.chemistry = None self.items = None self._trial_number = -1 self._used_events = [] self._next_step_used_events = [] def set_chemistry_and_items(self, chemistry, items): assert isinstance(chemistry, utils.Chemistry) self.chemistry = chemistry self.items = items def set_new_trial(self): self._next_step_new_trial = True self._trial_number += 1 def set_potion_used(self, potion_instance_id, stone_instance_id): self._next_step_used_events.append( ('DeepMind/Alchemy/PotionUsed', alchemy_pb2.PotionUsed( potion_instance_id=potion_instance_id, stone_instance_id=stone_instance_id))) def set_stone_used(self, stone_instance_id): self._next_step_used_events.append( ('DeepMind/Alchemy/StoneUsed', alchemy_pb2.StoneUsed( stone_instance_id=stone_instance_id))) def events(self): events = [] if self._new_trial: if self._trial_number == 0: unity_chemistry, rotation_mapping = ( unity_python_conversion.to_unity_chemistry( self.chemistry)) events.append( ('DeepMind/Alchemy/ChemistryCreated', alchemy_pb2.ChemistryCreated( chemistry=unity_chemistry, rotation_mapping=rotation_mapping))) else: events.append( ('DeepMind/Trial/TrialEnded', trial_pb2.TrialEnded( trial_id=self._trial_number - 1))) events.append( ('DeepMind/Alchemy/CauldronCreated', alchemy_pb2.CauldronCreated())) for potion in self.items.trials[self._trial_number].potions: latent_potion = potion.latent_potion() perceived_potion = self.chemistry.potion_map.apply_inverse( latent_potion) potion_properties = unity_python_conversion.to_potion_unity_properties( perceived_potion=perceived_potion, latent_potion=latent_potion, graph=self.chemistry.graph) events.append( ('DeepMind/Alchemy/PotionCreated', 
alchemy_pb2.PotionCreated( potion_instance_id=potion.idx, potion_properties=potion_properties))) for stone in self.items.trials[self._trial_number].stones: latent_stone = stone.latent_stone() aligned_stone = self.chemistry.stone_map.apply_inverse(latent_stone) perceived_stone = stones_and_potions.unalign( aligned_stone, self.chemistry.rotation) stone_properties = unity_python_conversion.to_stone_unity_properties( perceived_stone=perceived_stone, latent_stone=latent_stone) events.append( ('DeepMind/Alchemy/StoneCreated', alchemy_pb2.StoneCreated( stone_instance_id=stone.idx, stone_properties=stone_properties))) events.append( ('DeepMind/Trial/TrialStarted', trial_pb2.TrialStarted( trial_id=self._trial_number))) events.extend(self._used_events) return [encode_event(name, event) for name, event in events] def _timestep(self): # The timestep doesn't matter. return dm_env.TimeStep(None, None, None, None) def step(self, unused_action): del unused_action self._used_events = self._next_step_used_events self._next_step_used_events = [] self._new_trial = self._next_step_new_trial self._next_step_new_trial = False if self._last_step: return self.reset() return self._timestep() def reset(self): self._last_step = False self.set_new_trial() return self._timestep() class SymbolicAlchemyWrapperTest(absltest.TestCase): def setUp(self): super().setUp() self.env3d_mock = Mock3DEnv() self.chemistry = utils.Chemistry( potion_map=stones_and_potions.all_fixed_potion_map(), stone_map=stones_and_potions.all_fixed_stone_map(), graph=graphs.create_graph_from_constraint( graphs.no_bottleneck_constraints()[0]), rotation=np.eye(3)) self.items = utils.EpisodeItems( potions=[[Potion(0, 0, 1)], [Potion(1, 2, -1)]], stones=[[Stone(2, [-1, -1, -1])], [Stone(3, [1, 1, 1])]]) self.env3d_mock.set_chemistry_and_items(self.chemistry, self.items) self.wrapper = symbolic_alchemy_wrapper.SymbolicAlchemyWrapper( self.env3d_mock, 'alchemy/perceptual_mapping_randomized_with_random_bottleneck') def test_items_generated_each_trial(self): # Once the trial has started the items in the symbolic environment should # match the ones we let the mock 3d environment generate. self.wrapper.reset() # Action is not important self.wrapper.step(action=None) self.assertEqual( self.wrapper.env_symbolic._chemistry.potion_map, self.chemistry.potion_map) self.assertEqual( self.wrapper.env_symbolic._chemistry.stone_map, self.chemistry.stone_map) self.assertEqual( graphs.constraint_from_graph( self.wrapper.env_symbolic._chemistry.graph), graphs.constraint_from_graph(self.chemistry.graph)) self.assertEqual( self.wrapper.env_symbolic.game_state.existing_items(), self.items.trials[0]) # Check that the items in the second trial are also correct. for trial in self.items.trials[1:]: self.env3d_mock.set_new_trial() self.wrapper.step(action=None) self.assertEqual( self.wrapper.env_symbolic.game_state.existing_items(), trial) def test_potion_used(self): # Reset and take a step to ensure the items are generated. self.wrapper.reset() self.wrapper.step(None) self.assertIsNotNone(self.wrapper.env_symbolic.game_state) self.assertNotEmpty(self.wrapper.env_symbolic.game_state.existing_potions()) # Now ensure that on the next step a potion is used. self.env3d_mock.set_potion_used(potion_instance_id=0, stone_instance_id=2) self.wrapper.step(None) # Now the potion should be gone and the stone should have changed. 
self.assertEmpty(self.wrapper.env_symbolic.game_state.existing_potions()) self.assertEqual( self.wrapper.env_symbolic.game_state.existing_stones(), [Stone(2, [1, -1, -1])]) def test_stone_used(self): # Reset and take a step to ensure the items are generated. self.wrapper.reset() self.wrapper.step(None) # Now ensure that on the next step a stone is used. self.env3d_mock.set_stone_used(stone_instance_id=2) self.wrapper.step(None) # Now the stone should be gone. self.assertEmpty(self.wrapper.env_symbolic.game_state.existing_stones()) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/symbolic_alchemy_wrapper_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Symbolic alchemy tracker which runs bots in sync with environment.""" import copy from typing import Any, Callable, List, Optional, Tuple from dm_alchemy import symbolic_alchemy from dm_alchemy import symbolic_alchemy_bots from dm_alchemy import symbolic_alchemy_trackers from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils import numpy as np import tree AlignedStoneIndex = stones_and_potions.AlignedStoneIndex PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex PerceivedStone = stones_and_potions.PerceivedStone def get_envs_and_bots( env: symbolic_alchemy.SymbolicAlchemy, bot_from_env: Callable[[symbolic_alchemy.SymbolicAlchemy], symbolic_alchemy_bots.SymbolicAlchemyBot], num_bots: int, add_trackers_to_env: Callable[[symbolic_alchemy.SymbolicAlchemy], None] ) -> Tuple[List[symbolic_alchemy.SymbolicAlchemy], List[symbolic_alchemy_bots.SymbolicAlchemyBot]]: """Gets several copies of the environment and bots to run on them. Args: env: The base environment to make copies of. bot_from_env: A callable which given an environment creates a bot to run on it. num_bots: The number of bots and environment copies to make. add_trackers_to_env: A callable which adds any trackers required to the environment copies. Returns: A list of copies of the environment. A list of bots to run on copies of the environment. 
""" env_without_bot_running_trackers = copy.deepcopy(env) if env_without_bot_running_trackers: env_without_bot_running_trackers.trackers = {} add_trackers_to_env(env_without_bot_running_trackers) envs = [copy.deepcopy(env_without_bot_running_trackers) for _ in range(num_bots)] bots = [bot_from_env(e) for e in envs] return envs, bots class BotRunningTracker(symbolic_alchemy_trackers.SymbolicAlchemyTracker): """Run bots which take actions whenever an action is taken in the environment.""" NAME = 'bot_runner' @property def name(self) -> str: return self.NAME def __init__( self, env: symbolic_alchemy.SymbolicAlchemy, bot_from_env: Callable[[symbolic_alchemy.SymbolicAlchemy], symbolic_alchemy_bots.SymbolicAlchemyBot], num_bots: int, add_trackers_to_env: Callable[[symbolic_alchemy.SymbolicAlchemy], None]): self.envs, self.bots = get_envs_and_bots( env, bot_from_env, num_bots, add_trackers_to_env) def episode_start(self, unused_chemistry: utils.Chemistry) -> None: """Resets all environments when an episode has started.""" for env in self.envs: env.reset() def action_and_outcome( self, action: utils.TypeBasedAction, unused_outcome: Optional[PerceivedStone], unused_action_info: symbolic_alchemy_trackers.ActionInfo ) -> None: """Lets bots take an action when an action is taken in the main environment.""" del unused_outcome, unused_action_info # Only take an action if a potion is used (actions to put stones in the # cauldron will happen automatically when the end trial action is selected). if action.using_potion: for bot, env in zip(self.bots, self.envs): new_action = bot.select_action() # Don't select end trial actions as this will be done when the trial # ends in the original environment. if new_action.end_trial: continue symbolic_alchemy.take_simplified_action(new_action, env) def trial_end(self) -> None: """Ends the trial in all copies of the environment.""" for env in self.envs: symbolic_alchemy.take_simplified_action( utils.SlotBasedAction(end_trial=True), env) def episode_returns(self) -> Any: """Gets returns from trackers on environment copies.""" return tree.map_structure(lambda *args: np.mean(args, axis=0), *tuple( env.episode_returns() for env in self.envs)) def default_returns(self, num_trials: int, num_actions_per_trial: int) -> Any: """Gets a set of default returns.""" ep_returns = self.envs[0].episode_returns() def to_float(arg: Any) -> Any: if isinstance(arg, np.ndarray): return arg.astype(float) return float(arg) return tree.map_structure(to_float, ep_returns)
dm_alchemy-master
dm_alchemy/bot_running_tracker.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Wrapper for a 3d alchemy to keep a symbolic alchemy in sync.""" from dm_alchemy import get_meta_data from dm_alchemy import symbolic_alchemy from dm_alchemy.types import event_unpacking from dm_alchemy.types import stones_and_potions from dm_alchemy.types import unity_python_conversion from dm_alchemy.types import utils import dm_env def _add_to_obs(obs, to_add, name): if isinstance(obs, tuple): return obs + (to_add,) if isinstance(obs, dict): obs[name] = to_add return obs if isinstance(obs, list): return obs + [to_add] # If it is not already a tuple, dict or list, then make it a tuple. return obs, to_add class SymbolicAlchemyWrapper(dm_env.Environment): """Take a 3d alchemy environment and keep a symbolic env in sync with it.""" def __init__( self, env3d, level_name, see_chemistries=None, see_symbolic_observation=False): self.env3d = env3d value_coefficients, value_offset, _, bonus, _ = get_meta_data.to_meta_data( level_name) reward_weights = stones_and_potions.RewardWeights( coefficients=value_coefficients, offset=value_offset, bonus=bonus) self.env_symbolic = symbolic_alchemy.SymbolicAlchemy( chemistry_gen=lambda: self.chemistry, reward_weights=reward_weights, items_gen=lambda unused_trial_number: self.items, num_trials=10, see_chemistries=see_chemistries, observe_used=True, ) self.items = utils.TrialItems(stones=[], potions=[]) self._perceived_stones = [] self._perceived_potions = [] self.chemistry = None self.see_symbolic_observation = see_symbolic_observation self._trial_in_progress = False self._trial_has_started = False def process_step_events(self, events): for event in events: if 'TrialEnded' in event.name: self._trial_has_started = False self.items = utils.TrialItems(stones=[], potions=[]) self._perceived_stones = [] self._perceived_potions = [] elif 'TrialStarted' in event.name: self._trial_has_started = True # At this point we should have all stones and potions and the chemistry. aligned_stones = [ stones_and_potions.align(stone, self.chemistry.rotation) for stone, _ in self._perceived_stones] latent_stones = [self.chemistry.stone_map.apply(stone) for stone in aligned_stones] stones = [ stones_and_potions.Stone(i, stone.latent_coords) for (_, i), stone in zip(self._perceived_stones, latent_stones)] latent_potions = [self.chemistry.potion_map.apply(potion) for potion, _ in self._perceived_potions] potions = [ stones_and_potions.Potion(i, potion.latent_dim, potion.latent_dir) for (_, i), potion in zip(self._perceived_potions, latent_potions)] self.items = utils.TrialItems(stones=stones, potions=potions) # When we get an event saying that the new trial has started in the 3d # version it should be safe to end the previous trial in the symbolic # version. 
if self._trial_in_progress: self.env_symbolic.end_trial() if self.env_symbolic.is_last_step(): self.env_symbolic.reset() # Once the first trial is started there is always a trial in progress # from then on. self._trial_in_progress = True elif 'PotionUsed' in event.name: potion_inst_id, stone_inst_id = event_unpacking.unpack_potion_used( event) stone_ind = self.env_symbolic.game_state.get_stone_ind( stone_inst=stone_inst_id) potion_ind = self.env_symbolic.game_state.get_potion_ind( potion_inst=potion_inst_id) # Take an action putting the stone in the potion. self.env_symbolic.step_slot_based_action(utils.SlotBasedAction( stone_ind=stone_ind, potion_ind=potion_ind)) elif 'StoneUsed' in event.name: stone_inst_id = event_unpacking.unpack_stone_used(event) stone_ind = self.env_symbolic.game_state.get_stone_ind( stone_inst=stone_inst_id) # Take an action putting the stone in the cauldron. self.env_symbolic.step_slot_based_action(utils.SlotBasedAction( stone_ind=stone_ind, cauldron=True)) elif 'ChemistryCreated' in event.name: chem, rot = event_unpacking.unpack_chemistry_and_rotation(event) self.chemistry = unity_python_conversion.from_unity_chemistry(chem, rot) else: potions = event_unpacking.get_potions([event]) stones = event_unpacking.get_stones([event]) if (potions or stones) and self._trial_has_started: self.items = utils.TrialItems(stones=[], potions=[]) self._perceived_stones = [] self._perceived_potions = [] self._trial_has_started = False self._perceived_potions.extend(potions) self._perceived_stones.extend(stones) def step(self, action) -> dm_env.TimeStep: timestep = self.env3d.step(action) # If a symbolic action has occurred take the action in the symbolic # environment. self.process_step_events(self.env3d.events()) return self.add_observations(timestep) def reset(self) -> dm_env.TimeStep: timestep = self.env3d.reset() self.items = utils.TrialItems(stones=[], potions=[]) self._perceived_stones = [] self._perceived_potions = [] self._trial_has_started = False self.process_step_events(self.env3d.events()) return self.add_observations(timestep) def add_observations(self, timestep): new_observation = timestep.observation symbolic_observation = self.env_symbolic.observation() if self.see_symbolic_observation: new_observation = _add_to_obs( new_observation, symbolic_observation['symbolic_obs'], 'symbolic_obs') for name in self.env_symbolic.see_chemistries.keys(): new_observation = _add_to_obs( new_observation, symbolic_observation[name], name) return dm_env.TimeStep( step_type=timestep.step_type, reward=timestep.reward, discount=timestep.discount, observation=new_observation) def observation_spec(self): obs_spec = self.env3d.observation_spec() if self.see_symbolic_observation: symbolic_obs = self.env_symbolic.observation_spec()['symbolic_obs'] obs_spec = _add_to_obs(obs_spec, symbolic_obs, 'symbolic_obs') for name in self.env_symbolic.see_chemistries.keys(): chem_obs_spec = self.env_symbolic.observation_spec()[name] obs_spec = _add_to_obs(obs_spec, chem_obs_spec, name) return obs_spec def action_spec(self): return self.env3d.action_spec() # Forward other attribute lookups to the 3d environment. def __getattr__(self, name): return getattr(self.env3d, name)
dm_alchemy-master
dm_alchemy/symbolic_alchemy_wrapper.py
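A sketch of attaching the wrapper above to a 3D environment so that a symbolic observation rides along with the 3D one. It assumes the environment is loaded via dm_alchemy.load_from_docker as in the loader module earlier; the seed and the print statement are only illustrative.

import dm_alchemy
from dm_alchemy import symbolic_alchemy_wrapper

level_name = 'alchemy/perceptual_mapping_randomized_with_random_bottleneck'
settings = dm_alchemy.EnvironmentSettings(seed=0, level_name=level_name)
env3d = dm_alchemy.load_from_docker(settings)
env = symbolic_alchemy_wrapper.SymbolicAlchemyWrapper(
    env3d, level_name, see_symbolic_observation=True)
timestep = env.reset()
# 'symbolic_obs' is kept in sync with 3D events (ChemistryCreated,
# PotionUsed, StoneUsed, trial boundaries) as they are unpacked each step.
print(type(timestep.observation))
env.close()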
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Trackers running on symbolic alchemy.""" import abc import collections import copy import itertools from typing import Any, Callable, Dict, Optional, TypeVar from dm_alchemy import event_tracker from dm_alchemy.ideal_observer import ideal_observer from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils import numpy as np Graph = graphs.Graph GameState = event_tracker.GameState NO_OUTCOME = event_tracker.NO_OUTCOME PerceivedStone = stones_and_potions.PerceivedStone PerceivedPotion = stones_and_potions.PerceivedPotion AlignedStoneIndex = stones_and_potions.AlignedStoneIndex PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex StoneMap = stones_and_potions.StoneMap PotionMap = stones_and_potions.PotionMap CAULDRON = stones_and_potions.CAULDRON RewardWeights = stones_and_potions.RewardWeights PrecomputedMaps = precomputed_maps.PrecomputedMaps # For typing symbolic_alchemy = Any ActionInfo = collections.namedtuple( 'ActionInfo', 'original_action has_stone has_potion') # Create a type which can refer to anything derived from SymbolicAlchemyTracker BaseOrDerivedTracker = TypeVar( 'BaseOrDerivedTracker', bound='SymbolicAlchemyTracker') class SequenceStatsTracker: """Tracks how a statistic changes throughout an episode.""" def __init__( self, tracker: BaseOrDerivedTracker, get_stat: Callable[[BaseOrDerivedTracker], Any], default_stat: Any = 0): self._get_stat = get_stat self._tracker = tracker self.stats = [] self.default_stat = default_stat def track(self) -> None: self.stats.append(self._get_stat(self._tracker)) def reset(self) -> None: self.stats = [] class SymbolicAlchemyTracker: """Object which has functions called for each action in symbolic alchemy.""" @property @abc.abstractmethod def name(self) -> str: pass @property def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]: return {} @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {} def episode_start(self, unused_chemistry: utils.Chemistry) -> None: del unused_chemistry for tracker in itertools.chain( self.per_trial_trackers.values(), self.per_action_trackers.values()): tracker.reset() def trial_start(self, unused_game_state: GameState) -> None: del unused_game_state for tracker in self.per_action_trackers.values(): tracker.track() def action_and_outcome( self, unused_action: utils.TypeBasedAction, unused_outcome: Optional[PerceivedStone], unused_action_info: ActionInfo ) -> None: del unused_action, unused_outcome, unused_action_info for tracker in self.per_action_trackers.values(): tracker.track() def trial_end(self) -> None: for tracker in self.per_trial_trackers.values(): tracker.track() def episode_returns(self) -> Any: return {k: tuple(tracker.stats) for k, tracker in 
itertools.chain(self.per_trial_trackers.items(), self.per_action_trackers.items())} def default_returns( self, num_trials: int, num_actions_per_trial: int ) -> Any: """Returns some default values for the tracker.""" per_trial = zip( self.per_trial_trackers.items(), itertools.repeat(num_trials)) num_actions = num_trials * (num_actions_per_trial + 1) per_action = zip( self.per_action_trackers.items(), itertools.repeat(num_actions)) return {k: tuple(tracker.default_stat for _ in range(expected_length)) for (k, tracker), expected_length in itertools.chain( per_trial, per_action)} StatTrackerOrDerived = TypeVar('StatTrackerOrDerived', bound='StatTracker') GetStat = Callable[[StatTrackerOrDerived, utils.TypeBasedAction, Optional[PerceivedStone], ActionInfo], Any] Condition = Callable[[utils.TypeBasedAction, Optional[PerceivedStone], ActionInfo], bool] class StatTracker(SymbolicAlchemyTracker): """Tracks a statistic each time an action occurs.""" def __init__(self, get_stat: GetStat, init_step_stat: Any = 0): self._get_stat = get_stat self.cumul_action_occurred = copy.deepcopy(init_step_stat) self.last_step_stat = copy.deepcopy(init_step_stat) self._init_step_stat = init_step_stat self.per_action_tracker = SequenceStatsTracker( self, lambda tracker: tracker.last_step_stat, copy.deepcopy(self._init_step_stat)) self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.cumul_action_occurred, copy.deepcopy(self._init_step_stat)) @property def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_action': self.per_action_tracker} @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_trial': self.per_trial_tracker} def action_and_outcome( self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo ) -> None: self.last_step_stat = self._get_stat(self, action, outcome, action_info) self.cumul_action_occurred += self.last_step_stat super().action_and_outcome(action, outcome, action_info) def trial_end(self) -> None: super().trial_end() self.cumul_action_occurred = copy.deepcopy(self._init_step_stat) class SpecificActionTracker(StatTracker): """Counts number of actions which satisfy some condition.""" def __init__(self, condition: Condition): def get_stat( unused_tracker: StatTracker, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo) -> int: return 1 if condition(action, outcome, action_info) else 0 super().__init__(get_stat=get_stat) class NoChangeActionTracker(SpecificActionTracker): """Counts number of actions which do not cause stone to change.""" NAME = 'no_change' @property def name(self) -> str: return self.NAME def __init__(self): def condition( action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], unused_action_info: ActionInfo) -> bool: del unused_action_info return (all(stone is not None for stone in [outcome, action.perceived_stone]) and action.perceived_stone == outcome) super().__init__(condition=condition) class NegStoneCashedTracker(SpecificActionTracker): """Counts number of times a negative stone is put in the cauldron.""" NAME = 'neg_stone' @property def name(self) -> str: return self.NAME def __init__(self): def condition( action: utils.TypeBasedAction, unused_outcome: Optional[PerceivedStone], unused_action_info: ActionInfo ) -> bool: del unused_outcome, unused_action_info return (action.cauldron and action.perceived_stone is not None and action.perceived_stone.reward < 0) super().__init__(condition=condition) class 
CashedStoneValueTracker(SymbolicAlchemyTracker): """Counts average value of cashed stone.""" NAME = 'cashed_stone_value' @property def name(self) -> str: return self.NAME def __init__( self, reward_weights: RewardWeights, stone_map: StoneMap, rotation: np.ndarray): self._stone_map = stone_map self._rotation = rotation self.average_stone_value = 0.0 self._num_stones_cashed = 0 self._reward_weights = reward_weights self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.average_stone_value, 0.0) @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_trial': self.per_trial_tracker} def action_and_outcome( self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo ) -> None: if action.cauldron and action.using_stone: aligned_stone = stones_and_potions.align( action.perceived_stone, self._rotation) latent_stone = self._stone_map.apply(aligned_stone) self.average_stone_value += self._reward_weights( latent_stone.latent_coords) self._num_stones_cashed += 1 super().action_and_outcome(action, outcome, action_info) def trial_end(self) -> None: if self._num_stones_cashed > 0: self.average_stone_value /= self._num_stones_cashed super().trial_end() self.average_stone_value = 0.0 self._num_stones_cashed = 0 class ChangeGoldstoneTracker(SpecificActionTracker): """Counts number of times a goldstone is changed to something else.""" NAME = 'gold_changed' @property def name(self) -> str: return self.NAME def __init__(self, threshold: int = 2): def condition( action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], unused_action_info: ActionInfo) -> bool: del unused_action_info if not action.using_stone or not action.using_potion: return False stone_reward = (action.perceived_stone.reward if action.perceived_stone else 0) return outcome is not None and stone_reward > threshold > outcome.reward super().__init__(condition=condition) def pos_stone_not_cashed_tracker_name( lb: int = 0, ub: Optional[int] = None ) -> str: if lb == 0 and ub is None: return 'pos_stone_not_cashed' elif ub is None: return 'stone_above_' + str(lb) + '_not_cashed' return 'stone_between_' + str(lb) + '_and_' + str(ub) + '_not_cashed' class PosStoneNotCashedTracker(SymbolicAlchemyTracker): """Counts number of times a stone with specified reward is not cashed.""" def __init__( self, reward_weights: RewardWeights, lb: int = 0, ub: Optional[int] = None): self.pos_stones_at_end = 0 self._condition = lambda r: lb < r < ub if ub is not None else lb < r self._game_state = None self._reward_weights = reward_weights self.lb = lb self.ub = ub self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.pos_stones_at_end) @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_trial': self.per_trial_tracker} @property def name(self) -> str: return pos_stone_not_cashed_tracker_name(self.lb, self.ub) def trial_start(self, game_state: GameState) -> None: self._game_state = game_state super().trial_start(game_state) def trial_end(self) -> None: self.pos_stones_at_end = len( [s for s in self._game_state.existing_stones() if self._condition(self._reward_weights(s.latent))]) super().trial_end() class StoneImprovementTracker(SymbolicAlchemyTracker): """Counts number of times a goldstone is changed to something else.""" # pylint: disable=protected-access # TODO(b/173784755): avoid protected access by using event tracker to tracker # latest slot based action. 
NAME = 'stone_improvement' @property def name(self) -> str: return self.NAME def __init__( self, reward_weights: RewardWeights, stone_map: StoneMap, rotation: np.ndarray): self._stone_map = stone_map self._rotation = rotation self.average_stone_improvement = 0.0 self._reward_weights = reward_weights self._game_state = None self._start_rewards = {} self._end_rewards = {} self._prev_existing_stones = set() self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.average_stone_improvement, 0.0) @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_trial': self.per_trial_tracker} def action_and_outcome( self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo ) -> None: if action.cauldron: # We can't get the stone ind as it has already been removed from the game # state, so instead just see what stone ind is missing. missing_stones = self._prev_existing_stones.difference( self._game_state._existing_stones) assert len(missing_stones) == 1, ( 'Should be 1 missing stone when stone is used.') aligned_stone = stones_and_potions.align( action.perceived_stone, self._rotation) latent_stone = self._stone_map.apply(aligned_stone) for ind in missing_stones: self._end_rewards[ind] = self._reward_weights( latent_stone.latent_coords) self._prev_existing_stones = copy.deepcopy( self._game_state._existing_stones) super().action_and_outcome(action, outcome, action_info) def trial_start(self, game_state: GameState) -> None: self._game_state = game_state self._prev_existing_stones = copy.deepcopy( self._game_state._existing_stones) self._start_rewards = { i: self._reward_weights(self._game_state.get_stone(i).latent) for i in self._prev_existing_stones} super().trial_start(game_state) def trial_end(self) -> None: stone_improvements = [reward - self._start_rewards[idx] for idx, reward in self._end_rewards.items()] self.average_stone_improvement = ( 0.0 if not stone_improvements else np.mean(stone_improvements)) super().trial_end() self.average_stone_improvement = 0.0 self._start_rewards = {} self._end_rewards = {} # pylint: enable=protected-access class AddMatrixEventTracker(SymbolicAlchemyTracker): """Adds a matrix event tracker per trial and add these to episode returns.""" NAME = 'matrix_event' @property def name(self) -> str: return self.NAME def __init__(self): self._event_trackers = None self.game_state = None self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.game_state.trackers[self.name], event_tracker.MatrixEventTracker(1, 1)) @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'event_tracker': self.per_trial_tracker} def trial_start(self, game_state: GameState) -> None: matrix_event_tracker = event_tracker.MatrixEventTracker( game_state.num_stones, game_state.num_potions) self.game_state = game_state game_state.add_event_trackers([matrix_event_tracker]) super().trial_start(game_state) class ItemGeneratedTracker(SymbolicAlchemyTracker): """Tracks the items generated during the episode.""" NAME = 'items_generated' @property def name(self) -> str: return self.NAME def __init__(self): self.trials = None self.per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.trials, utils.TrialItems(stones=[], potions=[])) @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'trials': self.per_trial_tracker} def trial_start(self, game_state: GameState) -> None: self.trials = copy.deepcopy(game_state.existing_items()) 
super().trial_start(game_state) def episode_returns(self) -> Any: items = utils.EpisodeItems([], []) items.trials = super().episode_returns()['trials'] return items class ScoreTracker(StatTracker): """Adds a reward tracker and return reward per trial.""" NAME = 'score' @property def name(self) -> str: return self.NAME def __init__(self, reward_weights: RewardWeights): self._reward_weights = reward_weights self.prev_reward = 0 self.game_state = None def latest_reward(tracker, *unused_args, **unused_kwargs): del unused_args, unused_kwargs cumul_reward = tracker.game_state.trackers['reward'].reward reward = cumul_reward - tracker.prev_reward tracker.prev_reward = cumul_reward return reward super().__init__(get_stat=latest_reward) def trial_start(self, game_state: GameState) -> None: reward_tracker = event_tracker.RewardTracker(self._reward_weights) self.game_state = game_state game_state.add_event_trackers([reward_tracker]) self.prev_reward = 0 super().trial_start(game_state) class ItemsUsedTracker(StatTracker): """Tracks what stones and potions are used.""" NAME = 'items_used' @property def name(self) -> str: return self.NAME def __init__(self): self.prev_items = np.zeros((2,), dtype=int) self.game_state: Optional[GameState] = None def latest_items_used( tracker: 'ItemsUsedTracker', unused_action: utils.TypeBasedAction, unused_outcome: Optional[PerceivedStone], unused_action_info: ActionInfo ) -> np.ndarray: del unused_action, unused_outcome, unused_action_info items_used = tracker.game_state.trackers['items_used'] cumul_items_used = np.array( [items_used.num_potions_used, items_used.num_stones_used], dtype=int) items_used = cumul_items_used - tracker.prev_items tracker.prev_items = cumul_items_used return items_used super().__init__(get_stat=latest_items_used, init_step_stat=np.zeros((2,), dtype=int)) def trial_start(self, game_state: GameState) -> None: self.game_state = game_state game_state.add_event_trackers([event_tracker.ItemsUsedTracker()]) self.prev_items = np.zeros((2,), dtype=int) super().trial_start(game_state) TrialExtraInfo = collections.namedtuple( 'TrialExtraInfo', 'num_world_states num_potion_maps num_stone_maps num_graphs') class BeliefStateTracker(SymbolicAlchemyTracker): """Adds a belief state which is updated to a symbolic alchemy bot.""" NAME = 'belief_state' @property def name(self) -> str: return self.NAME def __init__( self, precomputed: PrecomputedMaps, env: 'symbolic_alchemy.SymbolicAlchemy', init_belief_state=None): self.precomputed = precomputed self.belief_state = None self._init_belief_state = ( init_belief_state or ideal_observer.BeliefStateWithRotation( self.precomputed)) self._extra_info = None self._world_states_per_action = None self._env = env self.extra_info_per_action_tracker = SequenceStatsTracker( self, lambda tracker: tracker.extra_info, TrialExtraInfo( num_world_states=0, num_stone_maps=0, num_potion_maps=0, num_graphs=0)) self.extra_info_per_trial_tracker = SequenceStatsTracker( self, lambda tracker: tracker.extra_info, TrialExtraInfo( num_world_states=0, num_stone_maps=0, num_potion_maps=0, num_graphs=0)) @property def per_action_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'per_action_extra_info': self.extra_info_per_action_tracker} @property def per_trial_trackers(self) -> Dict[str, SequenceStatsTracker]: return {'extra_info': self.extra_info_per_trial_tracker} def episode_start(self, unused_chemistry: utils.Chemistry): self.belief_state = copy.deepcopy(self._init_belief_state) super().episode_start(unused_chemistry) def 
trial_start(self, game_state: GameState) -> None: current_stones = collections.Counter(self._env.perceived_stones()) current_potions = collections.Counter(self._env.perceived_potions()) self.belief_state.new_trial(current_stones, current_potions) super().trial_start(game_state) def action_and_outcome( self, action: utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo ) -> None: # A stone value of -1 indicates that the action was invalid if not action.using_stone: super().action_and_outcome(action, outcome, action_info) return if action.perceived_stone is None: raise ValueError('Action says using stone but perceived stone is None.') # An outcome of -1 means the stone did not change. current_outcome = outcome or action.perceived_stone assert current_outcome is not None if action.using_potion: self.belief_state.action_and_outcome( action.perceived_stone, action.perceived_potion, current_outcome, self.precomputed) super().action_and_outcome(action, outcome, action_info) @property def extra_info(self) -> TrialExtraInfo: return TrialExtraInfo( num_world_states=self.belief_state.num_world_states, num_potion_maps=self.belief_state.num_potion_maps, num_stone_maps=self.belief_state.num_stone_maps, num_graphs=self.belief_state.num_graphs) def get_partial_potion_map( self, index_to_perm_index: np.ndarray ) -> stones_and_potions.PartialPotionMap: return self.belief_state.partial_potion_map(index_to_perm_index) def get_partial_stone_map(self) -> stones_and_potions.PartialStoneMap: return self.belief_state.partial_stone_map() def get_partial_graph( self, possible_partial_graph_indices: np.ndarray ) -> graphs.PartialGraph: return self.belief_state.partial_graph(possible_partial_graph_indices)
dm_alchemy-master
dm_alchemy/symbolic_alchemy_trackers.py
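A tiny, self-contained sketch of the SequenceStatsTracker helper defined in the trackers module above. The _Counter class is a stand-in for a real tracker object and exists only for illustration; in practice the helper is driven by the trial_start/action_and_outcome/trial_end callbacks.

class _Counter:
  def __init__(self):
    self.value = 0


counter = _Counter()
seq_tracker = SequenceStatsTracker(counter, lambda c: c.value, default_stat=0)
for step in range(3):
  counter.value += step
  # Record the current statistic, as the trackers do once per action or trial.
  seq_tracker.track()
print(seq_tracker.stats)  # [0, 1, 3]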
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Provide meta data about symbolic alchemy levels."""

# Each latent dimension contributes equal value to the stone reward.
_VALUE_COEFFICIENTS = [1, 1, 1]
_VALUE_OFFSET = 0
# Getting the best stone increases the value by 12.
_BONUS = 12


def to_meta_data(level_name: str):
  potion_reward = 1 if 'shaping' in level_name else 0
  vary_spawns = 'vary_spawns' in level_name
  return _VALUE_COEFFICIENTS, _VALUE_OFFSET, potion_reward, _BONUS, vary_spawns
dm_alchemy-master
dm_alchemy/get_meta_data.py
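For example, the metadata above can be unpacked into the reward weighting used elsewhere in the package; the RewardWeights keyword arguments mirror the call in symbolic_alchemy_wrapper.py, but treat this as an illustrative sketch rather than canonical plumbing.

from dm_alchemy import get_meta_data
from dm_alchemy.types import stones_and_potions

coefficients, offset, potion_reward, bonus, vary_spawns = (
    get_meta_data.to_meta_data('alchemy/all_fixed_w_shaping'))
# 'shaping' levels reward potion use; this level name has no 'vary_spawns'.
assert potion_reward == 1 and not vary_spawns
reward_weights = stones_and_potions.RewardWeights(
    coefficients=coefficients, offset=offset, bonus=bonus)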
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Package version for dm_alchemy.

Kept in separate file so it can be used during installation.
"""

__version__ = '1.0.0'  # https://www.python.org/dev/peps/pep-0440/
dm_alchemy-master
dm_alchemy/_version.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_alchemy.load_from_disk."""

from absl import flags
from absl.testing import absltest
from absl.testing import parameterized

import dm_alchemy
from dm_env import test_utils

FLAGS = flags.FLAGS
flags.DEFINE_string('path', '',
                    'Directory that contains dm_alchemy environment.')

_TEST_LEVEL = ('alchemy/perceptual_mapping_randomized_with_rotation_and_random'
               '_bottleneck')

_TEST_LEVELS = (
    'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck',
    'alchemy/all_fixed',
    'alchemy/all_fixed_w_shaping',
    'alchemy/evaluation_episodes/321',
)


class LoadFromDiskTest(test_utils.EnvironmentTestMixin, absltest.TestCase):

  def make_object_under_test(self):
    return dm_alchemy.load_from_disk(
        FLAGS.path,
        settings=dm_alchemy.EnvironmentSettings(
            seed=123, level_name=_TEST_LEVEL))


class AlchemyTest(parameterized.TestCase):

  @parameterized.parameters(*_TEST_LEVELS)
  def test_load_level(self, level_name):
    self.assertIsNotNone(
        dm_alchemy.load_from_disk(
            FLAGS.path,
            settings=dm_alchemy.EnvironmentSettings(
                seed=123, level_name=level_name)))


if __name__ == '__main__':
  absltest.main()
dm_alchemy-master
dm_alchemy/load_from_disk_test.py
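A sketch of loading the environment outside the test harness above; the directory path is a placeholder for wherever the downloaded dm_alchemy environment lives, and the settings mirror LoadFromDiskTest:

# Hedged sketch; '/path/to/dm_alchemy_env' is a placeholder, not a real path.
import dm_alchemy

settings = dm_alchemy.EnvironmentSettings(
    seed=123,
    level_name='alchemy/perceptual_mapping_randomized_with_rotation_and_random'
               '_bottleneck')
env = dm_alchemy.load_from_disk('/path/to/dm_alchemy_env', settings=settings)
timestep = env.reset()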
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Read and write serialized protos relative to the package directory."""

import inspect
import os

_ROOT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe()))


def read_proto(filename: str):
  with open(os.path.join(_ROOT_DIR, filename), 'rb') as f:
    return f.read()


def write_proto(filename: str, serialized):
  with open(os.path.join(_ROOT_DIR, filename), 'wb') as f:
    f.write(serialized)
dm_alchemy-master
dm_alchemy/io.py
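A round-trip sketch for the io helpers above; the filename is hypothetical and resolves relative to the installed package directory (_ROOT_DIR):

# Hedged sketch; 'example.proto' is a made-up filename inside the package dir.
from dm_alchemy import io

payload = b'\x08\x01'  # stands in for any serialized proto bytes
io.write_proto('example.proto', payload)
assert io.read_proto('example.proto') == payload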
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Python utilities for running dm_alchemy.""" from dm_alchemy import _load_environment from dm_alchemy._version import __version__ EnvironmentSettings = _load_environment.EnvironmentSettings LEVEL_NAMES = _load_environment.ALCHEMY_LEVEL_NAMES load_from_disk = _load_environment.load_from_disk load_from_docker = _load_environment.load_from_docker
dm_alchemy-master
dm_alchemy/__init__.py
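A small sketch of the names this __init__ re-exports; nothing here launches an environment:

# Hedged sketch using only the re-exported names above.
import dm_alchemy

print(dm_alchemy.__version__)                    # '1.0.0' from _version.py
some_level = next(iter(dm_alchemy.LEVEL_NAMES))  # any available level name
settings = dm_alchemy.EnvironmentSettings(seed=0, level_name=some_level)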
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Bots which run on symbolic environment for alchemy.""" import abc import random from typing import Any, Callable, Dict, Sequence, Union from dm_alchemy import event_tracker from dm_alchemy import symbolic_alchemy from dm_alchemy import symbolic_alchemy_trackers from dm_alchemy.ideal_observer import ideal_observer from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion PotionMap = stones_and_potions.PotionMap StoneMap = stones_and_potions.StoneMap AlignedStone = stones_and_potions.AlignedStone PerceivedPotion = stones_and_potions.PerceivedPotion RewardWeights = stones_and_potions.RewardWeights PrecomputedMaps = precomputed_maps.PrecomputedMaps AddMatrixEventTracker = symbolic_alchemy_trackers.AddMatrixEventTracker BeliefStateTracker = symbolic_alchemy_trackers.BeliefStateTracker ScoreTracker = symbolic_alchemy_trackers.ScoreTracker class SymbolicAlchemyBot(abc.ABC): """Bot running on the symbolic alchemy environment.""" def __init__(self, env: symbolic_alchemy.SymbolicAlchemy): self._env = env @abc.abstractmethod def select_action( self ) -> Union[utils.SlotBasedAction, utils.TypeBasedAction]: pass def run_episode(self) -> Dict[str, Any]: """Runs the bot on an episode of the symbolic alchemy env.""" timestep = self._env.reset() while not timestep.last(): action = self.select_action() timestep = symbolic_alchemy.take_simplified_action(action, self._env) return self._env.episode_returns() class IdealObserverBot(SymbolicAlchemyBot): """Bot which runs the ideal observer on the symbolic alchemy environment.""" def __init__( self, reward_weights: RewardWeights, precomputed: PrecomputedMaps, env: symbolic_alchemy.SymbolicAlchemy, minimise_world_states: bool): self._bonus = reward_weights.bonus self._precomputed = precomputed self._minimise_world_states = minimise_world_states super().__init__(env) self._search_results = None def run_episode(self) -> Dict[str, Any]: # Start with no search results. self._search_results = {} return super().run_episode() def select_action(self) -> utils.TypeBasedAction: belief_state_tracker: BeliefStateTracker = ( self._env.trackers['belief_state']) action, _, self._search_results = ideal_observer.ideal_observer( belief_state_tracker.belief_state.belief_state, self._search_results, self._bonus, self._precomputed, self._minimise_world_states) return utils.type_based_action_from_ints( *action, belief_state_tracker.belief_state.rotation) class RandomActionBot(SymbolicAlchemyBot): """Bot which takes random actions on the symbolic alchemy environment. If a stone reaches the maximum value it will not change it further. 
When there are no more potions or all of the stones have reached the maximum possible value then the positive stones are put into the cauldron. """ def __init__( self, reward_weights: RewardWeights, env: symbolic_alchemy.SymbolicAlchemy, threshold_for_leaving: int = 2 ): self._reward_weights = reward_weights self._threshold_for_leaving = threshold_for_leaving super().__init__(env) def select_action(self) -> utils.TypeBasedAction: # Get stones which are not the maximum stones = [s for s in self._env.game_state.existing_stones() if self._reward_weights(s.latent) < self._threshold_for_leaving] potions = self._env.game_state.existing_potions() stones_to_use = [self._env.perceived_stone(s) for s in stones] potions_to_use = [self._env.perceived_potion(p) for p in potions] if not stones_to_use or not potions_to_use: return utils.TypeBasedAction(end_trial=True) return utils.TypeBasedAction( stone=random.sample(stones_to_use, 1)[0], potion=random.sample(potions_to_use, 1)[0]) class ReplayBot(SymbolicAlchemyBot): """Bot which replays a sequence of actions.""" def __init__( self, trial_trackers: Sequence[event_tracker.TrialTracker], env: symbolic_alchemy.SymbolicAlchemy): self._actions = [] for trial_tracker in trial_trackers: self._actions.append([ (stone_ind, potion_ind) for stone_ind, potion_ind, _ in trial_tracker.events_list()]) self._action_num = [0 for _ in trial_trackers] super().__init__(env) def select_action(self) -> utils.SlotBasedAction: if 0 <= self._env.trial_number < len(self._actions): action_num = self._action_num[self._env.trial_number] actions = self._actions[self._env.trial_number] if action_num < len(actions): action = actions[action_num] self._action_num[self._env.trial_number] += 1 stone_ind, potion_ind = action if potion_ind == -1: return utils.SlotBasedAction(stone_ind=stone_ind, cauldron=True) return utils.SlotBasedAction(stone_ind=stone_ind, potion_ind=potion_ind) return utils.SlotBasedAction(no_op=True) class NoOpBot(SymbolicAlchemyBot): """Bot which always selects no op actions.""" def select_action(self) -> utils.SlotBasedAction: return utils.SlotBasedAction(no_op=True) def run_symbolic_alchemy_bot( episode_items: utils.EpisodeItems, chemistry: utils.Chemistry, reward_weights: RewardWeights, bot_from_env: Callable[[symbolic_alchemy.SymbolicAlchemy], SymbolicAlchemyBot], add_trackers_to_env: Callable[[symbolic_alchemy.SymbolicAlchemy], None], ) -> Dict[str, Any]: """Runs a symbolic alchemy bot for 1 episode. Args: episode_items: Named tuple with the fields: init_stones - The stones for each trial. init_potions - The potions for each trial. chemistry: Named tuple with the fields: potion_map - The potion map which is actually present in this episode. stone_map - The stone map which is actually present in this episode. graph - The graph which is actually present in this episode. reward_weights: A callable which gives a reward for some stone coords. bot_from_env: Callable which returns the bot to run given the environment. add_trackers_to_env: Add trackers to the environment. Returns: The results of running the bot for an episode. 
""" env = symbolic_alchemy.get_symbolic_alchemy_fixed( episode_items, chemistry, reward_weights=reward_weights) add_trackers_to_env(env) return bot_from_env(env).run_episode() def get_multi_trial_ideal_observer_reward( episode_items: utils.EpisodeItems, chemistry: utils.Chemistry, reward_weights: RewardWeights, precomputed: PrecomputedMaps, minimise_world_states: bool, add_trackers_to_env: Callable[[symbolic_alchemy.SymbolicAlchemy], None], ) -> Dict[str, Any]: """Applies a greedy policy using ideal observer reward estimates for n trials. Args: episode_items: Named tuple with the fields: init_stones - The stones for each trial. init_potions - The potions for each trial. chemistry: Named tuple with the fields: potion_map - The potion map which is actually present in this episode. stone_map - The stone map which is actually present in this episode. graph - The graph which is actually present in this episode. reward_weights: A callable which gives a reward for some stone coords. precomputed: Precomputed maps used for speed. minimise_world_states: Let the objective be to minimise the number of world states at the end of the trial instead of to maximise the accumulated reward. add_trackers_to_env: Add trackers to the environment. Returns: The reward for each trial. An event tracker for each trial. A dictionary of extra information to record about how the ideal observer ran on each trial. """ def bot_from_env( env: symbolic_alchemy.SymbolicAlchemy) -> IdealObserverBot: return IdealObserverBot( reward_weights, precomputed, env, minimise_world_states) return run_symbolic_alchemy_bot( episode_items, chemistry, reward_weights, bot_from_env, add_trackers_to_env=add_trackers_to_env)
dm_alchemy-master
dm_alchemy/symbolic_alchemy_bots.py
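A sketch that runs the RandomActionBot above on a procedurally generated symbolic environment; the level name, reward weights and seed mirror values used by the tests later in this dump, and no extra trackers are attached:

# Hedged sketch; episode_results holds whatever env.episode_returns() reports
# for the trackers registered on the environment.
from dm_alchemy import symbolic_alchemy
from dm_alchemy import symbolic_alchemy_bots
from dm_alchemy.types import stones_and_potions

reward_weights = stones_and_potions.RewardWeights([1, 1, 1], 0, 12)
env = symbolic_alchemy.get_symbolic_alchemy_level(
    level_name='perceptual_mapping_randomized_with_random_bottleneck',
    reward_weights=reward_weights, seed=0)
bot = symbolic_alchemy_bots.RandomActionBot(reward_weights, env)
episode_results = bot.run_episode()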
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Define versions of dm_env specs with some dimensions of shape unknown.""" from dm_env import specs import numpy as np _INVALID_DTYPE = 'Expected dtype %r but found %r' _INVALID_SHAPE_LEN = 'Shape %r has different length to spec %r' _INVALID_SHAPE = 'Shape %r does not conform to spec %r' class PartialArray(specs.Array): """An `Array` spec with optionally an unknown size on one dimension.""" def __init__(self, shape, dtype, name=None): """Initializes a new `PartialArray` spec. Args: shape: An iterable specifying the array shape with up to 1 dimension with unknown size specified by -1. dtype: numpy dtype or string specifying the array dtype. name: Optional string containing a semantic name for the corresponding array. Defaults to `None`. Raises: ValueError: If there is more than 1 dimension with unknown size or a dimension with value < -1. TypeError: If `shape` is not an iterable of elements convertible to int, or if `dtype` is not convertible to a numpy dtype. """ if any(size < -1 for size in shape): raise ValueError('No entry in shape may be < -1, shape is {}'.format( shape)) if sum(size == -1 for size in shape) > 1: raise ValueError('Only 1 entry in shape may be -1, shape is {}'.format( shape)) super().__init__(shape, dtype, name) def _validate_shape(self, shape): if len(shape) != len(self.shape): self._fail_validation(_INVALID_SHAPE_LEN, shape, self.shape) for array_size, spec_size in zip(shape, self.shape): if spec_size == -1: continue if array_size != spec_size: self._fail_validation(_INVALID_SHAPE, shape, self.shape) def validate(self, value): """Checks if value conforms to this spec. Args: value: a numpy array or value convertible to one via `np.asarray`. Returns: value, converted if necessary to a numpy array. Raises: ValueError: if value doesn't conform to this spec. """ value = np.asarray(value) self._validate_shape(value.shape) if value.dtype != self.dtype: self._fail_validation(_INVALID_DTYPE, self.dtype, value.dtype) return value def generate_value(self): """Generate a test value which conforms to this spec. If the size is -1 on a dimension we can use any positive value, here we use 1. Returns: Test value. """ example_shape = tuple(1 if size == -1 else size for size in self.shape) return np.zeros(shape=example_shape, dtype=self.dtype) def __repr__(self): return 'PartialArray(shape={}, dtype={}, name={})'.format( self.shape, repr(self.dtype), repr(self.name)) def __reduce__(self): return PartialArray, (self._shape, self._dtype, self._name)
dm_alchemy-master
dm_alchemy/partial_array_specs.py
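A short sketch of PartialArray behaviour; a single -1 in the shape accepts any size along that dimension and generate_value substitutes 1 for it:

# Hedged sketch; the spec name 'symbolic_obs' is only illustrative.
import numpy as np
from dm_alchemy import partial_array_specs

spec = partial_array_specs.PartialArray(
    shape=(-1, 3), dtype=np.float32, name='symbolic_obs')
spec.validate(np.zeros((5, 3), dtype=np.float32))  # any leading size conforms
assert spec.generate_value().shape == (1, 3)       # -1 is replaced by 1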
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the ideal observer.""" import random from absl.testing import absltest from absl.testing import parameterized from dm_alchemy import symbolic_alchemy from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils from dm_env import test_utils import numpy as np Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion AlignedStone = stones_and_potions.AlignedStone PerceivedPotion = stones_and_potions.PerceivedPotion PerceivedStone = stones_and_potions.PerceivedStone CAULDRON = stones_and_potions.CAULDRON _NUM_TRIALS = 2 _TRIAL_STONES = [Stone(0, [-1, -1, 1]), Stone(1, [1, 1, 1]), Stone(2, [1, 1, -1])] _TEST_STONES = [_TRIAL_STONES for _ in range(_NUM_TRIALS)] _TRIAL_POTIONS = [Potion(0, 1, 1), Potion(1, 1, -1), Potion(2, 1, 1), Potion(3, 1, 1), Potion(4, 2, 1), Potion(5, 1, 1), Potion(6, 2, -1), Potion(7, 0, 1), Potion(8, 2, -1), Potion(9, 2, 1), Potion(10, 1, 1), Potion(11, 1, -1)] _TEST_POTIONS = [_TRIAL_POTIONS for _ in range(_NUM_TRIALS)] _MAX_STEPS_PER_TRIAL = 20 _FIXED_POTION_MAP = stones_and_potions.all_fixed_potion_map() _FIXED_STONE_MAP = stones_and_potions.all_fixed_stone_map() _FIXED_ROTATION = np.eye(3, dtype=int) _CHEM_NAME = 'test_chem' def random_slot_based_action(): stone_ind = random.randint(0, symbolic_alchemy.MAX_STONES - 1) potion_ind = random.randint(-1, symbolic_alchemy.MAX_POTIONS - 1) if potion_ind < 0: return utils.SlotBasedAction(stone_ind=stone_ind, cauldron=True) return utils.SlotBasedAction(stone_ind=stone_ind, potion_ind=potion_ind) def reward_fcn(): return stones_and_potions.RewardWeights([1, 1, 1], 0, 12) def make_fixed_chem_env( constraint=None, potion_map=_FIXED_POTION_MAP, stone_map=_FIXED_STONE_MAP, rotation=_FIXED_ROTATION, test_stones=None, test_potions=None, **kwargs): if constraint is None: constraint = graphs.no_bottleneck_constraints()[0] env = symbolic_alchemy.get_symbolic_alchemy_fixed( episode_items=utils.EpisodeItems( potions=test_potions or _TEST_POTIONS, stones=test_stones or _TEST_STONES), chemistry=utils.Chemistry( graph=graphs.create_graph_from_constraint(constraint), potion_map=potion_map, stone_map=stone_map, rotation=rotation), reward_weights=reward_fcn(), max_steps_per_trial=_MAX_STEPS_PER_TRIAL, **kwargs) return env def make_random_chem_env(**kwargs): env = symbolic_alchemy.get_symbolic_alchemy_level( level_name='perceptual_mapping_randomized_with_random_bottleneck', reward_weights=reward_fcn(), max_steps_per_trial=_MAX_STEPS_PER_TRIAL, **kwargs) return env def make_random_action_sequence(num_trials, end_trial_action): num_random_actions = 10 assert num_random_actions <= _MAX_STEPS_PER_TRIAL # On each trial take some random actions then end the trial. 
actions = [] for _ in range(num_trials): # Create random actions, some of which may not be possible. actions.extend( [random_slot_based_action() for _ in range(num_random_actions)]) # End the trial if end_trial_action: actions.append(utils.SlotBasedAction(end_trial=True)) else: for _ in range(_MAX_STEPS_PER_TRIAL - num_random_actions): actions.append(utils.SlotBasedAction(no_op=True)) return [symbolic_alchemy.slot_based_action_to_int(action, end_trial_action) for action in actions] def type_based_use_stone(env, perceived_stone, unused_stone): del unused_stone return env.step_type_based_action(utils.TypeBasedAction( stone=perceived_stone, cauldron=True)) def slot_based_use_stone(env, unused_perceived_stone, stone): del unused_perceived_stone return env.step_slot_based_action(utils.SlotBasedAction( stone_ind=stone.idx, cauldron=True)) def type_based_use_potion( env, perceived_stone, unused_stone, perceived_potion, unused_potion): del unused_stone, unused_potion return env.step_type_based_action(utils.TypeBasedAction( stone=perceived_stone, potion=perceived_potion)) def slot_based_use_potion( env, unused_perceived_stone, stone, unused_perceived_potion, potion): del unused_perceived_stone, unused_perceived_potion return env.step_slot_based_action(utils.SlotBasedAction( stone_ind=stone.idx, potion_ind=potion.idx)) class SymbolicAlchemyTest(test_utils.EnvironmentTestMixin): num_trials = _NUM_TRIALS def test_no_op(self): env = self.make_object_under_test() timestep = env.reset() # Perform a no-op new_timestep = env.step_slot_based_action(utils.SlotBasedAction(no_op=True)) np.testing.assert_allclose(timestep.observation['symbolic_obs'], new_timestep.observation['symbolic_obs']) self.assertEqual(new_timestep.reward, 0) def env_mid_trial(self, reset=True, no_op_steps=0): env = self.make_object_under_test() if reset: env.reset() for _ in range(no_op_steps): env.step_slot_based_action(utils.SlotBasedAction(no_op=True)) return env def end_trial_test(self, reset=True, no_op_steps=0): env = self.env_mid_trial(reset=reset, no_op_steps=no_op_steps) self.assertEqual(env.trial_number, 0 if reset else -1) env.end_trial() self.assertEqual(env.trial_number, 1) def test_end_trial(self): # parameterised tests in base class do not work # end trial straight away self.end_trial_test(reset=False) # end trial after each number of no ops from 0 to max steps per trial - 1. # Note if we take all of max steps the trial will end before we call # end_trial. for no_op_steps in range(_MAX_STEPS_PER_TRIAL): self.end_trial_test(no_op_steps=no_op_steps) class SymbolicAlchemyFixedChemTest(SymbolicAlchemyTest): """Test symbolic alchemy using the mixin.""" def use_pos_stones_test(self, expected_reward, reset=True, no_op_steps=0): env = self.env_mid_trial(reset=reset, no_op_steps=no_op_steps) timestep = env.use_positive_stones() self.assertAlmostEqual(timestep.reward, expected_reward, 4) def test_use_positive_stones(self): # parameterised tests in base class do not work # end trial straight away self.use_pos_stones_test(expected_reward=16.0, reset=False) # use positive stones after each number of no ops from 0 to # max steps per trial - 1. For the last one we will only get a reward of 15 # as there is only time to use 1 stone. 
for no_op_steps in range(_MAX_STEPS_PER_TRIAL): exp_reward = 15.0 if no_op_steps == _MAX_STEPS_PER_TRIAL - 1 else 16.0 self.use_pos_stones_test( expected_reward=exp_reward, no_op_steps=no_op_steps) def _test_use_stone(self, take_action): env = self.make_object_under_test() env.reset() num_stone_features, _ = symbolic_alchemy.slot_based_num_features( env.observe_used) default_stone_features, _ = env._default_features() latent_stones = [stone.latent_stone() for stone in _TEST_STONES[0]] aligned_stones = [_FIXED_STONE_MAP.apply_inverse(stone) for stone in latent_stones] perceived_stones = [stones_and_potions.unalign(stone, _FIXED_ROTATION) for stone in aligned_stones] for stone, perceived_stone, latent_stone in zip( _TEST_STONES[0], perceived_stones, latent_stones): new_timestep = take_action(env, perceived_stone, stone) expected_reward = reward_fcn()(latent_stone.latent_coords) self.assertEqual(new_timestep.reward, expected_reward) # Observation should be set to the default. stone_obs = new_timestep.observation['symbolic_obs'][ num_stone_features * stone.idx:num_stone_features * (stone.idx + 1)] for stone_feat, default_stone_feat in zip( stone_obs, default_stone_features[0, :]): self.assertAlmostEqual(stone_feat, default_stone_feat, 4) # After using the stones end the trial end_trial_reward, _ = env.end_trial() self.assertEqual(end_trial_reward, 0) def test_use_stone(self): self._test_use_stone(slot_based_use_stone) self._test_use_stone(type_based_use_stone) def _test_use_potion(self, take_action): env = self.make_object_under_test() env.reset() stone = _TEST_STONES[0][0] potion = _TEST_POTIONS[0][0] aligned_stone = _FIXED_STONE_MAP.apply_inverse(stone.latent_stone()) perceived_stone = stones_and_potions.unalign(aligned_stone, _FIXED_ROTATION) perceived_potion = _FIXED_POTION_MAP.apply_inverse(potion.latent_potion()) new_timestep = take_action( env, perceived_stone, stone, perceived_potion, potion) self.assertEqual(new_timestep.reward, 0) stone_features, _ = symbolic_alchemy.slot_based_num_features( env.observe_used) potion_start_index = stone_features * symbolic_alchemy.MAX_STONES potion0_obs = new_timestep.observation['symbolic_obs'][potion_start_index] self.assertAlmostEqual(potion0_obs, 1.0, 4) stone_obs = new_timestep.observation['symbolic_obs'][:stone_features] # Coords change to -1, 1, 1 and reward changes to 1/max reward self.assertAlmostEqual(stone_obs[0], -1.0, 4) self.assertAlmostEqual(stone_obs[1], 1.0, 4) self.assertAlmostEqual(stone_obs[2], 1.0, 4) self.assertAlmostEqual( stone_obs[3], 1.0 / stones_and_potions.max_reward(), 4) # After using the potion end the trial end_trial_reward, _ = env.end_trial() self.assertEqual(end_trial_reward, 0) def test_use_potion(self): self._test_use_potion(slot_based_use_potion) self._test_use_potion(type_based_use_potion) def make_object_under_test(self): """Make an environment which will be tested by the mixin.""" return make_fixed_chem_env( observe_used=self.observe_used, end_trial_action=self.end_trial_action) def make_action_sequence(self): return make_random_action_sequence(self.num_trials, self.end_trial_action) def test_initial_observation(self): # Type based observation should have scaled count for each type env = self.make_object_under_test() timestep = env.reset() # All fixed so the perceptual mapping is the identity num_axes = stones_and_potions.get_num_axes() stone_features, potion_features = symbolic_alchemy.slot_based_num_features( env.observe_used) for stone in _TEST_STONES[0]: # The features should be the perceptual features then 
the scaled reward stone_obs = timestep.observation['symbolic_obs'][ stone_features * stone.idx:stone_features * (stone.idx + 1)] for dim in range(num_axes): self.assertAlmostEqual(stone_obs[dim], stone.latent[dim], 4) self.assertAlmostEqual( stone_obs[num_axes], sum(stone.latent)/stones_and_potions.max_reward(), 4) if self.observe_used: self.assertAlmostEqual(stone_obs[num_axes + 1], 0.0, 4) # Test that 2 potions of the same type are observed the same, and potions of # different types are observed different. # 0 and 2 are the same, 1 is different. potion_start_index = stone_features * symbolic_alchemy.MAX_STONES potion_obs = [] for i in range(symbolic_alchemy.MAX_POTIONS): feat_start = potion_start_index + i * potion_features feat_end = feat_start + potion_features potion_obs.append( timestep.observation['symbolic_obs'][feat_start:feat_end]) self.assertAlmostEqual(potion_obs[0][0], potion_obs[2][0], 4) self.assertNotAlmostEqual(potion_obs[0][0], potion_obs[1][0], 4) if self.observe_used: self.assertAlmostEqual(potion_obs[0][1], 0.0, 4) class SymbolicAlchemyFixedChemObserveUsedTest( SymbolicAlchemyFixedChemTest): observe_used = True class SymbolicAlchemyFixedChemObserveUsedEndTrialTest( SymbolicAlchemyFixedChemObserveUsedTest, absltest.TestCase): end_trial_action = True class SymbolicAlchemyFixedChemObserveUsedNoEndTrialTest( SymbolicAlchemyFixedChemObserveUsedTest, absltest.TestCase): end_trial_action = False class SymbolicAlchemyFixedChemNoObserveUsedTest( SymbolicAlchemyFixedChemTest): observe_used = False class SymbolicAlchemyFixedChemNoObserveUsedEndTrialTest( SymbolicAlchemyFixedChemNoObserveUsedTest, absltest.TestCase): end_trial_action = True class SymbolicAlchemyFixedChemNoObserveUsedNoEndTrialTest( SymbolicAlchemyFixedChemNoObserveUsedTest, absltest.TestCase): end_trial_action = False class SymbolicAlchemyRandomChemTest(SymbolicAlchemyTest): """Test symbolic alchemy with random chem each episode using the mixin.""" def make_object_under_test(self, **kwargs): """Make an environment which will be tested by the mixin.""" return make_random_chem_env( observe_used=self.observe_used, end_trial_action=self.end_trial_action, num_trials=self.num_trials, **kwargs) def make_action_sequence(self): return make_random_action_sequence( self.num_trials, self.end_trial_action) def test_seed(self): env1 = self.make_object_under_test(seed=0) env1.reset() env2 = self.make_object_under_test(seed=0) env2.reset() self.assertEqual(graphs.constraint_from_graph(env1._chemistry.graph), graphs.constraint_from_graph(env2._chemistry.graph)) self.assertEqual(env1._chemistry.potion_map, env2._chemistry.potion_map) self.assertEqual(env1._chemistry.stone_map, env2._chemistry.stone_map) self.assertEqual(env1.game_state.existing_items(), env2.game_state.existing_items()) class SymbolicAlchemyRandomChemObserveUsedTest( SymbolicAlchemyRandomChemTest): observe_used = True class SymbolicAlchemyRandomChemObserveUsedEndTrialTest( SymbolicAlchemyRandomChemObserveUsedTest, absltest.TestCase): end_trial_action = True class SymbolicAlchemyRandomChemObserveUsedNoEndTrialTest( SymbolicAlchemyRandomChemObserveUsedTest, absltest.TestCase): end_trial_action = False class SymbolicAlchemyRandomChemNoObserveUsedTest( SymbolicAlchemyRandomChemTest): observe_used = False class SymbolicAlchemyRandomChemNoObserveUsedEndTrialTest( SymbolicAlchemyRandomChemNoObserveUsedTest, absltest.TestCase): end_trial_action = True class SymbolicAlchemyRandomChemNoObserveUsedNoEndTrialTest( SymbolicAlchemyRandomChemNoObserveUsedTest, absltest.TestCase): 
end_trial_action = False class SymbolicAlchemySeeChemistryTest(parameterized.TestCase): """We don't do the full mixin tests for the chemistry observation.""" def _make_env(self, see_chemistry, constraint, **kwargs): return make_fixed_chem_env( constraint=constraint, see_chemistries={_CHEM_NAME: see_chemistry}, observe_used=True, end_trial_action=False, **kwargs) @parameterized.parameters( # In the graph observations edges are in the following order: # _________11__________ # /| /| # 9/ | 10/ | # / | / | # /___|_____8_________/ | # | |6 | |7 # | | | | # |2 | |4 | # | |_______5_______|___| # | / | / # | /1 | /3 # | / | / # |/________0_________|/ # # With coordinate system: # | # |z / # | /y # | / # |/___x___ # {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], # With no constraints all edges should be present 'expected_obs': np.ones((12,), np.float32), 'expected_len': 12}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.bottleneck1_constraints()[0], # For bottleneck1 constraint the only x direction edge that exists is 8, # so 0, 5 and 11 are missing. 'expected_obs': np.array([0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], np.float32), 'expected_len': 12}, {'see_chemistry': utils.ChemistrySeen( stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], # First 6 entries are a 1-hot for the dimension map, in this case the # dimension map used is the first one. # The next 3 entries are 0 or 1 for the direction map, in this case all # directions are positive. 'expected_obs': np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0], np.float32), 'expected_len': 9}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), graph=utils.GraphElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], # 3 entries are 0 or 1 for the direction map, in this case all directions # are positive. 'expected_obs': np.array([1.0, 1.0, 1.0], np.float32), 'expected_len': 3}, {'see_chemistry': utils.ChemistrySeen( content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], # Observations are from the previous tests concatenated with graph first, # then potion map then stone map. 'expected_obs': np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # graph 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, # graph 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, # potion dim map 1.0, 1.0, 1.0, # potion dir map 1.0, 1.0, 1.0, # stone map 1.0, 0.0, 0.0, 0.0], np.float32), # rotation 'expected_len': 28}, # Tests for the belief state observation. 
{'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.no_bottleneck_constraints()[0], # With no actions the belief state should be unknown for all edges. 'expected_obs': 0.5 * np.ones((12,), np.float32), 'expected_len': 12}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.bottleneck1_constraints()[0], # It shouldn't make a difference whether the underlying chemistry has a # constraint or not everythin is unknown. 'expected_obs': 0.5 * np.ones((12,), np.float32), 'expected_len': 12}, {'see_chemistry': utils.ChemistrySeen( stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.no_bottleneck_constraints()[0], # First 6 entries are a 1-hot for the dimension map, with no actions all # of the dimesnsion maps are possible so the entries are all unknown. # The next 3 entries are 0 or 1 for the direction map, or 0.5 for unknown # which is the case if no actions are taken. 'expected_obs': 0.5 * np.ones((9,), np.float32), 'expected_len': 9}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), graph=utils.GraphElement(present=False), rotation=utils.RotationElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.no_bottleneck_constraints()[0], # 3 entries are 0 or 1 for the direction map, in this case all directions # are positive, since the test stones include an instance of the best # stone, the stone map should be known from the start. 'expected_obs': np.array([1.0, 1.0, 1.0], np.float32), 'expected_len': 3}, {'see_chemistry': utils.ChemistrySeen( content=utils.ElementContent.BELIEF_STATE, rotation=utils.RotationElement(present=False), precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.no_bottleneck_constraints()[0], # Observations are from the previous tests concatenated with graph first, # then potion map then stone map. 'expected_obs': np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, # graph 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, # graph 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, # potion dim map 0.5, 0.5, 0.5, # potion dir map 1.0, 1.0, 1.0], np.float32), # stone map 'expected_len': 24}, {'see_chemistry': utils.ChemistrySeen( content=utils.ElementContent.BELIEF_STATE, rotation=utils.RotationElement(present=False), precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.no_bottleneck_constraints()[0], 'actions': [utils.SlotBasedAction(stone_ind=0, potion_ind=0)], # If we put the 0th stone into the 0th potion we will see a change on # axis 1, we will become certain that the dim map is either [0, 1, 2] or # [2, 1, 0], we will become certain that the edge from (-1, -1, 1) to # (-1, 1, 1) exists. 
'expected_obs': np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, # graph 0.5, 0.5, 0.5, 1.0, 0.5, 0.5, # graph 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, # potion dim map 0.5, 1.0, 0.5, # potion dir map 1.0, 1.0, 1.0], np.float32), # stone map 'expected_len': 24}, # Tests for a combination of content types {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), rotation=utils.RotationElement(present=False), groups=[ utils.GroupInChemistry( {utils.ElementType.GRAPH: {0, 1, 2, 3}}, [1.0, 0.0, 0.0]), utils.GroupInChemistry( {utils.ElementType.GRAPH: {4, 5, 6}}, [0.0, 0.0, 1.0]), utils.GroupInChemistry( {utils.ElementType.GRAPH: {7, 8, 9, 10, 11}}, [0.0, 1.0, 0.0]), ], precomputed='perceptual_mapping_randomized_with_random_bottleneck'), 'constraint': graphs.bottleneck1_constraints()[0], 'actions': [utils.SlotBasedAction(stone_ind=0, potion_ind=0)], # With no actions the belief state should be unknown for all edges. 'expected_obs': np.array( [0.0, 1.0, 1.0, 1.0, # ground truth - 0 missing 1, 2, 3 exist 0.5, 0.5, 0.5, # unknown - these are set to 0.5 # belief state - after the action 9 is known, others are unknown 0.5, 0.5, 1.0, 0.5, 0.5], np.float32), 'expected_len': 12}, # Rotation tests {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([0, 0, 0])}, 'expected_obs': np.array([1.0, 0.0, 0.0, 0.0], np.float32), 'expected_len': 4}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([0, 0, -45])}, 'expected_obs': np.array([0.0, 1.0, 0.0, 0.0], np.float32), 'expected_len': 4}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([0, -45, 0])}, 'expected_obs': np.array([0.0, 0.0, 1.0, 0.0], np.float32), 'expected_len': 4}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), stone_map=utils.StoneMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.GROUND_TRUTH), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([-45, 0, 0])}, 'expected_obs': np.array([0.0, 0.0, 0.0, 1.0], np.float32), 'expected_len': 4}, # In belief state if we have stones which are unique to a particular # rotation then the rotation should be known and possibly part of the # stone map. 
{'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed=('perceptual_mapping_randomized_with_rotation_and_' 'random_bottleneck')), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([-45, 0, 0]), 'test_stones': [[Stone(0, [1, 1, 1]), Stone(0, [1, 1, -1])]]}, 'expected_obs': np.array( [1.0, 1.0, 1.0, # stone map 0.0, 0.0, 0.0, 1.0], np.float32), # rotation 'expected_len': 7}, # Otherwise rotation and stone map observations should both be unknown. {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed=('perceptual_mapping_randomized_with_rotation_and_' 'random_bottleneck')), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([-45, 0, 0]), 'test_stones': [[Stone(0, [1, 1, 1])]]}, 'expected_obs': np.array( [0.5, 0.5, 0.5, # stone map 0.5, 0.5, 0.5, 0.5], np.float32), # rotation 'expected_len': 7}, {'see_chemistry': utils.ChemistrySeen( potion_map=utils.PotionMapElement(present=False), graph=utils.GraphElement(present=False), content=utils.ElementContent.BELIEF_STATE, precomputed=('perceptual_mapping_randomized_with_rotation_and_' 'random_bottleneck')), 'constraint': graphs.no_bottleneck_constraints()[0], 'make_env_kwargs': { 'rotation': stones_and_potions.rotation_from_angles([-45, 0, 0]), 'test_stones': [[Stone(0, [1, 1, 1])]]}, 'actions': [utils.SlotBasedAction(stone_ind=0, potion_ind=6)], 'expected_obs': np.array( [1.0, 1.0, 1.0, # stone map 0.0, 0.0, 0.0, 1.0], np.float32), # rotation 'expected_len': 7}, ) def test_see_chemistry( self, see_chemistry, constraint, expected_obs, expected_len, actions=None, make_env_kwargs=None): """Test the ground truth chemistry observations.""" env = self._make_env( see_chemistry=see_chemistry, constraint=constraint, **(make_env_kwargs or {})) timestep = env.reset() if actions: for action in actions: timestep = env.step_slot_based_action(action) np.testing.assert_allclose( timestep.observation[_CHEM_NAME], expected_obs) self.assertLen( timestep.observation[_CHEM_NAME], expected_len) def test_see_chem_before_reset(self): env = self._make_env( see_chemistry=utils.ChemistrySeen( content=utils.ElementContent.GROUND_TRUTH), constraint=graphs.no_bottleneck_constraints()[0]) obs = env.observation() # Observation should be all unknown because we have not reset the # environment yet. np.testing.assert_allclose(obs[_CHEM_NAME], [0.5] * 28) # After resetting none of the chem should be unknown. env.reset() obs = env.observation() np.testing.assert_array_less( 0.01 * np.ones((28,)), np.abs(obs[_CHEM_NAME] - np.array([0.5] * 28))) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/symbolic_alchemy_test.py
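A small sketch of the integer action encoding that make_random_action_sequence above relies on; with an end-trial action available, 0 ends the trial, 1 is a no-op, and stone/potion slot pairs follow:

# Hedged sketch of slot_based_action_to_int (defined in symbolic_alchemy.py,
# the next file in this dump); all values assume end_trial_action=True.
from dm_alchemy import symbolic_alchemy
from dm_alchemy.types import utils

to_int = symbolic_alchemy.slot_based_action_to_int
assert to_int(utils.SlotBasedAction(end_trial=True), True) == 0
assert to_int(utils.SlotBasedAction(no_op=True), True) == 1
assert to_int(utils.SlotBasedAction(stone_ind=0, cauldron=True), True) == 2
assert to_int(utils.SlotBasedAction(stone_ind=0, potion_ind=2), True) == 5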
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Symbolic environment for alchemy.""" import abc import copy import functools from typing import Any, Callable, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, Union from dm_alchemy import event_tracker from dm_alchemy import symbolic_alchemy_trackers from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import graphs from dm_alchemy.types import helpers from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils as type_utils import dm_env from dm_env import specs import numpy as np Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion LatentStoneIndex = stones_and_potions.LatentStoneIndex LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion AlignedStoneIndex = stones_and_potions.AlignedStoneIndex PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex AlignedStone = stones_and_potions.AlignedStone PerceivedStone = stones_and_potions.PerceivedStone PerceivedPotion = stones_and_potions.PerceivedPotion CAULDRON = stones_and_potions.CAULDRON random_stone_map = stones_and_potions.random_stone_map random_potion_map = stones_and_potions.random_potion_map random_latent_stone = stones_and_potions.random_latent_stone random_latent_potion = stones_and_potions.random_latent_potion random_rotation = stones_and_potions.random_rotation random_graph = graphs.random_graph graph_distr = graphs.graph_distr possible_constraints = graphs.possible_constraints bottleneck1_constraints = graphs.bottleneck1_constraints bottleneck2_constraints = graphs.bottleneck2_constraints bottleneck3_constraints = graphs.bottleneck3_constraints no_bottleneck_constraints = graphs.no_bottleneck_constraints Chemistry = type_utils.Chemistry TrialItems = type_utils.TrialItems ElementContent = type_utils.ElementContent SeeChemistry = type_utils.ChemistrySeen GetChemistryObs = type_utils.GetChemistryObsFns SymbolicAlchemyTracker = symbolic_alchemy_trackers.SymbolicAlchemyTracker ActionInfo = symbolic_alchemy_trackers.ActionInfo STONE_COUNT_SCALE = 3.0 POTION_COUNT_SCALE = 12.0 POTION_TYPE_SCALE = PerceivedPotion.num_types / 2.0 REWARD_SCALE = stones_and_potions.max_reward() END_TRIAL = helpers.END_TRIAL NO_OP = -1 UNKNOWN_TYPE = -3 MAX_STONES = 3 MAX_POTIONS = 12 NO_EDGE = graphs.NO_EDGE DEFAULT_MAX_STEPS_PER_TRIAL = 20 def int_action_to_tuple( action: int, slot_based: bool, end_trial_action: bool ) -> Tuple[int, int]: """Converts integer action to tuple. In the integer representation, if we have an end trial action the mapping is as follows, otherwise subtract 1 from the integers shown below: 0 represents ending the trial 1 represents doing nothing The remaining integers represent putting a stone into a potion or into the cauldron, i.e. 
s * (num potion types + 1) + 2 represents putting stone type s into the cauldron (or stone index s in the slot based version) and s * (num potion types + 1) + 3 + p represents putting stone type s (or again index s) into potion type p (or index p). In the tuple representation: (-2, -2) represents ending the trial (-1, -1) represents no-op (s, -1) represents putting a stone of type (or index) s into the cauldron (s, p) represents putting a stone of type (or index) s into a potion of type (or index) p Args: action: Integer representing the action to take. slot_based: Whether the action is for a slot based env or type based. end_trial_action: Whether we have an end trial action Returns: Tuple representing the action to take. """ altered_action = copy.deepcopy(action) altered_action -= 1 if end_trial_action: altered_action -= 1 if altered_action < 0: return altered_action, altered_action if slot_based: potions_and_cauldron = MAX_POTIONS + 1 else: potions_and_cauldron = PerceivedPotion.num_types + 1 return (altered_action // potions_and_cauldron, (altered_action % potions_and_cauldron) - 1) def tuple_action_to_int( action: Tuple[int, int], slot_based: bool, end_trial_action: bool ) -> int: """Converts tuple action to integer.""" stone, potion = action num_special_actions = 2 if end_trial_action else 1 if stone < 0: return stone + num_special_actions if slot_based: potions_and_cauldron = MAX_POTIONS + 1 else: potions_and_cauldron = PerceivedPotion.num_types + 1 return stone * potions_and_cauldron + potion + 1 + num_special_actions def slot_based_action_to_int( action: type_utils.SlotBasedAction, end_trial_action: bool ) -> int: """Converts tuple action to integer.""" num_special_actions = 2 if end_trial_action else 1 if action.end_trial: val = END_TRIAL elif action.no_op: val = NO_OP else: potions_and_cauldron = MAX_POTIONS + 1 if action.cauldron: potion_as_int = 0 else: potion_as_int = action.potion_ind + 1 val = action.stone_ind * potions_and_cauldron + potion_as_int return val + num_special_actions def normalized_poss_dim_map_observation( dim_maps: Sequence[Sequence[int]] ) -> List[float]: """Gets an observation for which dimension maps are possible.""" return [0.0 if (list(x) not in dim_maps) else ( 1.0 if len(dim_maps) == 1 else 0.5) for x in stones_and_potions.get_all_dim_ordering()] def normalized_dir_map_observation( dir_map: Sequence[int] ) -> List[float]: """Normalizes a direction map to be between 0 and 1.""" return [(x + 1.0) / 2.0 for x in dir_map] class SymbolicAlchemy(dm_env.Environment, abc.ABC): """Symbolic alchemy environment. The chemistry and stones and potions are generated using the callables passed in. We assume the potion map, stone map, graph, stones and potions can be independently generated. Currently observations are just the outcome stone index of the action performed or -1 if no stone was used. Later observations will be the whole set of stones and potions available in some format. """ def __init__( self, chemistry_gen: Callable[[], type_utils.Chemistry], reward_weights: stones_and_potions.RewardWeights, items_gen: Callable[[int], type_utils.TrialItems], num_trials: int, end_trial_action: bool = False, max_steps_per_trial: int = DEFAULT_MAX_STEPS_PER_TRIAL, see_chemistries: Optional[Mapping[str, type_utils.ChemistrySeen]] = None, generate_events: bool = False, fix_obs_length: bool = False, observe_used: bool = True): """Constructs a symbolic alchemy environment. Args: chemistry_gen: Generate a chemistry for an episode. 
reward_weights: Structure which tells us the reward for a given stone. items_gen: Generate a set of stones and potions for a trial in an episode. num_trials: The number of trials in each episode. end_trial_action: Whether the agent has an action to end the trial early. max_steps_per_trial: The number of steps the agent can take before the trial is automatically ended. see_chemistries: Optional map from name to a structure containing information about how to form a chemistry, i.e. which parts and whether the content should be ground truth or the belief state. These are added to the observation dictionary. If None, then no chemistries are added. generate_events: Whether to track items generated and potions and stones used and return this information when events is called on the environment. This is not necessary during training but is used when we run analysis on the environment. fix_obs_length: Whether to fix the length of the chemistry observation that's fed in as input. If False, will only concatenate parts of the chemistry that are supposed to be seen. observe_used: Whether to have a feature for each item slot which is set to 1 if the item is used and 0 otherwise. """ self._is_new_trial = False self.observe_used = observe_used self._chemistry_gen = chemistry_gen self._reward_weights = reward_weights or stones_and_potions.RewardWeights( coefficients=[1, 1, 1], offset=0, bonus=12) # These are the items generated each trial. self._items_gen = items_gen self._num_trials = num_trials self._chemistry = None self.trial_number = -1 self.game_state: Optional[event_tracker.GameState] = None self._is_last_step = True self._steps_this_trial = 0 self.max_steps_per_trial = max_steps_per_trial self.trackers: MutableMapping[str, SymbolicAlchemyTracker] = {} if generate_events: trackers = { symbolic_alchemy_trackers.AddMatrixEventTracker.NAME: symbolic_alchemy_trackers.AddMatrixEventTracker(), symbolic_alchemy_trackers.ItemGeneratedTracker.NAME: symbolic_alchemy_trackers.ItemGeneratedTracker()} self.add_trackers(trackers) self._outcome_tracker = None self._end_trial_action = end_trial_action self._fix_obs_length = fix_obs_length # Whether we can see the ground truth chemistry in observations self.see_chemistries = see_chemistries or {} precomputeds: List[precomputed_maps.PrecomputedMaps] = [] for see_chemistry in self.see_chemistries.values(): see_chemistry.initialise_precomputed() if see_chemistry.precomputed is not None: precom = see_chemistry.precomputed # type: precomputed_maps.PrecomputedMaps precomputeds.append(precom) self._precomputed = precomputeds[0] if precomputeds else None self._possible_partial_graph_indices = None self._contents = None if self._precomputed is not None: belief_state_tracker = { symbolic_alchemy_trackers.BeliefStateTracker.NAME: symbolic_alchemy_trackers.BeliefStateTracker( self._precomputed, self)} num_possible_partial_graphs = len( self._precomputed.partial_graph_index_to_possible_index) self._possible_partial_graph_indices = np.array([ 0 for _ in range(num_possible_partial_graphs)], dtype=int) for ind, i in ( self._precomputed.partial_graph_index_to_possible_index.items()): self._possible_partial_graph_indices[i] = ind self.add_trackers(belief_state_tracker) def add_trackers( self, trackers: Mapping[str, SymbolicAlchemyTracker] ) -> None: self.trackers.update(trackers) def events(self) -> Dict[str, Any]: """If it is the last step returns events for the episode.""" events = {} if not self._is_last_step: return events events.update({'chemistry': self._chemistry}) for 
tracker_name in ['matrix_event', 'items_generated']: if tracker_name in self.trackers: events.update({ tracker_name: self.trackers[tracker_name].episode_returns()}) return events def _new_trial(self) -> None: self._steps_this_trial = 0 if self.trial_number + 1 >= self._num_trials: self.trial_number = -1 self._is_last_step = True else: self._is_new_trial = True self.trial_number += 1 items = self._items_gen(self.trial_number) reward_tracker = event_tracker.RewardTracker(self._reward_weights) self.game_state = event_tracker.GameState( self._chemistry.graph, trial_items=items, event_trackers=[reward_tracker, self._outcome_tracker]) self.trial_start(self.game_state) def reset_no_observation(self) -> None: self.trial_number = -1 self._is_last_step = False # Generate a chemistry for this episode. self._chemistry = self._chemistry_gen() # At the start of the episode sample what the contents of each element of # the chemistry observation will be. self._contents = {k: see_chemistry.sample_contents() for k, see_chemistry in self.see_chemistries.items()} self._outcome_tracker = event_tracker.LatestOutcomeTracker( self._chemistry.potion_map, self._chemistry.stone_map, self._chemistry.rotation) self.episode_start(self._chemistry) self._new_trial() def reset(self) -> dm_env.TimeStep: self.reset_no_observation() return dm_env.TimeStep( dm_env.StepType.FIRST, None, None, self.observation()) def step_no_observation( self, action: type_utils.SlotBasedAction, original_action: Optional[Union[ type_utils.SlotBasedAction, type_utils.TypeBasedAction]] = None ) -> Optional[float]: """Takes a step in the environment without producing an observation. Args: action: The action to take in integer representation. original_action: The original action in whatever form which may not be runnable passed in for tracking. Returns: The reward gained this step or None if we must reset. """ if self._is_last_step: self.reset_no_observation() return None self._is_new_trial = False reward = 0 self.game_state.trackers['latest_outcome'].reset() self._steps_this_trial += 1 if action.using_stone: reward_start = self.game_state.trackers['reward'].reward if action.cauldron: self.game_state.stone_used(action.stone_ind) elif action.using_potion: self.game_state.potion_used(action.stone_ind, action.potion_ind) else: # Need to call this for outcome tracker to know what the stone type # is. self.game_state.failed_potion_use(action.stone_ind) reward = self.game_state.trackers['reward'].reward - reward_start type_based_action = ( self.game_state.trackers['latest_outcome'].type_based_action or type_utils.TypeBasedAction( end_trial=action.end_trial, no_op=action.no_op)) self.action_and_outcome( action=type_based_action, outcome=self.game_state.trackers['latest_outcome'].outcome, action_info=ActionInfo( original_action, action.using_stone, action.using_potion)) if action.end_trial or self._steps_this_trial >= self.max_steps_per_trial: # If the current stone is -1 then end the trial. self.trial_end() self._new_trial() return float(reward) def step(self, action: int) -> dm_env.TimeStep: """Takes a step in the environment using the action passed in. Args: action: The action to take in integer representation. Returns: A timestep with the observation, reward, step type and discount. 
""" return self.step_slot_based_action(self._int_to_slot_based_action(action)) def step_slot_based_action( self, action: type_utils.SlotBasedAction ) -> dm_env.TimeStep: return self._internal_step(self._runnable_slot_based_action(action), action) def step_type_based_action( self, action: type_utils.TypeBasedAction ) -> dm_env.TimeStep: """Takes a step in the environment using a slot based action.""" return self._internal_step(self._type_based_to_slot_based(action), action) def _internal_step( self, action: type_utils.SlotBasedAction, original_action: Union[ type_utils.SlotBasedAction, type_utils.TypeBasedAction] ) -> dm_env.TimeStep: """Takes a step in the environment using a slot based action.""" if not self._end_trial_action and action.end_trial: raise ValueError('Env has no end trial action') return self.construct_step(self.step_no_observation( action, original_action)) def _type_based_to_slot_based( self, action: type_utils.TypeBasedAction ) -> type_utils.SlotBasedAction: stone_ind, potion_ind = None, None if action.using_stone: aligned_stone = stones_and_potions.align( action.perceived_stone, self._chemistry.rotation) latent_stone = self._chemistry.stone_map.apply(aligned_stone) stone_ind = self.game_state.get_stone_ind(stone=graphs.Node( -1, latent_stone.latent_coords)) if action.using_potion: latent_potion = self._chemistry.potion_map.apply(action.perceived_potion) potion_ind = self.game_state.get_potion_ind(potion=latent_potion) return type_utils.SlotBasedAction( end_trial=action.end_trial, no_op=action.no_op, stone_ind=stone_ind, cauldron=action.cauldron, potion_ind=potion_ind) def construct_step( self, reward: Optional[float], discount: Optional[float] = 1.0 ) -> dm_env.TimeStep: if reward is None: # If reward is None this is the first step of an episode. step_type = dm_env.StepType.FIRST discount = None elif self._is_last_step: step_type = dm_env.StepType.LAST # There should be no rewards considered beyond the last step. discount = 0.0 else: step_type = dm_env.StepType.MID return dm_env.TimeStep(step_type, reward, discount, self.observation()) def _int_to_slot_based_action( self, action: int ) -> type_utils.SlotBasedAction: """Converts integer action to simplified action. In the integer representation, if we have an end trial action the mapping is as follows, otherwise subtract 1 from the integers shown below: 0 represents ending the trial 1 represents doing nothing The remaining integers represent putting a stone into a potion or into the cauldron, i.e. s * (num potion types + 1) + 2 represents putting stone type s into the cauldron (or stone index s in the slot based version) and s * (num potion types + 1) + 3 + p represents putting stone type s (or again index s) into potion type p (or index p). Args: action: Integer representing the action to take. Returns: SlotBasedAction representing the action to take. 
""" altered_action = copy.deepcopy(action) altered_action -= 1 if self._end_trial_action: altered_action -= 1 if altered_action < 0: return type_utils.SlotBasedAction( end_trial=altered_action == END_TRIAL, no_op=altered_action == NO_OP) potions_and_cauldron = MAX_POTIONS + 1 stone_ind = altered_action // potions_and_cauldron potion_ind = (altered_action % potions_and_cauldron) - 1 if potion_ind < 0: return type_utils.SlotBasedAction( stone_ind=stone_ind, cauldron=True) return type_utils.SlotBasedAction( stone_ind=stone_ind, potion_ind=potion_ind) def _slot_based_action_to_int( self, action: type_utils.SlotBasedAction ) -> int: """Converts tuple action to integer.""" return slot_based_action_to_int(action, self._end_trial_action) def _runnable_slot_based_action( self, action: type_utils.SlotBasedAction ) -> type_utils.SlotBasedAction: new_action = copy.deepcopy(action) if action.stone_ind is not None and not self.game_state.has_stone_ind( action.stone_ind): new_action.stone_ind = None if action.potion_ind is not None and not self.game_state.has_potion_ind( action.potion_ind): new_action.potion_ind = None return new_action def observation_spec(self): num_stone_features, num_potion_features = slot_based_num_features( self.observe_used) obs_features = ((num_stone_features * MAX_STONES) + (num_potion_features * MAX_POTIONS)) obs_spec = { 'symbolic_obs': specs.Array(shape=(obs_features,), dtype=np.float32, name='symbolic_obs')} obs_spec.update(self.chem_observation_spec()) return obs_spec def action_spec(self): # Actions for each stone slot in each potion slot, each stone slot in the # cauldron, end trial and no-op. num_special_actions = 2 if self._end_trial_action else 1 num_actions = MAX_STONES * (MAX_POTIONS + 1) + num_special_actions return (specs.BoundedArray( shape=(), dtype=int, minimum=0, maximum=num_actions - 1, name='action')) def _num_features(self): return slot_based_num_features(self.observe_used) def _default_features(self): num_axes = stones_and_potions.get_num_axes() stone_features = [2 for _ in range(num_axes + 1)] potion_features = [1] if self.observe_used: # Set used to 1 by default, we will set it to 0 for items that exist. stone_features.append(1) potion_features.append(1) return (np.array([stone_features], dtype=np.float32), np.array([potion_features], dtype=np.float32)) def observation(self): # If we are using the slot based representation then get features for each # stone which is present. num_axes = stones_and_potions.get_num_axes() default_stone_features, default_potion_features = self._default_features() stone_features = np.concatenate( [default_stone_features for _ in range(MAX_STONES)], axis=0) potion_features = np.concatenate( [default_potion_features for _ in range(MAX_POTIONS)], axis=0) existing_stones = (self.game_state.existing_stones() if self.game_state else []) existing_potions = (self.game_state.existing_potions() if self.game_state else []) for stone in existing_stones: stone_ind = self.game_state.get_stone_ind(stone_inst=stone.idx) assert 0 <= stone_ind < MAX_STONES, 'stone idx out of range' aligned_stone = self._chemistry.stone_map.apply_inverse( stone.latent_stone()) perceived_stone = stones_and_potions.unalign( aligned_stone, self._chemistry.rotation) for f in range(num_axes): stone_features[stone_ind, f] = perceived_stone.perceived_coords[f] # This feature is equivalent to the value indicator seen on the stone as # it distinguishes different reward values. 
stone_features[stone_ind, num_axes] = ( perceived_stone.reward / stones_and_potions.max_reward()) if self.observe_used: stone_features[stone_ind, num_axes + 1] = 0.0 for potion in existing_potions: potion_ind = self.game_state.get_potion_ind(potion_inst=potion.idx) assert potion_ind < MAX_POTIONS, 'potion idx out of range' latent_potion = potion.latent_potion() perceived_potion = self._chemistry.potion_map.apply_inverse(latent_potion) potion_features[potion_ind, 0] = ( (perceived_potion.index() / POTION_TYPE_SCALE) - 1.0) if self.observe_used: potion_features[potion_ind, 1] = 0.0 concat_obs = {'symbolic_obs': np.concatenate( (stone_features.reshape((-1,)), potion_features.reshape((-1,))))} concat_obs.update(self.chem_observation()) return concat_obs def _rotation_known(self) -> bool: belief_state_tracker: symbolic_alchemy_trackers.BeliefStateTracker = ( self.trackers['belief_state']) return len(belief_state_tracker.belief_state.possible_rotations) == 1 def get_belief_state_edge_vals( self, unknown_edge_vals: List[float] ) -> List[float]: if not self._rotation_known(): return unknown_edge_vals # First attempt - do the simplest thing of setting all unknown to 0.5 # and otherwise set to 0 or 1 belief_state_tracker: symbolic_alchemy_trackers.BeliefStateTracker = ( self.trackers['belief_state']) this_adjmat = belief_state_tracker.get_partial_graph( self._possible_partial_graph_indices).known_adj_mat.astype( np.float32) this_adjmat[this_adjmat == helpers.UNKNOWN] = 0.5 return graphs.edge_values_from_adj_mat(this_adjmat) def get_ground_truth_edge_vals(self) -> List[float]: # Get adjacency matrix corresponding to current graph this_adjmat = graphs.convert_graph_to_adj_mat(self._chemistry.graph).astype( np.float32) this_adjmat[this_adjmat != NO_EDGE] = 1.0 return graphs.edge_values_from_adj_mat(this_adjmat) def get_belief_state_potion_map_obs( self, unknown_potion_map: List[float] ) -> List[float]: if not self._rotation_known(): return unknown_potion_map belief_state_tracker = self.trackers['belief_state'] # type: symbolic_alchemy_trackers.BeliefStateTracker partial_potion_map = belief_state_tracker.get_partial_potion_map( self._precomputed.index_to_perm_index) potion_map_possible = ( belief_state_tracker.belief_state.belief_state.world_state_distribution. 
potion_map_possible) dim_maps = [stones_and_potions.potion_map_from_index( p, self._precomputed.index_to_perm_index).dim_map for p in potion_map_possible] dir_map = [0 if d == helpers.UNKNOWN else d for d in partial_potion_map.dir_map] return (normalized_poss_dim_map_observation(dim_maps) + normalized_dir_map_observation(dir_map)) def get_ground_truth_potion_map_obs(self) -> List[float]: dim_maps = [self._chemistry.potion_map.dim_map] dir_map = self._chemistry.potion_map.dir_map return (normalized_poss_dim_map_observation(dim_maps) + normalized_dir_map_observation(dir_map)) def get_belief_state_stone_map_obs( self, unknown_stone_map: List[float] ) -> List[float]: if not self._rotation_known(): return unknown_stone_map belief_state_tracker: symbolic_alchemy_trackers.BeliefStateTracker = ( self.trackers['belief_state']) partial_stone_map = belief_state_tracker.get_partial_stone_map() return normalized_dir_map_observation([ 0 if d == helpers.UNKNOWN else d for d in partial_stone_map.latent_pos_dir]) def get_ground_truth_stone_map_obs(self) -> List[float]: return normalized_dir_map_observation( self._chemistry.stone_map.latent_pos_dir) def get_belief_state_rotation( self, unknown_rotation: List[float] ) -> List[float]: if not self._rotation_known(): return unknown_rotation return self.get_ground_truth_rotation() def get_ground_truth_rotation(self) -> List[float]: return [1.0 if stones_and_potions.rotations_equal( self._chemistry.rotation, rotation) else 0.0 for rotation in stones_and_potions.possible_rotations()] def chem_observation(self) -> Dict[str, np.ndarray]: """Converts the ground truth chemistry/mappings into observation vector.""" # full representation of chemistry should be length 28 unknown_potion_obs = [0.5 for _ in range( len(stones_and_potions.get_all_dim_ordering()) + stones_and_potions.get_num_axes())] unknown_stone_obs = [0.5 for _ in range(stones_and_potions.get_num_axes())] unknown_edge_vals = [0.5 for _ in range(graphs.num_edges_in_cube())] unknown_rotation = [0.5 for _ in range(len( stones_and_potions.possible_rotations()))] # If we try to get an observation before we have contents (i.e. before the # environment is reset) we return unknown values. This happens when we are # running a 3d environment and we cannot reset the symbolic environment # until the 3d environment sends messages containing the chemistry and # items. 
contents = (self._contents or {k: [ElementContent.UNKNOWN] * len(see_chemistry.groups) for k, see_chemistry in self.see_chemistries.items()}) get_obs = type_utils.GetChemistryObsFns( potion_map={ ElementContent.UNKNOWN: lambda: unknown_potion_obs, ElementContent.GROUND_TRUTH: functools.partial( SymbolicAlchemy.get_ground_truth_potion_map_obs, self), ElementContent.BELIEF_STATE: functools.partial( SymbolicAlchemy.get_belief_state_potion_map_obs, self, unknown_potion_obs)}, stone_map={ElementContent.UNKNOWN: lambda: unknown_stone_obs, ElementContent.GROUND_TRUTH: functools.partial( SymbolicAlchemy.get_ground_truth_stone_map_obs, self), ElementContent.BELIEF_STATE: functools.partial( SymbolicAlchemy.get_belief_state_stone_map_obs, self, unknown_stone_obs)}, graph={ElementContent.UNKNOWN: lambda: unknown_edge_vals, ElementContent.GROUND_TRUTH: functools.partial( SymbolicAlchemy.get_ground_truth_edge_vals, self), ElementContent.BELIEF_STATE: functools.partial( SymbolicAlchemy.get_belief_state_edge_vals, self, unknown_edge_vals)}, rotation={ElementContent.UNKNOWN: lambda: unknown_rotation, ElementContent.GROUND_TRUTH: functools.partial( SymbolicAlchemy.get_ground_truth_rotation, self), ElementContent.BELIEF_STATE: functools.partial( SymbolicAlchemy.get_belief_state_rotation, self, unknown_rotation)}) return {k: np.array(see_chemistry.form_observation(contents[k], get_obs), dtype=np.float32) for k, see_chemistry in self.see_chemistries.items()} def chem_observation_spec(self) -> Dict[str, specs.Array]: return {k: specs.Array(shape=(see_chemistry.obs_size(),), dtype=np.float32, name=k) for k, see_chemistry in self.see_chemistries.items()} def step_spec(self) -> None: raise NotImplementedError def is_new_trial(self) -> bool: return self._is_new_trial def is_last_step(self) -> bool: return self._is_last_step def perceived_stone(self, stone: Stone) -> PerceivedStone: aligned_stone = self._chemistry.stone_map.apply_inverse( stone.latent_stone()) perceived_stone = stones_and_potions.unalign( aligned_stone, self._chemistry.rotation) return perceived_stone def perceived_stones(self) -> List[PerceivedStone]: return [self.perceived_stone(s) for s in self.game_state.existing_stones()] def perceived_potion(self, potion: Potion) -> PerceivedPotion: return self._chemistry.potion_map.apply_inverse( potion.latent_potion()) def perceived_potions(self) -> List[PerceivedPotion]: return [self.perceived_potion(s) for s in self.game_state.existing_potions()] def use_positive_stones(self) -> dm_env.TimeStep: overall_reward = 0 overall_discount = 1.0 # If it is the last step of an episode reset to start a new one. if self._is_last_step: self.reset() # Reward and discount will be None as we have started a new episode. pos_stone_inds = self.game_state.get_stones_above_thresh( self._reward_weights, threshold=0) for stone_ind in pos_stone_inds: timestep = self.step_slot_based_action(type_utils.SlotBasedAction( stone_ind=stone_ind, cauldron=True)) overall_reward += timestep.reward overall_discount *= timestep.discount if self._is_last_step or self.is_new_trial(): return self.construct_step(float(overall_reward), overall_discount) end_trial_reward, end_trial_discount = self.end_trial() overall_reward += end_trial_reward overall_discount *= end_trial_discount # Get the cumulative reward and discount and the final step type and # observation. 
return self.construct_step(float(overall_reward), overall_discount) def end_trial(self) -> Tuple[float, float]: overall_reward = 0.0 overall_discount = 1.0 # If it is the last step of an episode reset to start a new one. if self._is_last_step: self.reset() # Reward and discount will be None as we have started a new episode. if self._end_trial_action: reward = self.step_no_observation(type_utils.SlotBasedAction( end_trial=True)) overall_reward += reward else: # If it is a new trial take at least one step. if self.is_new_trial(): reward = self.step_no_observation(type_utils.SlotBasedAction( no_op=True)) overall_reward += reward while not (self._is_last_step or self.is_new_trial()): reward = self.step_no_observation(type_utils.SlotBasedAction( no_op=True)) overall_reward += reward return overall_reward, overall_discount def episode_start(self, chemistry: type_utils.Chemistry) -> None: for tracker in self.trackers.values(): tracker.episode_start(chemistry) def trial_start(self, game_state: event_tracker.GameState) -> None: for tracker in self.trackers.values(): tracker.trial_start(game_state) def action_and_outcome( self, action: type_utils.TypeBasedAction, outcome: Optional[PerceivedStone], action_info: ActionInfo ) -> None: for tracker in self.trackers.values(): tracker.action_and_outcome(action, outcome, action_info) def trial_end(self) -> None: for tracker in self.trackers.values(): tracker.trial_end() def episode_returns(self) -> Dict[str, Any]: returns = {} for name, tracker in self.trackers.items(): returns.update({name: tracker.episode_returns()}) return returns def slot_based_num_features(observe_used: bool) -> Tuple[int, int]: num_axes = stones_and_potions.get_num_axes() num_stone_features = num_axes + 1 num_potion_features = 1 if observe_used: num_stone_features += 1 num_potion_features += 1 return num_stone_features, num_potion_features def slot_based_stone_feature_dims(observe_used, rotated_stone_positions): """Gets the dimensions of the observation containing each stone feature.""" stone_features, _ = slot_based_num_features(observe_used) potion_section = MAX_STONES * stone_features poss_vals = [-1, 0, 1] if rotated_stone_positions else [-1, 1] return ((slice(i, potion_section, stone_features) for i in range(stones_and_potions.get_num_axes())), np.array(poss_vals, dtype=float)) def slot_based_stone_reward_dims(observe_used): """Gets the dimensions of the observation containing stone rewards.""" stone_features, _ = slot_based_num_features(observe_used) potion_section = MAX_STONES * stone_features return (slice( stones_and_potions.get_num_axes(), potion_section, stone_features), np.array([r / REWARD_SCALE for r in stones_and_potions.POSS_REWARDS], dtype=float)) def slot_based_potion_colour_dims(observe_used): """Gets the dimensions of the observation containing potion colours.""" stone_features, potion_features = slot_based_num_features(observe_used) potion_section = MAX_STONES * stone_features return (slice(potion_section, None, potion_features), np.array([(p / POTION_TYPE_SCALE) - 1.0 for p in range(PerceivedPotion.num_types)], dtype=float)) def take_simplified_action( simplified_action: Union[ type_utils.SlotBasedAction, type_utils.TypeBasedAction], env: SymbolicAlchemy ) -> dm_env.TimeStep: """Takes action from the simplified action spec.""" # In the simplified action spec the agent can end the trial and use all # positive stones with a single action. 
if simplified_action.end_trial: return env.use_positive_stones() if isinstance(simplified_action, type_utils.SlotBasedAction): return env.step_slot_based_action(simplified_action) return env.step_type_based_action(simplified_action) def get_symbolic_alchemy_level( level_name, observe_used=True, end_trial_action=False, num_trials=10, num_stones_per_trial=3, num_potions_per_trial=12, seed=None, reward_weights=None, max_steps_per_trial=DEFAULT_MAX_STEPS_PER_TRIAL, see_chemistries=None, generate_events=False): """Gets a symbolic alchemy instance of the level passed in.""" random_state = np.random.RandomState(seed) if 'perceptual_mapping_randomized' in level_name: _, index_to_perm_index = precomputed_maps.get_perm_index_conversion() stone_map_gen = functools.partial( random_stone_map, random_state=random_state) seeded_rand_potion_map = functools.partial( random_potion_map, random_state=random_state) potion_map_gen = lambda: seeded_rand_potion_map(index_to_perm_index) else: stone_map_gen = stones_and_potions.all_fixed_stone_map potion_map_gen = stones_and_potions.all_fixed_potion_map seeded_rand_graph = functools.partial( random_graph, random_state=random_state) if 'random_bottleneck' in level_name: graph_gen = lambda: seeded_rand_graph(graph_distr(possible_constraints())) elif 'bottleneck1' in level_name: graph_gen = ( lambda: seeded_rand_graph(graph_distr(bottleneck1_constraints()))) elif 'bottleneck2' in level_name: graph_gen = ( lambda: seeded_rand_graph(graph_distr(bottleneck2_constraints()))) elif 'bottleneck3' in level_name: graph_gen = ( lambda: seeded_rand_graph(graph_distr(bottleneck3_constraints()))) else: graph_gen = ( lambda: seeded_rand_graph(graph_distr(no_bottleneck_constraints()))) if 'rotation' in level_name: rotation_gen = functools.partial(random_rotation, random_state=random_state) else: rotation_gen = lambda: np.eye(3) def items_gen(unused_trial_number): del unused_trial_number stones_in_trial = [random_latent_stone(random_state=random_state) for _ in range(num_stones_per_trial)] potions_in_trial = [random_latent_potion(random_state=random_state) for _ in range(num_potions_per_trial)] return TrialItems(potions=potions_in_trial, stones=stones_in_trial) def chemistry_gen(): return Chemistry( potion_map_gen(), stone_map_gen(), graph_gen(), rotation_gen()) return SymbolicAlchemy( observe_used=observe_used, chemistry_gen=chemistry_gen, reward_weights=reward_weights, items_gen=items_gen, num_trials=num_trials, end_trial_action=end_trial_action, max_steps_per_trial=max_steps_per_trial, see_chemistries=see_chemistries, generate_events=generate_events) def get_symbolic_alchemy_fixed( episode_items, chemistry, observe_used=True, reward_weights=None, end_trial_action=False, max_steps_per_trial=DEFAULT_MAX_STEPS_PER_TRIAL, see_chemistries=None, generate_events=False): """Symbolic alchemy which generates same chemistry and items every episode.""" return SymbolicAlchemy( observe_used=observe_used, chemistry_gen=lambda: chemistry, reward_weights=reward_weights, items_gen=lambda i: episode_items.trials[i], num_trials=episode_items.num_trials, end_trial_action=end_trial_action, max_steps_per_trial=max_steps_per_trial, see_chemistries=see_chemistries, generate_events=generate_events)
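# A minimal usage sketch, not part of the original module: drives the symbolic
# environment above with uniformly random integer actions. The level name and
# reward weights below are illustrative assumptions only; see
# get_symbolic_alchemy_level for the supported level-name substrings and
# _int_to_slot_based_action for the integer action encoding.


def _random_agent_demo(num_steps: int = 20, seed: int = 0) -> None:
  """Runs a uniform-random agent for a few steps, printing rewards."""
  rng = np.random.RandomState(seed)
  env = get_symbolic_alchemy_level(
      'perceptual_mapping_randomized_with_random_bottleneck',
      end_trial_action=True, seed=seed,
      # Example weights only: a stone is valued by the sum of its latent
      # coordinates plus a bonus for the best stone.
      reward_weights=stones_and_potions.RewardWeights(
          coefficients=[1, 1, 1], offset=0, bonus=12))
  action_spec = env.action_spec()
  timestep = env.reset()
  for _ in range(num_steps):
    # 0 ends the trial, 1 is a no-op; the remaining integers put a stone slot
    # into the cauldron or into one of the potion slots (see
    # _int_to_slot_based_action for the exact mapping).
    action = int(rng.randint(action_spec.minimum, action_spec.maximum + 1))
    timestep = env.step(action)
    print('action:', action, 'reward:', timestep.reward)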
dm_alchemy-master
dm_alchemy/symbolic_alchemy.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tracks the order of alchemy events and resulting stones and potions.""" import abc import collections import copy import itertools import random from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils import numpy as np Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion AlignedStone = stones_and_potions.AlignedStone PerceivedPotion = stones_and_potions.PerceivedPotion StoneMap = stones_and_potions.StoneMap PotionMap = stones_and_potions.PotionMap CAULDRON = stones_and_potions.CAULDRON RewardWeights = stones_and_potions.RewardWeights Graph = graphs.Graph NEVER_USED = -1 NO_OUTCOME = -1 UNKNOWN_TYPE = -3 class EventTracker(abc.ABC): """Base class for things that track alchemy events.""" def __init__(self, name): self.name = name @abc.abstractmethod def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None: pass def failed_potion_use( self, stone_ind: int, start_stone: graphs.Node, stone_inst: int) -> None: """Optional callback when a potion use is attempted but fails.""" pass class GameState: """Keeps track of the symbolic state of an alchemy game.""" def __init__( self, graph: graphs.Graph, trial_items: utils.TrialItems, event_trackers: Optional[Sequence[EventTracker]] = None ): self._stones = copy.deepcopy(trial_items.stones) self._stone_idx_to_ind = {p.idx: i for i, p in enumerate(self._stones)} self._stone_ind_to_idx = {i: p.idx for i, p in enumerate(self._stones)} self._potions = copy.deepcopy(trial_items.potions) self._potion_idx_to_ind = {p.idx: i for i, p in enumerate(self._potions)} self._graph = graph num_stones = len(self._stones) num_potions = len(self._potions) self._existing_stones = set(range(num_stones)) self._existing_potions = set(range(num_potions)) trackers = event_trackers if event_trackers is not None else [] self.trackers = {tracker.name: tracker for tracker in trackers} self._count = 0 def add_event_trackers(self, event_trackers: Sequence[EventTracker]) -> None: """Adds event trackers if they are not already there.""" self.trackers.update({tracker.name: tracker for tracker in event_trackers}) def get_stone_ind( self, stone_inst: Optional[int] = None, stone: Optional[Union[graphs.Node, LatentStone]] = None ) -> Optional[int]: """Gets a stone referred to through a variety of methods. The caller must pass exactly one of stone_inst and stone. Args: stone_inst: The instance id of the stone used in the potion. stone: The stone used. 
Returns: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion or None if no match can be found. """ if len([e for e in [stone_inst, stone] if e is not None]) != 1: raise ValueError('Exactly one of stone inst and stone must be given.') if stone_inst is not None: return self._stone_idx_to_ind[stone_inst] if isinstance(stone, LatentStone): stone_node = graphs.Node(-1, stone.latent_coords) else: stone_node = stone matches = self._matching_stones(stone_node) if not matches: return None return matches[0] def get_potion_ind( self, potion_inst: Optional[int] = None, potion: Optional[Union[Potion, LatentPotion]] = None) -> Optional[int]: """Gets a potion referred to through a variety of methods. The caller must pass exactly one of potion_inst and potion. Args: potion_inst: The instance id of the potion used. potion: The potion used. Returns: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used or None if no match can be found. -1 refers to the cauldron. """ if len([e for e in [potion_inst, potion] if e is not None]) != 1: raise ValueError('Exactly one of potion inst and potion must be given.') if potion_inst is not None: return self._potion_idx_to_ind[potion_inst] if isinstance(potion, LatentPotion): potion = Potion(-1, potion.latent_dim, potion.latent_dir) matches = self._matching_potions(potion) if not matches: return None return matches[0] def _stone_node(self, ind: int) -> graphs.Node: node_ = self._graph.node_list.get_node_by_coords( list(self._stones[ind].latent)) assert node_ is not None node: graphs.Node = node_ return node def _matching_potions(self, potion: Potion) -> List[int]: return [p for p in self._existing_potions if self._potions[p].as_index == potion.as_index] def _matching_stones(self, stone_node: graphs.Node) -> List[int]: return [i for i in self._existing_stones if tuple(self._stone_node(i).coords) == tuple(stone_node.coords)] def has_stone_ind(self, stone_ind: int) -> bool: return stone_ind in self._existing_stones def has_potion_ind(self, potion_ind: int) -> bool: return potion_ind in self._existing_potions def _remove_potion(self, potion_ind: int) -> None: self._existing_potions.remove(potion_ind) def _remove_stone(self, stone_ind: int) -> None: self._existing_stones.remove(stone_ind) def potion_used( self, stone_ind: int, potion_ind: int, val: Optional[int] = None ) -> int: """Records that a potion has been used. The caller must pass exactly one of stone_ind, stone_inst and stone, and exactly one of potion_ind, potion_inst and potion. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. potion_ind: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used. -1 refers to the cauldron. val: The value to record in this event (typically the frame number that this event occurs). If this is not set then the value set will be arbitrary but will preserve the order in which the potion_used and stone_used functions are called. Returns: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. This may not have been passed into the function (if stone_inst or stone was passed instead). 
""" # -1 corresponds to the cauldron and so there is no potion to remove and the # stone does not change old_node = self._stone_node(stone_ind) outcome_stone = None potion = None if potion_ind != CAULDRON: outcome_stone = copy.deepcopy(old_node) potion = self._potions[potion_ind] # Change the stone in _stones if old_node in self._graph.edge_list.edges: outcome_stone = [end_node for end_node, v in self._graph.edge_list.edges[old_node].items() if potion.same_effect(v[1])] if outcome_stone: assert len(outcome_stone) == 1 outcome_stone = outcome_stone[0] self._stones[stone_ind].latent = np.array(list(outcome_stone.coords)) else: outcome_stone = old_node self._remove_potion(potion_ind) if self.trackers: if val is None: val = self._count self._count += 1 for event_tracker in self.trackers.values(): event_tracker.potion_used( stone_ind, potion_ind, val, old_node, self._stone_ind_to_idx[stone_ind], potion, outcome_stone) return stone_ind def stone_used(self, stone_ind: int, val: Optional[int] = None) -> None: """Records that a stone has been used (placed in the cauldron). The caller must pass exactly one of stone_ind, stone_inst and stone. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. val: The value to record in this event (typically the frame number that this event occurs). If this is not set then the value set will be arbitrary but will preserve the order in which the potion_used and stone_used functions are called. """ self.potion_used( stone_ind=stone_ind, potion_ind=CAULDRON, val=val) self._remove_stone(stone_ind) def failed_potion_use(self, stone_ind: int) -> None: old_node = self._stone_node(stone_ind) for event_tracker in self.trackers.values(): event_tracker.failed_potion_use( stone_ind, old_node, self._stone_ind_to_idx[stone_ind]) def has_stones(self) -> bool: return bool(self._existing_stones) def has_potions(self) -> bool: return bool(self._existing_potions) def has_stones_and_potions(self) -> bool: return self.has_stones() and self.has_potions() def rand_stone_ind(self) -> int: return random.sample(self._existing_stones, 1)[0] def rand_potion_ind(self) -> int: return random.sample(self._existing_potions, 1)[0] def use_rand_stone_potion_pair(self) -> Tuple[Stone, int]: """Uses a random stone with a random potion. Returns: The new value of the stone and the index of that stone. """ stone_index = self.rand_stone_ind() return self.use_rand_potion(stone_index) def use_rand_potion(self, stone_ind: int) -> Tuple[Stone, int]: """Uses the stone passed with a random potion. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone to use in a random potion. Returns: The new value of the stone and the index of that stone. 
""" potion_index = self.rand_potion_ind() self.potion_used(stone_ind, potion_index) return self._stones[stone_ind], stone_ind def existing_stone_nodes(self) -> List[graphs.Node]: """Returns a list of nodes for the remaining existing stones.""" return [self._stone_node(i) for i in self._existing_stones] def existing_stones(self) -> List[Stone]: """Returns a list of the remaining existing stones.""" return [self._stones[i] for i in self._existing_stones] def existing_potions(self) -> List[Potion]: """Returns a list of the remaining existing potions.""" return [self._potions[i] for i in self._existing_potions] def existing_items(self) -> utils.TrialItems: return utils.TrialItems( stones=self.existing_stones(), potions=self.existing_potions()) @property def num_stones(self) -> int: return len(self._existing_stones) @property def num_potions(self) -> int: return len(self._existing_potions) def check_have_potions(self, needed_potions: Sequence[Potion]) -> bool: """Checks that we have all the potions we need.""" need = collections.Counter([p.as_index for p in needed_potions]) have = collections.Counter([self._potions[p].as_index for p in self._existing_potions]) for k in need.keys(): if k not in have.keys(): return False else: if have[k] < need[k]: return False return True def get_stones_above_thresh( self, reward_weights: RewardWeights, threshold: int) -> List[int]: """Gets all the stones whose value exceeds the threshold passed in.""" current_vals = {i: reward_weights(self._stones[i].latent) for i in self._existing_stones} return [i for i, current_val in current_vals.items() if current_val > threshold] def use_stones_above_thresh( self, reward_weights: RewardWeights, threshold: int) -> None: """Uses all the stones whose value exceeds the threshold passed in.""" for i in self.get_stones_above_thresh(reward_weights, threshold): self.stone_used(i) def get_stone(self, ind: int) -> Stone: return self._stones[ind] def get_potion(self, ind: int) -> Potion: return self._potions[ind] @property def node_list(self) -> graphs.NodeList: return self._graph.node_list @property def edge_list(self) -> graphs.EdgeList: return self._graph.edge_list @property def stone_ind_to_idx(self) -> Dict[int, int]: return self._stone_ind_to_idx @property def stone_idx_to_ind(self) -> Dict[int, int]: return self._stone_idx_to_ind @property def potion_idx_to_ind(self) -> Dict[int, int]: return self._potion_idx_to_ind class TrialTracker(EventTracker): """Type which tracks all events in a trial.""" @abc.abstractmethod def events_list(self) -> List[Tuple[int, int, int]]: """Returns a list of stone index, potion index, val for the trial events.""" pass class MatrixEventTracker(TrialTracker): """Tracks the order of potion used and stone used events in matrix.""" def __init__(self, num_stones: int, num_potions: int): self.events = np.full( shape=(num_stones, num_potions + 1), fill_value=-1, dtype=int) super().__init__(name='matrix_event') def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None: """Records that a potion has been used. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. potion_ind: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used. -1 refers to the cauldron. val: The value to record in this event (typically the frame number that this event occurs). 
If this is not set then the value set will be arbitrary but will preserve the order in which the potion_used and stone_used functions are called. start_stone: The stone node before the potion is used. stone_inst: The instance id for the stone we are using. potion: The potion used. end_stone: The stone node after the potion is used. """ self.events[stone_ind, potion_ind] = val def events_list(self) -> List[Tuple[int, int, int]]: stone_used, potion_used = np.where(self.events != -1) frame = [self.events[x, y] for (x, y) in zip(stone_used, potion_used)] num_potions = self.events.shape[1] - 1 events = sorted(zip(stone_used, potion_used, frame), key=lambda x: x[2]) return [ (stone_ind, CAULDRON if potion_ind == num_potions else potion_ind, frame) for stone_ind, potion_ind, frame in events] ActionSequenceElement = Tuple[int, Mapping[str, Any], int, int] class ActionSequenceTracker(TrialTracker): """Tracks the order of potion used and stone used events in matrix.""" def __init__(self): self._action_sequence = [] super().__init__(name='action_sequence') def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None: """Records that a potion has been used. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. potion_ind: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used. -1 refers to the cauldron. val: The value to record in this event (typically the frame number that this event occurs). If this is not set then the value set will be arbitrary but will preserve the order in which the potion_used and stone_used functions are called. start_stone: The stone node before the potion is used. stone_inst: The instance id for the stone we are using. potion: The potion used. end_stone: The stone node after the potion is used. """ # add to action sequence action_dict = {'node': (start_stone.idx, start_stone.coords), 'stone_idx': stone_inst} # -1 corresponds to the cauldron and so there is no potion to remove and the # stone does not change if potion_ind == CAULDRON: action_dict['action'] = 'cauldron' else: # Change the stone in _stones action_dict['action'] = (potion.as_index, (potion.dimension, potion.direction)) action_dict['potion_idx'] = potion.idx action_dict['outcome_node'] = (end_stone.idx, end_stone.coords) self._action_sequence.append((val, action_dict, stone_ind, potion_ind)) @property def action_sequence(self) -> List[Tuple[int, Dict[str, Any], int, int]]: self._action_sequence.sort(key=lambda x: x[0]) return self._action_sequence def events_list(self) -> List[Tuple[int, int, int]]: return [(stone_ind, potion_ind, val) for val, _, stone_ind, potion_ind in self.action_sequence] class LatestOutcomeTracker(EventTracker): """Tracks the most recent outcome of using a potion.""" def __init__( self, potion_map: PotionMap, stone_map: StoneMap, rotation: np.ndarray): # -1 represents no change and is the default value for outcome. 
self.outcome = None self.type_based_action = None self._potion_map, self._stone_map = potion_map, stone_map self._rotation = rotation super().__init__(name='latest_outcome') def reset(self) -> None: self.outcome = None self.type_based_action = None def _perceived_stone(self, stone: graphs.Node): aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array( stone.coords))) return stones_and_potions.unalign(aligned_stone, self._rotation) def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: Optional[graphs.Node]) -> None: if end_stone is not None: aligned_stone = self._stone_map.apply_inverse(LatentStone(np.array( end_stone.coords))) self.outcome = stones_and_potions.unalign(aligned_stone, self._rotation) perceived_stone = self._perceived_stone(start_stone) if potion_ind == CAULDRON: self.type_based_action = utils.TypeBasedAction( stone=perceived_stone, cauldron=True) else: perceived_potion = self._potion_map.apply_inverse(LatentPotion( potion.dimension, potion.direction)) self.type_based_action = utils.TypeBasedAction( stone=perceived_stone, potion=perceived_potion) def failed_potion_use( self, stone_ind: int, start_stone: graphs.Node, stone_inst: int): """Optional callback when a potion use is attempted but fails.""" self.outcome = None perceived_stone = self._perceived_stone(start_stone) # This is an invalid action but the stone type can be used for # visualization. self.type_based_action = utils.TypeBasedAction(stone=perceived_stone) class RewardTracker(EventTracker): """Tracks the reward obtained.""" def __init__(self, reward_weights: RewardWeights): self._reward = 0 self._reward_weights = reward_weights super().__init__(name='reward') def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None: """Adds reward when a potion has been used. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. potion_ind: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used. -1 refers to the cauldron. val: The value to record in this event (typically the frame number that this event occurs). If this is not set then the value set will be arbitrary but will preserve the order in which the potion_used and stone_used functions are called. start_stone: The stone node before the potion is used. stone_inst: The instance id for the stone we are using. potion: The potion used. end_stone: The stone node after the potion is used. """ if potion_ind == CAULDRON: self._reward += self._reward_weights(start_stone.coords) @property def reward(self) -> int: return self._reward class ItemsUsedTracker(EventTracker): """Tracks the stones and potions used.""" def __init__(self): self.potions_used = [] self.stones_used = [] super().__init__(name='items_used') def potion_used( self, stone_ind: int, potion_ind: int, val: int, start_stone: graphs.Node, stone_inst: int, potion: Potion, end_stone: graphs.Node) -> None: """Keeps lists of potions and stones which have been used. Args: stone_ind: The index (into the list of stones originally passed to the EventTracker in construction) for the stone used in the potion. potion_ind: The index (into the list of potions originally passed to the EventTracker in construction) for the potion used. -1 refers to the cauldron. 
val: The value to record in this event (typically the frame number that this event occurs). This is not relevant for this tracker. start_stone: The stone node before the potion is used. stone_inst: The instance id for the stone we are using. potion: The potion used. end_stone: The stone node after the potion is used. """ if potion_ind == CAULDRON: self.stones_used.append(stone_ind) else: self.potions_used.append(potion_ind) @property def num_potions_used(self) -> int: return len(self.potions_used) @property def num_stones_used(self) -> int: return len(self.stones_used) class Event(abc.ABC): """Abstract base class for events we want to check in the event tracker.""" @abc.abstractmethod def next_occurrence( self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]: pass def occurs(self, events: np.ndarray) -> bool: event_start, _, _ = self.next_occurrence(events) not_occurred = event_start == NEVER_USED return not not_occurred class SingleEvent(Event): """A single event where a stone is used with one of a set of potions.""" def __init__(self, stone_ind: int, potion_inds: Set[int]): self.stone_ind = stone_ind self.potion_inds = potion_inds def next_occurrence( self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]: """Gets the next occurrence of this event. Args: events: numpy array of stones against potions with the last entry corresponding to the cauldron with a -1 in places where that stone was never used with that potion and the time of usage otherwise. Returns: When event starts, when event ends, which potions were used by event. """ frames_potions = [(events[self.stone_ind, p], p) for p in self.potion_inds if events[self.stone_ind, p] >= 0] if not frames_potions: return NEVER_USED, NEVER_USED, None frame, potion_used = min(frames_potions, key=lambda v: v[0]) return frame, frame, {potion_used} class AnyOrderEvents(Event): """A set of events which can happen in any order.""" def __init__(self, set_events: Set[Event]): self.set_events = set_events def next_occurrence( self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]: """Gets the next occurrence of this event. Args: events: numpy array of stones against potions with the last entry corresponding to the cauldron with a -1 in places where that stone was never used with that potion and the time of usage otherwise. Returns: When event starts, when event ends, which potions were used by event. """ results = [e.next_occurrence(events) for e in self.set_events] if any(v[0] == NEVER_USED for v in results): return NEVER_USED, NEVER_USED, None return (min(v[0] for v in results), max(v[1] for v in results), set(itertools.chain.from_iterable([v[2] for v in results]))) class OrderedEvents(Event): """A list of events which must happen in the order passed in.""" def __init__(self, iter_events: Sequence[Event]): self.iter_events = iter_events def next_occurrence( self, events: np.ndarray) -> Tuple[int, int, Optional[Set[int]]]: """Gets the next occurrence of this event. Args: events: numpy array of stones against potions with the last entry corresponding to the cauldron with a -1 in places where that stone was never used with that potion and the time of usage otherwise. Returns: When event starts, when event ends, which potions were used by event. 
""" results = [e.next_occurrence(events) for e in self.iter_events] if any(v[0] == NEVER_USED for v in results): return NEVER_USED, NEVER_USED, None for end_first, start_next in zip([v[1] for v in results[:-1]], [v[0] for v in results[1:]]): # If the events happen on the same step this is allowed. if end_first > start_next: return NEVER_USED, NEVER_USED, None return (results[0][0], results[-1][1], set(itertools.chain.from_iterable([v[2] for v in results]))) def replay_events(game_state: GameState, event_tracker: TrialTracker) -> None: for stone_ind, potion_ind, val in event_tracker.events_list(): if potion_ind == CAULDRON: game_state.stone_used(stone_ind=stone_ind, val=val) else: game_state.potion_used( stone_ind=stone_ind, potion_ind=potion_ind, val=val) def matrix_events_to_action_sequence( graph: Graph, items: utils.TrialItems, matrix_events: MatrixEventTracker ) -> List[ActionSequenceElement]: """Takes events/output of evaluation analysis and creates an event tracker.""" action_sequence_tracker = ActionSequenceTracker() game_state = GameState( trial_items=items, graph=graph, event_trackers=[action_sequence_tracker]) if matrix_events.events.shape != (items.num_stones, items.num_potions + 1): raise ValueError( 'Matrix of events shape does not match the number of stones and ' 'potions present.') replay_events(game_state, matrix_events) return action_sequence_tracker.action_sequence
dm_alchemy-master
dm_alchemy/event_tracker.py
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for dm_alchemy.load_from_docker.""" from absl import flags from absl.testing import absltest from absl.testing import parameterized import dm_alchemy from dm_env import test_utils FLAGS = flags.FLAGS flags.DEFINE_string( 'docker_image_name', None, 'Name of the Docker image that contains the DM Alchemy environment. ' 'If None, uses the default name') _TEST_LEVEL = ('alchemy/perceptual_mapping_randomized_with_rotation_and_random' '_bottleneck') _TEST_LEVELS = ( 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck', 'alchemy/all_fixed', 'alchemy/all_fixed_w_shaping', 'alchemy/evaluation_episodes/321', ) class LoadFromDockerTest(test_utils.EnvironmentTestMixin, absltest.TestCase): def make_object_under_test(self): return dm_alchemy.load_from_docker( name=FLAGS.docker_image_name, settings=dm_alchemy.EnvironmentSettings( seed=123, level_name=_TEST_LEVEL)) class AlchemyTest(parameterized.TestCase): @parameterized.parameters(*_TEST_LEVELS) def test_load_level(self, level_name): self.assertIsNotNone( dm_alchemy.load_from_docker( name=FLAGS.docker_image_name, settings=dm_alchemy.EnvironmentSettings( seed=123, level_name=level_name))) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/load_from_docker_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for partial array specs.""" import pickle from absl.testing import absltest from absl.testing import parameterized from dm_alchemy import partial_array_specs import numpy as np class PartialArrayTest(parameterized.TestCase): @parameterized.parameters( dict(spec_shape=(1, 2), is_valid=True), dict(spec_shape=(-1, 2), is_valid=True), dict(spec_shape=(1, -2), is_valid=False), dict(spec_shape=(-1, -1), is_valid=False), ) def testShapeValueError(self, spec_shape, is_valid): if is_valid: partial_array_specs.PartialArray(spec_shape, np.int32) else: with self.assertRaises(ValueError): partial_array_specs.PartialArray(spec_shape, np.int32) @parameterized.parameters( dict(value=np.zeros((1, 2), dtype=np.int32), is_valid=True), dict(value=np.zeros((2, 2), dtype=np.int32), is_valid=True), dict(value=np.zeros((2, 3), dtype=np.int32), is_valid=False), dict(value=np.zeros((1, 2, 3), dtype=np.int32), is_valid=False, error_format=partial_array_specs._INVALID_SHAPE_LEN), ) def testValidateShape( self, value, is_valid, error_format=partial_array_specs._INVALID_SHAPE): spec = partial_array_specs.PartialArray((-1, 2), np.int32) if is_valid: # Should not raise any exception. spec.validate(value) else: with self.assertRaisesWithLiteralMatch( ValueError, error_format % (value.shape, spec.shape)): spec.validate(value) def testGenerateValue(self): spec = partial_array_specs.PartialArray((2, -1), np.int32) test_value = spec.generate_value() spec.validate(test_value) def testSerialization(self): desc = partial_array_specs.PartialArray([-1, 5], np.float32, "test") self.assertEqual(pickle.loads(pickle.dumps(desc)), desc) if __name__ == "__main__": absltest.main()
dm_alchemy-master
dm_alchemy/partial_array_specs_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Create graph from unity type.""" from typing import Sequence from dm_alchemy.protos import alchemy_pb2 from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions def create_graph( potions: Sequence[stones_and_potions.Potion], chemistry: alchemy_pb2.Chemistry ) -> graphs.Graph: """Creates the graph from the chemistry event and the existing potions. Args: potions: list of Potion objects chemistry: a Chemistry event containing bottleneck topology Returns: A Graph object """ vertices = chemistry.stones potion_effects = chemistry.potions node_list = graphs.NodeList() edge_list = graphs.EdgeList() for vertex in vertices: idx = vertex.latent.index coord = vertex.latent.coordinates node_list.add_node(graphs.Node(idx, coord)) for potion in potions: dimension = potion.dimension direction = potion.direction allowable_effects = [effect.reactions for effect in potion_effects if \ effect.label.dimension_index == dimension \ and effect.label.direction == direction][0] for effect in allowable_effects: from_node = node_list.get_node_by_idx(effect.from_stone_index) to_node = node_list.get_node_by_idx(effect.to_stone_index) edge_list.add_edge(from_node, to_node, potion) return graphs.Graph(node_list, edge_list)
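# A minimal sketch, not part of the original module: the SimpleNamespace
# objects below are hypothetical stand-ins for the Chemistry proto messages,
# populating only the fields that create_graph above actually reads
# (stones[*].latent.{index, coordinates} and
# potions[*].{label.{dimension_index, direction}, reactions}). The Potion
# constructor arguments assume the (idx, dimension, direction) order used
# elsewhere in this repo.


def _create_graph_demo() -> graphs.Graph:
  """Builds a tiny two-node graph from hand-made chemistry stand-ins."""
  from types import SimpleNamespace  # Local import keeps the sketch self-contained.

  stones = [
      SimpleNamespace(latent=SimpleNamespace(index=0, coordinates=[-1, -1, -1])),
      SimpleNamespace(latent=SimpleNamespace(index=1, coordinates=[1, -1, -1])),
  ]
  # A single effect: moving along dimension 0 in the positive direction takes
  # stone 0 to stone 1.
  potion_effects = [SimpleNamespace(
      label=SimpleNamespace(dimension_index=0, direction=1),
      reactions=[SimpleNamespace(from_stone_index=0, to_stone_index=1)])]
  fake_chemistry = SimpleNamespace(stones=stones, potions=potion_effects)

  # One potion instance (idx=0) acting on dimension 0, positive direction.
  potions = [stones_and_potions.Potion(0, 0, 1)]
  return create_graph(potions, fake_chemistry)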
dm_alchemy-master
dm_alchemy/types/graph_from_unity_type.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ from absl.testing import absltest from dm_alchemy.types import graphs class GraphsTest(absltest.TestCase): def test_graph_constraint_conversion(self): constraints = graphs.possible_constraints() for constraint in constraints: graph = graphs.create_graph_from_constraint(constraint) new_constraint = graphs.constraint_from_graph(graph) self.assertEqual(constraint, new_constraint) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/types/graphs_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Classes and functions for stones and potions used in ideal observer.""" import collections import copy import itertools import math from typing import Counter, Dict, List, MutableSequence, NewType, Optional, Reversible, Sequence, Tuple import dataclasses from dm_alchemy.types import helpers import numpy as np from scipy.spatial import transform import scipy.special CAULDRON = -1 @dataclasses.dataclass class RewardWeights: """Stores parameters defining the reward of a stone and apply them.""" coefficients: Sequence[int] offset: int bonus: int def __call__(self, coords: Sequence[int]): """Gets value of latent coords, must be a list from set [-1, 1].""" node_value = np.dot(coords, self.coefficients) + self.offset if np.all([x == 1 for x in coords]): node_value += self.bonus return node_value _POSS_DIRS = (-1, 1) _POSS_AXES = (0, 1, 2) POSS_REWARDS = (-3, -1, 1, 3) def get_num_axes() -> int: return len(_POSS_AXES) def get_num_dirs() -> int: return len(_POSS_DIRS) def max_reward() -> int: return max(POSS_REWARDS) def poss_reward_to_index(reward: int) -> int: # Possible rewards are -3, -1, 1, 3 convert this to 0, 1, 2, 3. return (reward + 3) // 2 def index_to_poss_reward(ind: int) -> int: # Inverse of poss_reward_to_index. return (ind * 2) - 3 def dir_to_index(direction: int) -> int: # Possible directions are -1, 1 convert this to 0, 1. return (direction + 1) // 2 def index_to_dir(ind: int) -> int: # Inverse of dir_to_index. return (ind * 2) - 1 def unknown_dir_to_index(direction: int) -> int: # Possible directions are -1, 1, helpers.UNKNOWN convert this to 0, 1, 2. if direction == helpers.UNKNOWN: return len(_POSS_DIRS) return dir_to_index(direction) def index_to_unknown_dir(ind: int) -> int: # Inverse of unknown_dir_to_index. if ind == len(_POSS_DIRS): return helpers.UNKNOWN return index_to_dir(ind) def coords_to_index(coords: Reversible[int]) -> int: # Possible directions are -1, 1 convert this to 0, 1. # Coords are reversed to match the way unity converts between coords and # indices. return int(np.ravel_multi_index( tuple(dir_to_index(c) for c in reversed(coords)), tuple(len(_POSS_DIRS) for _ in _POSS_AXES))) def index_to_coords(ind: int) -> np.ndarray: # Inverse of coords_to_index. return np.array([index_to_dir(int(i)) for i in reversed(np.unravel_index( ind, tuple(len(_POSS_DIRS) for _ in _POSS_AXES)))], object) def get_all_dim_ordering() -> List[Tuple[int, ...]]: # return a sorted list of all possible orderings of axes all_dim_ordering = sorted(set(itertools.permutations(_POSS_AXES))) return all_dim_ordering AlignedStoneIndex = NewType('AlignedStoneIndex', int) class AlignedStone: """The perceived stone with perceptual features rotated to align with axes.""" # The number of possible perceived stones 32: 2 possible coords on each of 3 # dimensions and 4 possible rewards. 
num_dir_assignments = len(_POSS_DIRS) ** len(_POSS_AXES) num_types = len(POSS_REWARDS) * num_dir_assignments num_bits = math.ceil(math.log2(num_types)) # For space efficiency in the ideal observer we do not allow more than 3 # stones per trial. max_present = 3 count_num_bits = math.ceil(math.log2(max_present)) def __init__(self, reward: int, coords: np.ndarray): self.reward = reward self.aligned_coords = coords def __hash__(self) -> int: return hash((self.reward, tuple(self.aligned_coords))) def __eq__(self, other: 'AlignedStone') -> bool: return (self.reward == other.reward and np.array_equal(self.aligned_coords, other.aligned_coords)) def __repr__(self) -> str: return ('AlignedStone(reward={reward}, ' 'coords={coords})'.format( reward=self.reward, coords=helpers.str_np_array_construct(self.aligned_coords))) def index(self) -> AlignedStoneIndex: coords_index = self.coords_only_index() reward_index = poss_reward_to_index(self.reward) return AlignedStoneIndex(np.ravel_multi_index( (coords_index, reward_index), (AlignedStone.num_dir_assignments, len(POSS_REWARDS)))) def coords_only_index(self) -> int: return coords_to_index(self.aligned_coords) def swap_dims(self, dim_map: Sequence[int]) -> None: aligned_coords = np.copy(self.aligned_coords) for i, new_dim in enumerate(dim_map): self.aligned_coords[new_dim] = aligned_coords[i] def aligned_stone_from_index(ind: AlignedStoneIndex) -> AlignedStone: coords_index, reward_index = np.unravel_index( ind, (AlignedStone.num_dir_assignments, len(POSS_REWARDS))) coords = index_to_coords(int(coords_index)) reward = index_to_poss_reward(int(reward_index)) return AlignedStone(reward, coords) def possible_aligned_stones() -> List[AlignedStone]: return [aligned_stone_from_index(AlignedStoneIndex(i)) for i in range(AlignedStone.num_types)] def possible_aligned_stones_coords_only() -> List[AlignedStone]: return [AlignedStone(-3, stone.latent_coords) for stone in possible_latent_stones()] def random_aligned_stone( random_state: np.random.RandomState) -> AlignedStone: return aligned_stone_from_index( random_state.randint(0, AlignedStone.num_types)) def aligned_stones_with_coords( coords: 'LatentStone') -> List[AlignedStone]: return [AlignedStone(reward, coords.latent_coords) for reward in POSS_REWARDS] class PerceivedStone: """The perceived reward and coordinates (in perceptual space) of a stone.""" def __init__(self, reward: int, coords: np.ndarray): self.reward = reward self.perceived_coords = coords def __hash__(self) -> int: return hash((self.reward, tuple(self.perceived_coords))) def __eq__(self, other: 'PerceivedStone') -> bool: return (self.reward == other.reward and np.array_equal(self.perceived_coords, other.perceived_coords)) def __repr__(self) -> str: return ('PerceivedStone(reward={reward}, ' 'coords={coords})'.format( reward=self.reward, coords=helpers.str_np_array_construct(self.perceived_coords))) def align_coords( perceived_stone: PerceivedStone, rotation: np.ndarray ) -> np.ndarray: return np.matmul(rotation, perceived_stone.perceived_coords.astype(int)) def aligns( perceived_stone: PerceivedStone, rotation: np.ndarray ) -> Tuple[bool, np.ndarray]: coords = align_coords(perceived_stone, rotation) # Valid if all coords are close to 1 or -1. 
valid = all(abs(abs(apc) - 1.0) < 0.0001 for apc in coords) return valid, coords def aligned_stone_from_coords(coords: np.ndarray, reward: int) -> AlignedStone: coords = np.array([ int(round(i)) for i in coords], dtype=object) return AlignedStone(reward, coords) def align( perceived_stone: PerceivedStone, rotation: np.ndarray ) -> AlignedStone: """Gets aligned stone from perceived stone given the rotation.""" valid, coords = aligns(perceived_stone, rotation) if not valid: raise ValueError('Rotation passed does not align stone.') return aligned_stone_from_coords(coords, perceived_stone.reward) def unalign( aligned_stone: AlignedStone, rotation: np.ndarray ) -> PerceivedStone: """Get perceived stone from aligned stone given the rotation.""" perceived_coords = np.matmul( np.linalg.inv(rotation), aligned_stone.aligned_coords.astype(int)) # All coords should be close to 1, 0 or -1. if not all(abs(apc) < 0.0001 or abs(abs(apc) - 1.0) < 0.0001 for apc in perceived_coords): raise ValueError('Rotation does not take aligned stone to integer coords.') perceived_coords = np.array([ int(round(i)) for i in perceived_coords], dtype=object) return PerceivedStone(aligned_stone.reward, perceived_coords) def get_new_mapping_to_old_mapping( prev_rotation: np.ndarray, new_rotation: np.ndarray, observed_stone: AlignedStone ) -> 'StoneMap': """Maps aligned stones under previous rotation to aligned stones under new rotation.""" # Apply inv new_rotation to aligned coords to get the new set of perceived # coords, then apply prev_rotation to these, coordinates that end up valid # could have been learned about so need to go into the mapping. perceived_stone = unalign(observed_stone, prev_rotation) new_aligned = align(perceived_stone, new_rotation) return StoneMap(np.where(observed_stone.aligned_coords == new_aligned.aligned_coords, 1, -1)) def rotation_from_angles(angles: Sequence[float]) -> np.ndarray: """Gets rotation matrix from list of angles, scaling as required.""" if not any(angles): return np.eye(3, dtype=int) rotation = transform.Rotation.from_euler( 'xyz', angles, degrees=True).as_matrix() scale = np.diag([1.0 if angle else math.sqrt(2) for angle in angles]) transformation = np.matmul(scale, rotation) if not all(abs(c) < 0.0001 or abs(abs(c) - 1.0) < 0.0001 for c in transformation.reshape((-1,))): raise ValueError( 'Transformation should be all -1, 0 or 1 but is ' + str(transformation) + ' for angles ' + str(angles)) return transformation.astype(int) def rotation_to_angles(rotation: np.ndarray) -> Sequence[float]: # First scale it. column_norm = np.linalg.norm(rotation, axis=0) s = np.diag(1 / column_norm) rotation = np.matmul(s, rotation) return transform.Rotation.from_matrix(rotation).as_euler( 'xyz', degrees=True).tolist() def possible_rotations() -> List[np.ndarray]: list_angles = [[0, 0, 0], [0, 0, -45], [0, -45, 0], [-45, 0, 0]] return [rotation_from_angles(angles) for angles in list_angles] def random_rotation(random_state: np.random.RandomState): poss_rotations = possible_rotations() return poss_rotations[random_state.choice(len(poss_rotations))] def rotations_equal(rotation1: np.ndarray, rotation2: np.ndarray): return rotation_to_angles(rotation1) == rotation_to_angles(rotation2) LatentStoneIndex = NewType('LatentStoneIndex', int) class LatentStone: """The latent coordinates of a stone.""" # The number of possible latent stones 8: 2 possible coords on each of 3 # dimensions. 
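  # For example: num_types = 2**3 = 8, which needs num_bits = 3 bits.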
num_types = len(_POSS_DIRS) ** len(_POSS_AXES) num_bits = math.ceil(math.log2(num_types)) def __init__(self, coords: np.ndarray): self.latent_coords = coords def reward(self) -> int: return int(sum(self.latent_coords)) def __hash__(self) -> int: return hash(tuple(self.latent_coords)) def __eq__(self, other: 'LatentStone') -> bool: return np.array_equal(self.latent_coords, other.latent_coords) def __repr__(self) -> str: return 'LatentStone(coords={coords})'.format( coords=helpers.str_np_array_construct(self.latent_coords)) def index(self) -> LatentStoneIndex: return LatentStoneIndex(coords_to_index(self.latent_coords)) def latent_stone_from_index(ind: LatentStoneIndex) -> LatentStone: return LatentStone(index_to_coords(ind)) def random_latent_stone(random_state: np.random.RandomState) -> LatentStone: return latent_stone_from_index( random_state.randint(0, LatentStone.num_types)) def possible_latent_stones() -> List[LatentStone]: return [latent_stone_from_index(LatentStoneIndex(ind)) for ind in range(LatentStone.num_types)] StoneMapIndex = NewType('StoneMapIndex', int) class StoneMap: """A map that takes stones from perceptual to latent coordinates.""" # The number of possible stone maps 8: (2 possible directions on each of 3 # dimensions). The assignment of perceived axes to latent axes can be chosen # arbitrarily given that all oriented graphs and assignment of potions to # dimensions and directions are considered. num_types = len(_POSS_DIRS) ** len(_POSS_AXES) num_bits = math.ceil(math.log2(num_types)) def __init__(self, pos_dir: np.ndarray): self.latent_pos_dir = pos_dir def __hash__(self) -> int: return hash(tuple(self.latent_pos_dir)) def __eq__(self, other: 'StoneMap') -> bool: return np.array_equal(self.latent_pos_dir, other.latent_pos_dir) def __repr__(self) -> str: return ('StoneMap(pos_dir={pos_dir})'.format( pos_dir=helpers.str_np_array_construct(self.latent_pos_dir))) def apply(self, stone: AlignedStone) -> LatentStone: return LatentStone(np.where(stone.aligned_coords == self.latent_pos_dir, 1, -1)) def apply_inverse(self, stone: LatentStone) -> AlignedStone: # Map is actually equal to its inverse return AlignedStone(stone.reward(), np.where( stone.latent_coords == self.latent_pos_dir, 1, -1)) def apply_to_potion(self, potion: 'LatentPotion') -> 'LatentPotion': return LatentPotion( potion.latent_dim, potion.latent_dir * self.latent_pos_dir[potion.latent_dim]) def index(self) -> StoneMapIndex: return StoneMapIndex(coords_to_index(self.latent_pos_dir)) def swap_dims(self, dim_map: Sequence[int]) -> None: latent_pos_dir = np.copy(self.latent_pos_dir) for i, new_dim in enumerate(dim_map): self.latent_pos_dir[new_dim] = latent_pos_dir[i] def chain(self, new_to_old: 'StoneMap') -> None: # Combine this map with the one passed in. 
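    # Composing two per-axis sign maps is just the elementwise product of
    # their +/-1 direction vectors: axes where the maps agree stay positive
    # and axes where they differ flip.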
new_to_reward = np.where( self.latent_pos_dir == new_to_old.latent_pos_dir, 1, -1) self.latent_pos_dir = new_to_reward def consistent_with_stones( self, aligned_stones: Sequence[AlignedStone] ) -> bool: for aligned_stone in aligned_stones: if self.apply(aligned_stone).reward() != aligned_stone.reward: return False return True def stone_map_from_index(ind: StoneMapIndex) -> StoneMap: return StoneMap(index_to_coords(ind)) def random_stone_map(random_state: np.random.RandomState) -> StoneMap: return stone_map_from_index(random_state.randint(0, StoneMap.num_types)) def possible_stone_maps() -> List[StoneMap]: return [stone_map_from_index(StoneMapIndex(i)) for i in range(StoneMap.num_types)] def stone_map_distr( stone_maps: Sequence[StoneMapIndex] ) -> Dict[StoneMapIndex, float]: """Returns a distribution over possible stone maps.""" # I think it is valid to assume the latent space axis corresponds directly to # the perceptual space axis since we do not make this assumption for potion # colours. return {s: 1 / len(stone_maps) for s in stone_maps} def all_fixed_stone_map() -> StoneMap: return StoneMap(pos_dir=np.array([1, 1, 1])) PartialStoneMapIndex = NewType('PartialStoneMapIndex', int) class PartialStoneMap(StoneMap): """Partial info on the map from stone perceptual space to latent space.""" # The number of possible partial stone maps 27: (3 possible directions # including unknown on each of 3 dimensions). num_types = (len(_POSS_DIRS) + 1) ** len(_POSS_AXES) num_bits = math.ceil(math.log2(num_types)) def matches(self, other: StoneMap) -> bool: return all(d == helpers.UNKNOWN or d == other_d for d, other_d in zip(self.latent_pos_dir, other.latent_pos_dir)) def fill_gaps(self) -> List[StoneMap]: """Returns all stone maps possible given the partial information.""" return [StoneMap(np.array(m)) for m in itertools.product( *[_POSS_DIRS if d == helpers.UNKNOWN else [d] for d in self.latent_pos_dir])] def update(self, axis: int, pos_dir: int) -> None: self.latent_pos_dir = tuple(d if i != axis else pos_dir for i, d in enumerate(self.latent_pos_dir)) def index(self) -> PartialStoneMapIndex: return PartialStoneMapIndex(np.ravel_multi_index( tuple(unknown_dir_to_index(d) for d in self.latent_pos_dir), tuple(len(_POSS_DIRS) + 1 for _ in self.latent_pos_dir))) def partial_stone_map_from_index(ind: PartialStoneMapIndex) -> PartialStoneMap: pos_dir_indices = np.unravel_index( ind, tuple(len(_POSS_DIRS) + 1 for _ in _POSS_AXES)) pos_dir = np.array( [index_to_unknown_dir(int(d)) for d in pos_dir_indices], object) return PartialStoneMap(pos_dir) def partial_stone_map_from_possibles( possibles: Sequence[StoneMapIndex]) -> PartialStoneMap: all_poss = np.stack((stone_map_from_index(s).latent_pos_dir for s in possibles)) uniques = [np.unique(all_poss[:, i]) for i in range(get_num_axes())] return PartialStoneMap(np.array([u[0] if len(u) == 1 else helpers.UNKNOWN for u in uniques])) def partial_stone_map_from_single_obs( axis: int, pos_dir: int) -> PartialStoneMap: return PartialStoneMap(np.array([pos_dir if i == axis else helpers.UNKNOWN for i in _POSS_AXES])) def possible_partial_stone_maps() -> List[PartialStoneMap]: return [partial_stone_map_from_index(PartialStoneMapIndex(i)) for i in range(PartialStoneMap.num_types)] PerceivedPotionIndex = NewType('PerceivedPotionIndex', int) class PerceivedPotion: """The perceived dimension and direction (in perceptual space) of a potion.""" # The number of possible perceived potions 6: one per axis and direction. 
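  # For example: num_types = 3 * 2 = 6, which needs num_bits = 3 bits.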
num_types = len(_POSS_DIRS) * len(_POSS_AXES) num_bits = math.ceil(math.log2(num_types)) # For space efiiciency in the ideal observer we do not allow more than 12 # potions per trial. max_present = 12 count_num_bits = math.ceil(math.log2(max_present)) def __init__(self, perceived_dim: int, perceived_dir: int): self.perceived_dim = perceived_dim self.perceived_dir = perceived_dir def __hash__(self) -> int: return hash((self.perceived_dim, self.perceived_dir)) def __eq__(self, other: 'PerceivedPotion') -> bool: return (self.perceived_dim == other.perceived_dim and self.perceived_dir == other.perceived_dir) def __repr__(self) -> str: return ('PerceivedPotion(perceived_dim={perceived_dim}, ' 'perceived_dir={perceived_dir})'.format( perceived_dim=self.perceived_dim, perceived_dir=self.perceived_dir)) def index(self) -> PerceivedPotionIndex: return PerceivedPotionIndex(np.ravel_multi_index( (self.perceived_dim, dir_to_index(self.perceived_dir)), (len(_POSS_AXES), len(_POSS_DIRS)))) def perceived_potion_from_index(ind: PerceivedPotionIndex) -> PerceivedPotion: perceived_dim, perceived_dir = np.unravel_index( ind, (len(_POSS_AXES), len(_POSS_DIRS))) return PerceivedPotion(int(perceived_dim), index_to_dir(int(perceived_dir))) def possible_perceived_potions() -> List[PerceivedPotion]: return [perceived_potion_from_index(PerceivedPotionIndex(i)) for i in range(PerceivedPotion.num_types)] def random_perceived_potion( random_state: np.random.RandomState) -> PerceivedPotion: return perceived_potion_from_index( random_state.randint(0, PerceivedPotion.num_types)) LatentPotionIndex = NewType('LatentPotionIndex', int) class LatentPotion: """The latent space dimension and direction of a potion.""" # The number of possible latent potions 6: one per axis and direction. num_types = len(_POSS_DIRS) * len(_POSS_AXES) num_bits = math.ceil(math.log2(num_types)) def __init__(self, latent_dim: int, latent_dir: int): self.latent_dim = latent_dim self.latent_dir = latent_dir def __hash__(self) -> int: return hash((self.latent_dim, self.latent_dir)) def __eq__(self, other: 'LatentPotion') -> bool: return (self.latent_dim == other.latent_dim and self.latent_dir == other.latent_dir) def __repr__(self) -> str: return ('LatentPotion(latent_dim={latent_dim}, latent_dir={latent_dir})'. format(latent_dim=self.latent_dim, latent_dir=self.latent_dir)) def index(self) -> LatentPotionIndex: return LatentPotionIndex(np.ravel_multi_index( (self.latent_dim, dir_to_index(self.latent_dir)), (len(_POSS_AXES), len(_POSS_DIRS)))) def latent_potion_from_index(ind: LatentPotionIndex) -> LatentPotion: latent_dim, latent_dir = np.unravel_index( ind, (len(_POSS_AXES), len(_POSS_DIRS))) return LatentPotion(int(latent_dim), index_to_dir(int(latent_dir))) def random_latent_potion(random_state: np.random.RandomState) -> LatentPotion: return latent_potion_from_index( random_state.randint(0, LatentPotion.num_types)) def possible_latent_potions() -> List[LatentPotion]: return [latent_potion_from_index(LatentPotionIndex(ind)) for ind in range(LatentPotion.num_types)] PotionMapIndex = NewType('PotionMapIndex', int) class PotionMap: """A map that takes potions from perceptual to latent coordinates.""" # The number of possible potion maps - 6 permutations of the 3 axes and 2 # possible directions on each of the 3 axes. 
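  # For example: num_types = 3! * 2**3 = 6 * 8 = 48, which needs
  # num_bits = 6 bits.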
num_axis_assignments = math.factorial(len(_POSS_AXES)) num_dir_assignments = len(_POSS_DIRS) ** len(_POSS_AXES) num_types = num_axis_assignments * num_dir_assignments num_bits = math.ceil(math.log2(num_types)) def __init__(self, dim_map: Sequence[int], dir_map: Sequence[int]): # We may update these so specify that they are mutable. self.dim_map = dim_map # type: MutableSequence[int] self.dir_map = dir_map # type: MutableSequence[int] def __hash__(self) -> int: return hash((tuple(self.dim_map), tuple(self.dir_map))) def __eq__(self, other: 'PotionMap') -> bool: return self.dim_map == other.dim_map and self.dir_map == other.dir_map def __repr__(self) -> str: return 'PotionMap(dim_map={dim_map}, dir_map={dir_map})'.format( dim_map=self.dim_map, dir_map=self.dir_map) def apply(self, potion: PerceivedPotion) -> LatentPotion: latent_dim = self.dim_map[potion.perceived_dim] latent_dir = 1 if self.dir_map[latent_dim] == potion.perceived_dir else -1 return LatentPotion(latent_dim, latent_dir) def apply_inverse(self, potion: LatentPotion) -> PerceivedPotion: if self.dir_map[potion.latent_dim] == potion.latent_dir: perceived_dir = 1 else: perceived_dir = -1 inverse_dim_map = [0, 0, 0] for i, d in enumerate(self.dim_map): inverse_dim_map[d] = i perceived_dim = inverse_dim_map[potion.latent_dim] return PerceivedPotion(perceived_dim, perceived_dir) def index(self, perm_index_to_index: np.ndarray) -> PotionMapIndex: dim_index = helpers.perm_to_index(self.dim_map, perm_index_to_index) dir_index = coords_to_index(self.dir_map) return PotionMapIndex(np.ravel_multi_index( (dim_index, dir_index), (PotionMap.num_axis_assignments, PotionMap.num_dir_assignments))) def potion_map_from_index( ind: PotionMapIndex, index_to_perm_index: np.ndarray) -> PotionMap: dim_map_index, dir_map_index = np.unravel_index( ind, (PotionMap.num_axis_assignments, PotionMap.num_dir_assignments)) dim_map = helpers.perm_from_index( int(dim_map_index), len(_POSS_AXES), index_to_perm_index) dir_map = index_to_coords(int(dir_map_index)).tolist() return PotionMap(dim_map, dir_map) def random_potion_map( index_to_perm_index: np.ndarray, random_state: np.random.RandomState ) -> PotionMap: return potion_map_from_index(random_state.randint(0, PotionMap.num_types), index_to_perm_index) def possible_potion_maps( index_to_perm_index: np.ndarray) -> List[PotionMap]: return [potion_map_from_index(PotionMapIndex(i), index_to_perm_index) for i in range(PotionMap.num_types)] def potion_map_distr( potion_maps: np.ndarray) -> Dict[PotionMapIndex, float]: """Makes a uniform distribution over the possible potion maps.""" return {p: 1 / len(potion_maps) for p in potion_maps} def all_fixed_potion_map() -> PotionMap: return PotionMap(dim_map=[0, 1, 2], dir_map=[1, 1, 1]) PartialPotionMapIndex = Tuple[int, int] class PartialPotionMap(PotionMap): """Partial info on the map from potion perceptual space to latent space.""" # The number of possible partial potion maps - 6 normal permutations of the 3 # axes plus 3 ways to select 1 known axis and 3 ways to place it and 3 # possible directions on each of the 3 axes. 
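  # With k of the 3 perceived axes assigned there are comb(3, k)**2 * k!
  # partial assignments; k = 2 is skipped below since fixing two assignments
  # determines the third, giving 1 + 9 + 6 = 16 axis assignments. Counting
  # unknown as a third direction value gives 3**3 = 27 direction assignments,
  # so num_types = 16 * 27 = 432.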
num_axis_assignments = sum( math.factorial(k) * (int(scipy.special.comb(len(_POSS_AXES), k)) ** 2) for k in itertools.chain(range(len(_POSS_AXES) - 1), [len(_POSS_AXES)])) num_dir_assignments = (len(_POSS_DIRS) + 1) ** len(_POSS_AXES) num_types = num_axis_assignments * num_dir_assignments num_bits_axis = math.ceil(math.log2(num_axis_assignments)) num_bits_dir = math.ceil(math.log2(num_dir_assignments)) def can_map(self, potion: PerceivedPotion) -> bool: latent_dim = self.dim_map[potion.perceived_dim] if latent_dim == helpers.UNKNOWN: return False return self.dir_map[latent_dim] != helpers.UNKNOWN def matches(self, other: PotionMap) -> bool: return (all(d == helpers.UNKNOWN or d == other_d for d, other_d in zip(self.dim_map, other.dim_map)) and all(d == helpers.UNKNOWN or d == other_d for d, other_d in zip(self.dir_map, other.dir_map))) def fill_gaps(self) -> List[PotionMap]: """Gets all potion maps possible given this partial info.""" set_vals = {d for d in self.dim_map if d != helpers.UNKNOWN} dims_to_set = [i for i, d in enumerate(self.dim_map) if d == helpers.UNKNOWN] remaining_vals = {i for i in _POSS_AXES}.difference(set_vals) new_dim_maps = [] for orders in itertools.permutations(remaining_vals): new_dim_map = copy.deepcopy(self.dim_map) for i, val in zip(dims_to_set, orders): new_dim_map[i] = val new_dim_maps.append(new_dim_map) new_dir_maps = itertools.product(*[ _POSS_DIRS if d == helpers.UNKNOWN else [d] for d in self.dir_map]) return [PotionMap(dim_map, dir_map) for dim_map, dir_map in itertools.product(new_dim_maps, new_dir_maps)] def update( self, perceived_axis: int, latent_axis: int, perceived_dir: int, reward_dir: int) -> None: """Updates the potion map given the observation. Args: perceived_axis: The perceived axis of the potion. latent_axis: The latent axis the potion acts on. perceived_dir: Perceived direction of the potion. reward_dir: The direction in which the reward changes. """ # The reward dir shows the direction that the potion moves in latent space. if reward_dir == 0: # We cannot update anything on the partial mapping but will update the # combos pass else: self.dim_map[perceived_axis] = latent_axis self.deduce_dim_map_gaps() self.dir_map[latent_axis] = 1 if perceived_dir == reward_dir else -1 def deduce_dim_map_gaps(self) -> None: """Updates the dimension map by deducing the last dimension if only 1 left.""" unknown_dims = [i for i, d in enumerate(self.dim_map) if d == helpers.UNKNOWN] if len(unknown_dims) == 1: unused_dims = set(_POSS_AXES).difference( {d for d in self.dim_map if d != helpers.UNKNOWN}) # For consistent observations this should always be true but when # precomputing maps we can consider inconsistent observations. In this # case it doesn't matter what we do. if len(unused_dims) == 1: self.dim_map[unknown_dims[0]] = unused_dims.pop() def index(self, perm_index_to_index) -> PartialPotionMapIndex: # Use 2 part index for this otherwise maybe it gets too big return (helpers.partial_perm_to_index(self.dim_map, perm_index_to_index), int(np.ravel_multi_index( tuple(unknown_dir_to_index(d) for d in self.dir_map), tuple(len(_POSS_DIRS) + 1 for _ in self.dir_map)))) def possible_latent_dims( self, perceived_potion: PerceivedPotion) -> List[int]: """Returns a list of latent space dimensions the potion could act on.""" # If we don't know which dimension the potion operates on then it could be # any that we haven't mapped to another dimension. 
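    # For example, with dim_map = [0, UNKNOWN, UNKNOWN] a potion perceived on
    # axis 1 could act on latent dimension 1 or 2.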
if self.dim_map[perceived_potion.perceived_dim] == helpers.UNKNOWN: unused_dims = set(range(3)).difference(set(self.dim_map)) return sorted(list(unused_dims)) # If we do know the dimension just return it. return [self.dim_map[perceived_potion.perceived_dim]] def partial_potion_map_from_index( ind: PartialPotionMapIndex, index_to_perm_index) -> PartialPotionMap: dim_map = helpers.partial_perm_from_index( ind[0], get_num_axes(), index_to_perm_index) dir_map_indices = np.unravel_index( ind[1], tuple(len(_POSS_DIRS) + 1 for _ in _POSS_AXES)) dir_map = [index_to_unknown_dir(int(d)) for d in dir_map_indices] return PartialPotionMap(dim_map, dir_map) def no_knowledge_partial_potion_map() -> PartialPotionMap: return PartialPotionMap([helpers.UNKNOWN, helpers.UNKNOWN, helpers.UNKNOWN], [helpers.UNKNOWN, helpers.UNKNOWN, helpers.UNKNOWN]) def partial_potion_map_from_possibles( possibles: Sequence[PotionMapIndex], index_to_perm_index: np.ndarray ) -> PartialPotionMap: """Creates a partial potion map from a list of all possible potion maps.""" potion_maps = [potion_map_from_index(p, index_to_perm_index) for p in possibles] poss_dim_map = np.stack((p.dim_map for p in potion_maps)) poss_dir_map = np.stack((p.dir_map for p in potion_maps)) unique_dims = [np.unique(poss_dim_map[:, i]) for i in range(get_num_axes())] unique_dirs = [np.unique(poss_dir_map[:, i]) for i in range(get_num_axes())] ret = PartialPotionMap( [u[0] if len(u) == 1 else helpers.UNKNOWN for u in unique_dims], [u[0] if len(u) == 1 else helpers.UNKNOWN for u in unique_dirs]) ret.deduce_dim_map_gaps() return ret def one_obs_partial_potion_map( perceived_axis: int, latent_axis: int, perceived_dir: int, reward_dir: int ) -> PartialPotionMap: dim_map = [latent_axis if i == perceived_axis else helpers.UNKNOWN for i in _POSS_AXES] dir_map = [1 if i == latent_axis and perceived_dir == reward_dir else -1 if i == latent_axis else helpers.UNKNOWN for i in _POSS_AXES] return PartialPotionMap(dim_map, dir_map) def one_obs_possible_potion_maps( perceived_axis: int, latent_axis: int, perceived_dir: int, reward_dir: int, perm_index_to_index: np.ndarray) -> List[int]: return sorted( [p.index(perm_index_to_index) for p in one_obs_partial_potion_map( perceived_axis, latent_axis, perceived_dir, reward_dir).fill_gaps()]) def one_obs_possible_stone_maps(axis: int, pos_dir: int) -> List[int]: return sorted([s.index() for s in partial_stone_map_from_single_obs( axis, pos_dir).fill_gaps()]) def one_action_outcome( stone: AlignedStone, potion: PerceivedPotion, result: AlignedStone, perm_index_to_index: np.ndarray ) -> Tuple[Optional[List[int]], Optional[List[int]]]: """Computes possible potion and stones maps given the observation.""" reward_dir = result.reward - stone.reward # If we observe a change we can deduce information if reward_dir == 2 or reward_dir == -2: stone_diff = result.aligned_coords - stone.aligned_coords change_axes = np.where(stone_diff)[0] if len(change_axes) != 1: return None, None stone_axis = change_axes[0] stone_dir = stone_diff[stone_axis] return (one_obs_possible_potion_maps( potion.perceived_dim, stone_axis, potion.perceived_dir, reward_dir // 2, perm_index_to_index), one_obs_possible_stone_maps( stone_axis, 1 if stone_dir == reward_dir else -1)) return None, None def update_partial_potion_map( stone: AlignedStone, potion: PerceivedPotion, result_stone: AlignedStone, partial_potion_map: PartialPotionMap, perm_index_to_index: np.ndarray) -> PartialPotionMapIndex: """Returns updated partial potion map index given the observation.""" 
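  # A reward change of +/-2 with exactly one flipped coordinate identifies
  # which latent axis the potion acts on and, via the reward direction, its
  # direction mapping; (-1, -1) is returned when nothing new can be deduced.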
reward_dir = result_stone.reward - stone.reward # If we observe a change we can deduce information if reward_dir == 2 or reward_dir == -2: stone_diff = result_stone.aligned_coords - stone.aligned_coords change_axes = np.where(stone_diff)[0] if len(change_axes) != 1: return -1, -1 stone_axis = change_axes[0] partial_potion_map.update( potion.perceived_dim, stone_axis, potion.perceived_dir, reward_dir // 2) return partial_potion_map.index(perm_index_to_index) return -1, -1 def update_partial_stone_map( stone: AlignedStone, result_stone: AlignedStone, partial_stone_map: PartialStoneMap) -> PartialStoneMapIndex: """Updates the partial stone map info given the observation.""" reward_dir = result_stone.reward - stone.reward # If we observe a change we can deduce information if reward_dir == 2 or reward_dir == -2: stone_diff = result_stone.aligned_coords - stone.aligned_coords change_axes = np.where(stone_diff)[0] if len(change_axes) != 1: return PartialStoneMapIndex(-1) stone_axis = change_axes[0] stone_dir = stone_diff[stone_axis] partial_stone_map.update(stone_axis, 1 if stone_dir == reward_dir else -1) return partial_stone_map.index() return PartialStoneMapIndex(-1) def aligned_stone_indices( aligned_stones: Counter[AlignedStone]) -> Counter[AlignedStoneIndex]: return collections.Counter({k.index(): v for k, v in aligned_stones.items()}) def perceived_potion_indices( perceived_potions: Counter[PerceivedPotion], perm_index_to_index: np.ndarray ) -> Counter[PerceivedPotionIndex]: return collections.Counter({k.index(perm_index_to_index): v for k, v in perceived_potions.items()}) def react( perceived_stone: AlignedStone, latent_dim: int, latent_dir: int ) -> AlignedStone: """Possible outcome without knowing the direction mapping if stone changes.""" new_reward = max(min(POSS_REWARDS), min( max(POSS_REWARDS), perceived_stone.reward + (2 * latent_dir))) new_coords = np.copy(perceived_stone.aligned_coords) new_coords[latent_dim] = -perceived_stone.aligned_coords[latent_dim] return AlignedStone(new_reward, new_coords) def possible_latent_dirs_and_stone_dirs( perceived_potion: PerceivedPotion, latent_dim: int, partial_potion_map: PartialPotionMap, partial_stone_map: PartialStoneMap ) -> List[Tuple[int, int]]: """Possible latent space directions and stone space directions for a potion. Given a perceived potion and an assumed latent space dimension and given what we know about the maps from potion space and stone space to latent space, get a list of possible latent space directions and stone space directions. Args: perceived_potion: The potion we are considering. latent_dim: The latent dimension the potion applies to. partial_potion_map: What we know about how potions map to latent space. partial_stone_map: What we know about how stones map to latent space. Returns: List of 2-element tuples where the first element is a latent space direction and the second element is a stone space direction. 
""" if partial_potion_map.dir_map[latent_dim] == helpers.UNKNOWN: all_latent_dirs = _POSS_DIRS elif partial_potion_map.dir_map[latent_dim] == 1: all_latent_dirs = [perceived_potion.perceived_dir] else: all_latent_dirs = [-perceived_potion.perceived_dir] latent_dirs_stone_dirs = [] for latent_dir in all_latent_dirs: if partial_stone_map.latent_pos_dir[latent_dim] == helpers.UNKNOWN: latent_dirs_stone_dirs.append((latent_dir, -1)) latent_dirs_stone_dirs.append((latent_dir, 1)) else: if partial_stone_map.latent_pos_dir[latent_dim] == 1: stone_dir = latent_dir else: stone_dir = -latent_dir latent_dirs_stone_dirs.append((latent_dir, stone_dir)) return latent_dirs_stone_dirs def reward_plausible( latent_direction: int, reward: int, plausible_reward_range: Tuple[int, int] ) -> bool: """Checks the stone reward is plausible given the latent direction.""" # If the stone already has the maximum (or minimum) reward it cannot get any # higher (or lower). if latent_direction * get_num_axes() == reward: return False # Return if the reward of the new stone is in the plausible range lb, ub = plausible_reward_range return lb <= reward + (2 * latent_direction) <= ub def latent_dirs_on_stone( perceived_stone: AlignedStone, latent_dim: int, partial_stone_map: PartialStoneMap, latent_dirs_stone_dirs: Sequence[Tuple[int, int]] ) -> Tuple[bool, List[int]]: """Filters possible latent and stone directions given a stone and partial map. Args: perceived_stone: The stone we are applying a potion to. latent_dim: The latent dimension the potion applies to. partial_stone_map: Information known about the mapping from stone space to latent space latent_dirs_stone_dirs: The list of possible latent directions and stone directions the potion applies to which we filter. Returns: Boolean saying if the stone could be unchanged. List of still plausible latent directions the stone could move in. """ # If the stone changes it must become the opposite value on the latent # dimension passed in. expected_stone_dir = -perceived_stone.aligned_coords[latent_dim] new_coords = np.copy(perceived_stone.aligned_coords) new_coords[latent_dim] = -perceived_stone.aligned_coords[latent_dim] # Get the reward for dimensions where we know the latent space coordinates of # the stone. known_rewards = [c * d for c, d in zip( new_coords, partial_stone_map.latent_pos_dir) if d != helpers.UNKNOWN] # The reward of the resulting stone could be more or less than the sum of the # known rewards by the number of unknown dimensions (extra 1 or -1 per # dimension, note that any bonus is applied later). unknown_range = len(partial_stone_map.latent_pos_dir) - len(known_rewards) known_reward = sum(known_rewards) plausible_reward_range = (known_reward - unknown_range, known_reward + unknown_range) latent_dirs = [ latent_dir for latent_dir, stone_dir in latent_dirs_stone_dirs if stone_dir == expected_stone_dir and reward_plausible( latent_dir, perceived_stone.reward, plausible_reward_range)] # If there are 1 or more latent directions that are not possible then the # stone would be unchanged in these cases. 
could_stay_still = len(latent_dirs_stone_dirs) > len(latent_dirs) return could_stay_still, latent_dirs class Stone: """A stone object in Alchemy.""" def __init__(self, idx, latent): self.idx = idx self.latent = np.array(latent) def latent_stone(self) -> LatentStone: return LatentStone(self.latent) def __str__(self) -> str: s = 'Stone ' + str(self.idx) + ' (' + str(self.latent) + ')' return s def __eq__(self, other: 'Stone') -> bool: return self.idx == other.idx and all(self.latent == other.latent) def __hash__(self) -> int: return hash((self.idx, tuple(e for e in self.latent))) def __repr__(self) -> str: return 'Stone(idx={idx}, latent={latent})'.format( idx=self.idx, latent=self.latent) class Potion: """A potion object in Alchemy.""" def __init__(self, idx, dimension, direction): self.idx = idx self.dimension = dimension # 0, 1, or 2 self.direction = direction # 1 or -1 @property def as_index(self): return self.dimension * 2 + (self.direction + 1) / 2 def latent_potion(self) -> LatentPotion: return LatentPotion(self.dimension, self.direction) def __str__(self) -> str: return '(' + str(self.dimension) + ',' + str(self.direction) + ')' def __eq__(self, other: 'Potion') -> bool: return (self.idx == other.idx and self.dimension == other.dimension and self.direction == other.direction) def __hash__(self) -> int: return hash((self.idx, self.dimension, self.direction)) def same_effect(self, other: 'Potion') -> bool: return (self.dimension == other.dimension and self.direction == other.direction) def __repr__(self) -> str: return ('Potion(idx={idx}, dimension={dimension}, ' 'direction={direction})'.format( idx=self.idx, dimension=self.dimension, direction=self.direction))
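

# Illustrative sketch only: the helper below is hypothetical (it is not used
# elsewhere) and simply demonstrates how the maps defined above compose and
# how the index functions round-trip.
def _example_maps_round_trip() -> bool:
  """Checks a few simple identities on fixed maps and an arbitrary stone."""
  stone_map = all_fixed_stone_map()    # latent positive direction on all axes
  potion_map = all_fixed_potion_map()  # identity dimension map
  aligned = AlignedStone(reward=1, coords=np.array([1, 1, -1], dtype=object))
  latent = stone_map.apply(aligned)    # LatentStone with coords [1, 1, -1]
  potion = PerceivedPotion(perceived_dim=2, perceived_dir=1)
  latent_potion = potion_map.apply(potion)  # LatentPotion(2, 1)
  # Index round trips recover the original objects.
  same_stone = latent_stone_from_index(latent.index()) == latent
  same_potion = latent_potion_from_index(latent_potion.index()) == latent_potion
  return same_stone and same_potion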
dm_alchemy-master
dm_alchemy/types/stones_and_potions.py
# Lint as python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Code to convert between unity types and python types.""" import copy import itertools from typing import Any, Dict, Sequence, Tuple from dm_alchemy.protos import alchemy_pb2 from dm_alchemy.protos import hypercube_pb2 from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils import frozendict import numpy as np from dm_alchemy.protos import color_info_pb2 from dm_alchemy.protos import unity_types_pb2 PotionMap = stones_and_potions.PotionMap StoneMap = stones_and_potions.StoneMap AlignedStone = stones_and_potions.AlignedStone PerceivedStone = stones_and_potions.PerceivedStone PerceivedPotion = stones_and_potions.PerceivedPotion LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion MapsAndGraph = Tuple[PotionMap, StoneMap, graphs.Graph] COLOR_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.COLOR SIZE_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.SIZE ROUNDNESS_TYPE = alchemy_pb2.PerceptualMappingApplicator.Type.ROUNDNESS # Colours are defined in AlchemyColors.asset _STONE_COLOURS = frozendict.frozendict({ 'purple': unity_types_pb2.Color( r=0.52156866, g=0.22745098, b=0.6313726, a=1.0), 'blurple': unity_types_pb2.Color( r=0.2608, g=0.2667, b=0.5941, a=1.0), 'blue': unity_types_pb2.Color( r=0.0, g=0.30588236, b=0.5568628, a=1.0) }) _POTION_COLOURS = frozendict.frozendict({ 'green': unity_types_pb2.Color( r=0.24394463, g=0.6911765, b=0.35806578, a=1.0), 'red': unity_types_pb2.Color( r=0.9647059, g=0.015686275, b=0.06666667, a=1.0), 'yellow': unity_types_pb2.Color( r=0.9411765, g=0.84705883, b=0.078431375, a=1.0), 'orange': unity_types_pb2.Color( r=0.9764706, g=0.4, b=0.10980392, a=1.0), 'turquoise': unity_types_pb2.Color( r=0.21176471, g=0.72156864, b=0.7411765, a=1.0), 'pink': unity_types_pb2.Color( r=0.9843137, g=0.43529412, b=0.43529412, a=1.0) }) # This is the order of perceived axes in unity. 
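# (index 0 = colour, 1 = size, 2 = roundness, matching AXIS_NUMBER below)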
PERCEIVED_AXIS = (COLOR_TYPE, SIZE_TYPE, ROUNDNESS_TYPE) AXIS_NUMBER = frozendict.frozendict({ a: i for i, a in enumerate(PERCEIVED_AXIS)}) SIZE_NAME_AT_COORD = frozendict.frozendict( {-1: 'small', 0: 'medium', 1: 'large'}) _STONE_SIZES = frozendict.frozendict( {'small': 1.0, 'medium': 1.4, 'large': 1.8}) SIZE_AT_COORD = frozendict.frozendict( {coord: _STONE_SIZES[name] for coord, name in SIZE_NAME_AT_COORD.items()}) _COORD_AT_SIZE = frozendict.frozendict({v: k for k, v in SIZE_AT_COORD.items()}) ROUNDNESS_NAME_AT_COORD = frozendict.frozendict( {-1: 'pointy', 0: 'somewhat pointy', 1: 'round'}) _STONE_ROUNDNESSES = frozendict.frozendict( {'pointy': 0.0, 'somewhat pointy': 0.5, 'round': 1.0}) ROUNDNESS_AT_COORD = frozendict.frozendict( {coord: _STONE_ROUNDNESSES[name] for coord, name in ROUNDNESS_NAME_AT_COORD.items()}) _COORD_AT_ROUNDNESS = frozendict.frozendict({ v: k for k, v in ROUNDNESS_AT_COORD.items()}) # The colour proto is not hashable so convert to a type which is. def colour_proto_to_hashable( colour: unity_types_pb2.Color) -> Tuple[float, float, float, float]: return (round(colour.r, 2), round(colour.g, 2), round(colour.b, 2), round(colour.a, 2)) COLOUR_NAME_AT_COORD = frozendict.frozendict( {-1: 'purple', 0: 'blurple', 1: 'blue'}) COLOUR_AT_COORD = frozendict.frozendict({ coord: _STONE_COLOURS[name] for coord, name in COLOUR_NAME_AT_COORD.items()}) _COORD_AT_COLOUR = frozendict.frozendict( {colour_proto_to_hashable(v): k for k, v in COLOUR_AT_COORD.items()}) POTION_COLOUR_AT_PERCEIVED_POTION = frozendict.frozendict({ PerceivedPotion(0, 1): 'green', PerceivedPotion(0, -1): 'red', PerceivedPotion(1, 1): 'yellow', PerceivedPotion(1, -1): 'orange', PerceivedPotion(2, 1): 'turquoise', PerceivedPotion(2, -1): 'pink', }) _PERCEIVED_POTION_AT_POTION_COLOUR = frozendict.frozendict({ colour_proto_to_hashable(_POTION_COLOURS[v]): k for k, v in POTION_COLOUR_AT_PERCEIVED_POTION.items()}) def get_colour_info( name_and_colour: Tuple[str, unity_types_pb2.Color] ) -> color_info_pb2.ColorInfo: return color_info_pb2.ColorInfo( color=name_and_colour[1], name=name_and_colour[0]) def latent_stone_to_unity( latent_stone: LatentStone) -> hypercube_pb2.HypercubeVertex: return hypercube_pb2.HypercubeVertex( index=latent_stone.index(), coordinates=latent_stone.latent_coords.tolist()) def _unity_to_latent_stone( latent: hypercube_pb2.HypercubeVertex) -> LatentStone: # Use object type to store python ints rather than numpy ints. 
return LatentStone(np.array([int(coord) for coord in latent.coordinates], dtype=object)) def perceptual_features(perceived_stone: PerceivedStone) -> Dict[str, Any]: return { 'size': SIZE_AT_COORD[perceived_stone.perceived_coords[AXIS_NUMBER[ SIZE_TYPE]]], 'roundness': ROUNDNESS_AT_COORD[perceived_stone.perceived_coords[ AXIS_NUMBER[ROUNDNESS_TYPE]]], 'color': COLOUR_AT_COORD[perceived_stone.perceived_coords[AXIS_NUMBER[ COLOR_TYPE]]], } def to_stone_unity_properties( perceived_stone: PerceivedStone, latent_stone: LatentStone ) -> alchemy_pb2.StoneProperties: """Convert a perceived and latent stone to StoneProperties.""" return alchemy_pb2.StoneProperties( reward=15 if perceived_stone.reward > 2 else perceived_stone.reward, latent=latent_stone_to_unity(latent_stone), **perceptual_features(perceived_stone)) def unity_to_perceived_stone( stone_properties: alchemy_pb2.StoneProperties ) -> PerceivedStone: """Convert StoneProperties to a perceived stone.""" size = _COORD_AT_SIZE[round(stone_properties.size, 1)] roundness = _COORD_AT_ROUNDNESS[round(stone_properties.roundness, 1)] colour = _COORD_AT_COLOUR[colour_proto_to_hashable(stone_properties.color)] # Use numpy object type to store python ints rather than numpy ints. perceived_coords = np.array([0, 0, 0], dtype=float) perceived_coords[AXIS_NUMBER[SIZE_TYPE]] = size perceived_coords[AXIS_NUMBER[ROUNDNESS_TYPE]] = roundness perceived_coords[AXIS_NUMBER[COLOR_TYPE]] = colour latent_stone = _unity_to_latent_stone(stone_properties.latent) return PerceivedStone(latent_stone.reward(), perceived_coords) def _from_stone_unity_properties( stone_properties: alchemy_pb2.StoneProperties, rotation: np.ndarray ) -> Tuple[PerceivedStone, AlignedStone, LatentStone]: """Convert StoneProperties to a perceived and latent stone.""" latent_stone = _unity_to_latent_stone(stone_properties.latent) perceived_stone = unity_to_perceived_stone(stone_properties) aligned_stone = stones_and_potions.align(perceived_stone, rotation) return perceived_stone, aligned_stone, latent_stone def latent_potion_to_unity( latent_potion: LatentPotion) -> hypercube_pb2.EdgeLabel: if latent_potion.latent_dir == 1: direction = hypercube_pb2.EdgeLabel.Direction.POSITIVE else: direction = hypercube_pb2.EdgeLabel.Direction.NEGATIVE return hypercube_pb2.EdgeLabel( dimension_index=latent_potion.latent_dim, direction=direction) def _unity_to_latent_potion( edge_label: hypercube_pb2.EdgeLabel) -> LatentPotion: if edge_label.direction == hypercube_pb2.EdgeLabel.Direction.POSITIVE: latent_dir = 1 else: latent_dir = -1 return LatentPotion( latent_dim=edge_label.dimension_index, latent_dir=latent_dir) def to_potion_unity_properties( perceived_potion: PerceivedPotion, latent_potion: LatentPotion, graph: graphs.Graph ) -> alchemy_pb2.PotionProperties: """Convert a perceived and latent potion and graph to PotionProperties.""" colour_name = POTION_COLOUR_AT_PERCEIVED_POTION[perceived_potion] colour = get_colour_info((colour_name, _POTION_COLOURS[colour_name])) reactions = set() for startnode, endnodes in graph.edge_list.edges.items(): expected_end_coords = copy.deepcopy(startnode.coords) expected_end_coords[latent_potion.latent_dim] = ( startnode.coords[latent_potion.latent_dim] + 2 * latent_potion.latent_dir) expected_end_node = graph.node_list.get_node_by_coords( expected_end_coords) if not expected_end_node: continue if expected_end_node in endnodes: reactions.add((startnode.idx, expected_end_node.idx)) reactions = [alchemy_pb2.PotionReaction(from_stone_index=from_stone, to_stone_index=to_stone) for 
from_stone, to_stone in reactions] sorted_reactions = sorted( reactions, key=lambda reaction: reaction.from_stone_index) return alchemy_pb2.PotionProperties( label=latent_potion_to_unity(latent_potion), reward=0, color=colour, glow_color=colour, reactions=sorted_reactions) def unity_to_perceived_potion( potion: alchemy_pb2.PotionProperties ) -> PerceivedPotion: return _PERCEIVED_POTION_AT_POTION_COLOUR[ colour_proto_to_hashable(potion.color.color)] def _potions_from_potion_unity_properties( potion: alchemy_pb2.PotionProperties ) -> Tuple[PerceivedPotion, LatentPotion]: """Convert the unity representation to a perceived and latent potion.""" return (unity_to_perceived_potion(potion), _unity_to_latent_potion(potion.label)) def graphs_from_potion_unity_properties( potions: Sequence[alchemy_pb2.PotionProperties]) -> graphs.Graph: """Convert a sequence of PotionProperties to a Graph.""" node_list = graphs.all_nodes_in_graph() edge_list = graphs.EdgeList() for i, potion in enumerate(potions): _, latent = _potions_from_potion_unity_properties(potion) utils_potion = stones_and_potions.Potion( i, latent.latent_dim, latent.latent_dir) for reaction in potion.reactions: edge_list.add_edge( node_list.get_node_by_idx(reaction.from_stone_index), node_list.get_node_by_idx(reaction.to_stone_index), utils_potion) return graphs.Graph(node_list, edge_list) def to_unity_chemistry( chemistry: utils.Chemistry ) -> Tuple[alchemy_pb2.Chemistry, alchemy_pb2.RotationMapping]: """Convert from python types to unity Chemistry object.""" # Latent stones and potions are always in the same places. latent_stones = stones_and_potions.possible_latent_stones() latent_potions = stones_and_potions.possible_latent_potions() # Apply the dimension swapping map between latent stones in unity and latent # stones in python (see from_unity_chemistry for more explanation). python_to_unity = PythonToUnityDimMap(chemistry) python_latent_stones = [python_to_unity.apply_to_stone(latent_stone) for latent_stone in latent_stones] python_latent_potions = [python_to_unity.apply_to_potion(latent_potion) for latent_potion in latent_potions] # Apply the stone map to them to get perceptual stones. aligned_stones = [chemistry.stone_map.apply_inverse(stone) for stone in python_latent_stones] perceived_stones = [ stones_and_potions.unalign(stone, chemistry.rotation) for stone in aligned_stones] unity_stones = [to_stone_unity_properties(perceived, latent) for perceived, latent in zip(perceived_stones, latent_stones)] # Apply the potion map to them to get perceptual potions. 
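  # apply_inverse is used because potion_map maps perceived potions to latent
  # potions, and here we start from the latent potions.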
perceived_potions = [chemistry.potion_map.apply_inverse(potion) for potion in python_latent_potions] unity_potions = [ to_potion_unity_properties(perceived, latent, python_to_unity.graph) for perceived, latent in zip(perceived_potions, latent_potions)] unity_chemistry = alchemy_pb2.Chemistry( stones=unity_stones, potions=unity_potions) rotation_mapping = rotation_to_unity(python_to_unity.rotation) return unity_chemistry, rotation_mapping def rotation_from_unity( rotation_mapping: alchemy_pb2.RotationMapping ) -> np.ndarray: """Get the transformation to undo rotation from unity.""" # Rotate back angles = [-int(rotation_mapping.rotation_angles.x), -int(rotation_mapping.rotation_angles.y), -int(rotation_mapping.rotation_angles.z)] return stones_and_potions.rotation_from_angles(angles) def rotation_to_unity(rotation: np.ndarray) -> alchemy_pb2.RotationMapping: """Convert the transformation to undo rotation to unity.""" angles = stones_and_potions.rotation_to_angles(rotation) return alchemy_pb2.RotationMapping(rotation_angles=unity_types_pb2.Vector3( **{axis: -round(a) for axis, a in zip('xyz', angles)})) def potion_map_from_potions( latent_potions: Sequence[LatentPotion], perceived_potions: Sequence[PerceivedPotion] ) -> PotionMap: """Calculate potion map relating latent and perceived potions.""" dimension_map = [-1, -1, -1] direction_map = [0, 0, 0] for perceived_potion, latent_potion in zip(perceived_potions, latent_potions): dimension_map[perceived_potion.perceived_dim] = latent_potion.latent_dim if latent_potion.latent_dir == perceived_potion.perceived_dir: direction_map[latent_potion.latent_dim] = 1 else: direction_map[latent_potion.latent_dim] = -1 return PotionMap(dim_map=dimension_map, dir_map=direction_map) def _get_aligned_coords_matching_latent( python_stones: Sequence[Tuple[PerceivedStone, AlignedStone, LatentStone]], latent_coords: Sequence[int] ) -> np.ndarray: return [aligned_stone.aligned_coords.astype(int) for _, aligned_stone, latent_stone in python_stones if latent_stone.latent_coords.tolist() == latent_coords][0] def find_dim_map_and_stone_map( chemistry: utils.Chemistry ) -> Tuple[np.ndarray, StoneMap, np.ndarray]: """Find a dimension map and stone map which map latent stones to perceived.""" latent_stones = stones_and_potions.possible_latent_stones() aligned_stones = [chemistry.stone_map.apply_inverse(stone) for stone in latent_stones] perceived_stones = [stones_and_potions.unalign(stone, chemistry.rotation) for stone in aligned_stones] for dim_map in [np.eye(3, dtype=int)[p, :] for p in itertools.permutations( [0, 1, 2])]: for stone_map in stones_and_potions.possible_stone_maps(): sm = np.diag(stone_map.latent_pos_dir.astype(int)) # Since we do rotation before reflection in this case we must allow # rotation forwards and backwards to get all cases. # Because of the scaling this is not just the inverse matrix. 
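      # Together the loops try all 6 permutations x 8 sign maps x 2 rotation
      # senses (96 candidates) and the first combination that reproduces
      # every perceived stone is returned.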
inverse_rotation = stones_and_potions.rotation_from_angles( [-a for a in stones_and_potions.rotation_to_angles( chemistry.rotation)]) for rotation in [chemistry.rotation, inverse_rotation]: all_match = True for ls, ps in zip(latent_stones, perceived_stones): new_ls = np.matmul(dim_map, ls.latent_coords.astype(int)) ps_prime = np.matmul(sm, np.matmul(np.linalg.inv(rotation), new_ls)) if not all(abs(a - b) < 0.0001 for a, b in zip( ps_prime, ps.perceived_coords.astype(int))): all_match = False break if all_match: return np.linalg.inv(dim_map), stone_map, rotation assert False, ( 'No dimension map and stone map takes latent stones to the passed ' 'perceived stones with the passed rotation.') def _apply_dim_map_to_stone( dim_map: np.ndarray, latent_stone: LatentStone ) -> LatentStone: coords = np.rint(np.matmul( dim_map, latent_stone.latent_coords.astype(int))) return LatentStone(np.array([int(c) for c in coords], object)) def _apply_dim_map_to_potion( dim_map: np.ndarray, latent_potion: LatentPotion ) -> LatentPotion: return LatentPotion( np.where(dim_map[latent_potion.latent_dim, :])[0][0], latent_potion.latent_dir) def _apply_dim_map_to_graph( dim_map: np.ndarray, graph: graphs.Graph ) -> graphs.Graph: """Swap latent dimensions in graph.""" edge_list = graphs.EdgeList() for start_node, end_nodes in graph.edge_list.edges.items(): start_coords = np.matmul(dim_map, np.array(start_node.coords)).tolist() new_start_node = graph.node_list.get_node_by_coords(start_coords) for end_node, edge in end_nodes.items(): end_coords = np.matmul(dim_map, np.array(end_node.coords)).tolist() new_end_node = graph.node_list.get_node_by_coords(end_coords) new_potion = stones_and_potions.Potion( edge[1].idx, np.where(dim_map[edge[1].dimension, :])[0][0], edge[1].direction) edge_list.add_edge(new_start_node, new_end_node, new_potion) return graphs.Graph(graph.node_list, edge_list) class PythonToUnityDimMap: """Convert from python method of mapping to unity method.""" def __init__(self, chemistry: utils.Chemistry): self._chemistry = chemistry self._dim_map, self.stone_map, self.rotation = find_dim_map_and_stone_map( chemistry) self.graph = self._apply_to_graph(self._chemistry.graph) self.potion_map = self._apply_to_potion_map(self._chemistry.potion_map) def apply_to_stone(self, latent_stone: LatentStone) -> LatentStone: return _apply_dim_map_to_stone(self._dim_map, latent_stone) def apply_to_potion(self, latent_potion: LatentPotion) -> LatentPotion: return _apply_dim_map_to_potion(self._dim_map, latent_potion) def _apply_to_graph(self, graph: graphs.Graph) -> graphs.Graph: return _apply_dim_map_to_graph(self._dim_map, graph) def _apply_to_potion_map(self, potion_map: PotionMap) -> PotionMap: latent_potions = stones_and_potions.possible_latent_potions() new_latent_potions = [self.apply_to_potion(latent_potion) for latent_potion in latent_potions] perceived_potions = [potion_map.apply_inverse(latent_potion) for latent_potion in latent_potions] return potion_map_from_potions(new_latent_potions, perceived_potions) def from_unity_chemistry( chemistry: alchemy_pb2.Chemistry, rotation_mapping: alchemy_pb2.RotationMapping ) -> utils.Chemistry: """Convert from unity Chemistry object to corresponding python types. Args: chemistry: A chemistry object received from the alchemy unity environment. rotation_mapping: A rotation mapping object received from the alchemy unity environment. Returns: A PotionMap describing the transformation from potion perceptual space to latent space. 
A StoneMap describing the transformation from stone aligned perceptual space to latent space. A Graph describing the available edges in latent space. A np.ndarray describing the rotation from stone aligned perceptual space to stone perceptual space. """ # In unity the latent stones are (possibly) rotated and then "perceptual # mapping applicators" are applied to say how this is represented on screen, # e.g. -1 in the first latent dimension is purple and +1 is blue. # By only considering 7 possible rotations (0 rotation and 45 degrees # clockise or anticlockwise about each axis) and just considering in what # direction perceptual attributes change, when this is combined with the # mapping of potion pairs to latent space dimensions and assigning a direction # to that potion pair, we get all mappings which are 45 degrees offset on one # axis (note that latent variables have the same effect on the reward so # swapping latent space dimensions has no effect). We get duplicates because # after rotating, one dimension of the max reward stone will have value 0 so # reflecting about this does not change the value. However, the configuration # is such that the task distribution is as it would be if we avoided # duplicates. # An alternative way to generate all these mappings without the duplicates # would be to take the stones latent coordinates and first apply a mapping # which changes the positive direction and then rotate these positions by 45 # degrees clockwise (excluding anticlockwise rotations). # It is easier to run algorithms like the ideal observer assuming the second # breakdown of the mapping because the rotation does not effect the best # action to take so we can take the perceived coordinates and undo the # rotation using any plausible rotation (even if it is not the correct one) # and then maintain a belief state over the remaining aspects of the # chemistry and update the belief state if we find the rotation was wrong. # We can switch between these equivalent breakdowns by possibly rotating in # the opposite direction. # From unity we get # perceived_stone = sm * r * latent_stone # where r rotates plus or minus 45 degrees and sm changes directions, we want # perceived_stone = r_prime * sm * latent_stone # where r_prime is rotating clockwise about the axis that r rotates around. rotation = rotation_from_unity(rotation_mapping) abs_rotation = stones_and_potions.rotation_from_angles( [-abs(a) for a in stones_and_potions.rotation_to_angles(rotation)]) python_stones = [_from_stone_unity_properties(stone, abs_rotation) for stone in chemistry.stones] python_potions = [_potions_from_potion_unity_properties(potion) for potion in chemistry.potions] graph = graphs_from_potion_unity_properties(chemistry.potions) # So sm_prime is diagonal with elements in {-1, 1} and dim_map is such that # the sum of each row and each column is 1 with non zero elements 1. 
# Let a := sm_prime * dim_map # a := [a11 a12 a13] # [a21 a22 a23] # [a31 a32 a33] # a * [1, 1, 1] = [a11 + a12 + a13, a21 + a22 + a23, a31 + a32 + a33] sum_of_each_row = _get_aligned_coords_matching_latent( python_stones, [1, 1, 1]) stone_map = StoneMap(pos_dir=sum_of_each_row) sm_prime = np.diag(sum_of_each_row) # a * [1, 1, 1] - a * [-1, 1, 1] = 2 * [a11, a21, a31] first_column = ((sum_of_each_row - _get_aligned_coords_matching_latent( python_stones, [-1, 1, 1]))/2).astype(int) second_column = ((sum_of_each_row - _get_aligned_coords_matching_latent( python_stones, [1, -1, 1]))/2).astype(int) third_column = ((sum_of_each_row - _get_aligned_coords_matching_latent( python_stones, [1, 1, -1]))/2).astype(int) a = np.hstack((first_column.reshape((3, 1)), second_column.reshape((3, 1)), third_column.reshape((3, 1)))) dim_map = np.rint(np.matmul(np.linalg.inv(sm_prime), a)).astype(int) latent_stones = [latent_stone for _, _, latent_stone in python_stones] aligned_stones = [aligned_stone for _, aligned_stone, _ in python_stones] latent_stones = [_apply_dim_map_to_stone(dim_map, latent_stone) for latent_stone in latent_stones] latent_potions = [latent_potion for _, latent_potion in python_potions] latent_potions = [_apply_dim_map_to_potion(dim_map, latent_potion) for latent_potion in latent_potions] perceived_potions = [perceived_potion for perceived_potion, _ in python_potions] graph = _apply_dim_map_to_graph(dim_map, graph) for aligned_stone, latent_stone in zip(aligned_stones, latent_stones): assert stone_map.apply(aligned_stone) == latent_stone, ( 'Applying the stone map to the aligned stone did not give the ' 'expected latent stone.\n{aligned_stone}\n{latent_stone}\n' '{stone_map}\n{chemistry}'.format( aligned_stone=aligned_stone, latent_stone=latent_stone, stone_map=stone_map, chemistry=chemistry)) potion_map = potion_map_from_potions(latent_potions, perceived_potions) for perceived_potion, latent_potion in zip(perceived_potions, latent_potions): assert potion_map.apply(perceived_potion) == latent_potion, ( 'Applying the potion map to the perceived potion did not give the ' 'expected latent potion.{perceived_potion}\n{latent_potion}\n' '{potion_map}\n{chemistry}'.format( perceived_potion=perceived_potion, latent_potion=latent_potion, potion_map=potion_map, chemistry=chemistry)) return utils.Chemistry(potion_map, stone_map, graph, abs_rotation)
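

# Illustrative sketch only: the helper below is hypothetical (it is not used
# elsewhere) and shows that rotation_to_unity and rotation_from_unity invert
# one another for one of the supported 45 degree rotations.
def _example_rotation_round_trip() -> bool:
  rotation = stones_and_potions.rotation_from_angles([0, 0, -45])
  rotation_mapping = rotation_to_unity(rotation)
  recovered = rotation_from_unity(rotation_mapping)
  return stones_and_potions.rotations_equal(rotation, recovered)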
dm_alchemy-master
dm_alchemy/types/unity_python_conversion.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Composite alchemy types.""" import dataclasses import enum import math from typing import Callable, Dict, List, Mapping, Optional, Sequence, Set, Union from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import graphs from dm_alchemy.types import helpers from dm_alchemy.types import stones_and_potions import numpy as np @dataclasses.dataclass class Chemistry: """The potion map, stone map and graph which together form a chemistry.""" potion_map: stones_and_potions.PotionMap stone_map: stones_and_potions.StoneMap graph: graphs.Graph rotation: np.ndarray def __eq__(self, other: 'Chemistry') -> bool: return (self.potion_map == other.potion_map and self.stone_map == other.stone_map and graphs.constraint_from_graph(self.graph) == graphs.constraint_from_graph(other.graph) and stones_and_potions.rotations_equal(self.rotation, other.rotation)) class TrialItems: """Stones and potions in a single trial. We have 2 different types representing the latent information about stones and potions. We accept either and convert so that downstream functions do not have to worry about which type they are dealing with. 
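
  For example, passing LatentPotion and LatentStone instances produces Potion
  and Stone objects with indices assigned in the order they are given.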
""" def __init__( self, potions: Union[Sequence[stones_and_potions.Potion], Sequence[stones_and_potions.LatentPotion]], stones: Union[Sequence[stones_and_potions.Stone], Sequence[stones_and_potions.LatentStone]]): self.potions = potions if potions and isinstance(potions[0], stones_and_potions.LatentPotion): latent_potions: Sequence[stones_and_potions.LatentPotion] = potions self.potions = [ stones_and_potions.Potion(i, latent.latent_dim, latent.latent_dir) for i, latent in enumerate(latent_potions)] self.stones = stones if stones and isinstance(stones[0], stones_and_potions.LatentStone): latent_stones: Sequence[stones_and_potions.LatentStone] = stones self.stones = [ stones_and_potions.Stone(i, latent.latent_coords) for i, latent in enumerate(latent_stones)] @property def num_stones(self) -> int: return len(self.stones) @property def num_potions(self) -> int: return len(self.potions) def __repr__(self) -> str: return 'TrialItems(potions={potions}, stones={stones})'.format( potions=repr(self.potions), stones=repr(self.stones)) def __eq__(self, other: 'TrialItems') -> bool: return self.potions == other.potions and self.stones == other.stones class EpisodeItems: """Initial stones and potions for each trial in an episode.""" def __init__( self, potions: Union[Sequence[Sequence[stones_and_potions.Potion]], Sequence[Sequence[stones_and_potions.LatentPotion]]], stones: Union[Sequence[Sequence[stones_and_potions.Stone]], Sequence[Sequence[stones_and_potions.LatentStone]]]): self.trials = [TrialItems(potions=potions, stones=stones) for potions, stones in zip(potions, stones)] @property def num_trials(self) -> int: return len(self.trials) @property def stones(self) -> List[List[stones_and_potions.Stone]]: return [trial.stones for trial in self.trials] @property def potions(self) -> List[List[stones_and_potions.Potion]]: return [trial.potions for trial in self.trials] def __repr__(self) -> str: return 'EpisodeItems(trials={trials})'.format(trials=repr(self.trials)) def __eq__(self, other: 'EpisodeItems') -> bool: return self.trials == other.trials @dataclasses.dataclass class SymbolicBot: run: bool = False trackers: Optional[List[str]] = None bot_running_trackers: Optional[List[str]] = None @dataclasses.dataclass class SymbolicBots: """The symbolic bots we can run on alchemy.""" ideal_observer: SymbolicBot = dataclasses.field(default_factory=SymbolicBot) ideal_explorer: SymbolicBot = dataclasses.field(default_factory=SymbolicBot) random_action: SymbolicBot = dataclasses.field(default_factory=SymbolicBot) search_oracle: SymbolicBot = dataclasses.field(default_factory=SymbolicBot) agent_symbolic: SymbolicBot = dataclasses.field(default_factory=SymbolicBot) class ElementContent(enum.IntEnum): """The type of content for each element of a chemistry observation.""" GROUND_TRUTH = 0 BELIEF_STATE = 1 UNKNOWN = 2 class ElementType(enum.IntEnum): """The type of content for each element of a chemistry observation.""" GRAPH = 0 POTION_MAP = 1 STONE_MAP = 2 ROTATION = 3 class GroupInChemistry: """A set of dimensions in the chemistry observation.""" def __init__( self, group: Mapping[ElementType, Set[int]], distr: Sequence[float]): self.group = group self.distr = distr if len(self.distr) != len(ElementContent): raise ValueError('Must provide a probability for each content type.') if abs(sum(self.distr) - 1.0) > 0.0001: raise ValueError('Elements of distr must sum to 1.') def __repr__(self) -> str: return ('GroupInChemistry(group={group}, ' 'distr={distr})'.format( group=repr(self.group), distr=repr(self.distr))) 
GetObsFn = Callable[[], List[float]] class Element: """Details of how we see an element of the chemistry in our observations.""" def __init__( self, element_type: ElementType, expected_obs_length: int, present: bool = True): """Constructs an element which forms part of the observed chemistry. Args: element_type: Specifies which part of the chemistry this element refers to. expected_obs_length: The length of this observation element. present: Whether this element is present in the observation. """ self.element_type = element_type self.expected_obs_length = expected_obs_length self.present = present def all_dims(self) -> Set[int]: return set(range(self.expected_obs_length)) def form_observation( self, dimensions: Mapping[ElementContent, Set[int]], get_content_obs: Mapping[ElementContent, GetObsFn]) -> List[float]: """Gets the observation merging different content types. Args: dimensions: Mapping from different types of content to sets of dimensions which hold that content type. get_content_obs: Mapping from content types to functions which get the observation of that content type. Returns: The observation with different content type at different dimensions as specified. """ if not self.present: return [] # Start with unknown obs obs = get_content_obs[ElementContent.UNKNOWN]() for content in [ElementContent.GROUND_TRUTH, ElementContent.BELIEF_STATE]: # If any of the groups require this type of content then get it and fill # in the dimensions for these groups. if content in dimensions and dimensions[content]: content_obs = get_content_obs[content]() for i in dimensions[content]: obs[i] = content_obs[i] return obs def __repr__(self) -> str: return ('Element(element_type={element_type}, ' 'expected_obs_length={expected_obs_length}, ' 'present={present})'.format( element_type=repr(self.element_type), expected_obs_length=repr(self.expected_obs_length), present=repr(self.present))) class PotionMapElement(Element): """How we see the potion map element of the chemistry in our observations.""" def __init__(self, **kwargs): super().__init__( element_type=ElementType.POTION_MAP, expected_obs_length=9, **kwargs) class StoneMapElement(Element): """How we see the stone map element of the chemistry in our observations.""" def __init__(self, **kwargs): super().__init__( element_type=ElementType.STONE_MAP, expected_obs_length=3, **kwargs) class GraphElement(Element): """How we see the graph element of the chemistry in our observations.""" def __init__(self, **kwargs): super().__init__( element_type=ElementType.GRAPH, expected_obs_length=12, **kwargs) class RotationElement(Element): """How we see the rotation element of the chemistry in our observations.""" def __init__(self, **kwargs): super().__init__( element_type=ElementType.ROTATION, expected_obs_length=4, **kwargs) @dataclasses.dataclass class GetChemistryObsFns: """Functions to get the observations for different content types for each element.""" potion_map: Mapping[ElementContent, GetObsFn] stone_map: Mapping[ElementContent, GetObsFn] graph: Mapping[ElementContent, GetObsFn] rotation: Mapping[ElementContent, GetObsFn] def element( self, element_type: ElementType ) -> Mapping[ElementContent, GetObsFn]: if element_type == ElementType.POTION_MAP: return self.potion_map if element_type == ElementType.STONE_MAP: return self.stone_map if element_type == ElementType.ROTATION: return self.rotation return self.graph _EPSILON = 0.0001 class ChemistrySeen: """What elements of the chemistry we see in our observations.""" def __init__( self, groups: 
Optional[Sequence[GroupInChemistry]] = None, content: Optional[ElementContent] = None, potion_map: Optional[PotionMapElement] = None, stone_map: Optional[StoneMapElement] = None, graph: Optional[GraphElement] = None, rotation: Optional[RotationElement] = None, precomputed: Optional[Union[str, precomputed_maps.PrecomputedMaps]] = None ): """Returns a SeeChemistry object. Args: groups: Groups of dimensions which get the same content each episode. content: The type of content for the chemistry (if all dimensions are in the same group). potion_map: Information about the potion map element of the chemistry observation. stone_map: Information about the stone map element of the chemistry observation. graph: Information about the graph element of the chemistry observation. rotation: Information about the rotation element of the chemistry observation. precomputed: If any of the elements involve computing the belief state then precomputed should be either a set of precomputed maps or the level name for which we can load the precomputed maps. """ self.potion_map = potion_map or PotionMapElement() self.stone_map = stone_map or StoneMapElement() self.graph = graph or GraphElement() self.rotation = rotation or RotationElement() self.precomputed = precomputed if groups: self.groups = groups if content is not None: raise ValueError('content ignored if groups is passed in.') else: # If groups is not passed we set one group for all elements and give all # the probability to the content being unknown. distr = [0.0 for content in ElementContent] if content is None: content = ElementContent.UNKNOWN distr[content] = 1.0 self.groups = [GroupInChemistry( {element_type: self.element(element_type).all_dims() for element_type in ElementType}, distr)] def element(self, element_type: ElementType) -> Element: """Gets info about the specified element of the chemistry.""" if element_type == ElementType.POTION_MAP: return self.potion_map if element_type == ElementType.STONE_MAP: return self.stone_map if element_type == ElementType.ROTATION: return self.rotation return self.graph def sample_contents(self) -> List[ElementType]: """Samples content types for each content group.""" contents = [] for group in self.groups: contents.append(np.where(np.random.multinomial(1, group.distr))[0][0]) return contents def uses_content_type(self, content: ElementContent) -> bool: """Could the chemistry contain the specified content.""" return any(group.distr[content] > _EPSILON for group in self.groups) def dimensions_for_content( self, contents: Sequence[ElementContent], element_type: ElementType ) -> Dict[ElementContent, Set[int]]: """Given sampled group contents constructs map from content type to dimensions.""" dimensions = {content: set() for content in ElementContent} for group_index, group_content in enumerate(contents): group = self.groups[group_index].group if element_type in group: dimensions[group_content] |= group[element_type] return dimensions def form_observation( self, contents: Sequence[ElementContent], get_obs: GetChemistryObsFns ) -> List[float]: """Forms an observation with the correct content type at each dimension.""" obs = [] for element_type in ElementType: dimensions = (self.dimensions_for_content(contents, element_type) if contents else {}) obs.extend(self.element(element_type).form_observation( dimensions, get_obs.element(element_type))) return obs def obs_size(self) -> int: """Returns the size of the chemistry observation.""" # length of observation depends on number of axes num_axes = 
stones_and_potions.get_num_axes() vector_size = 0 if self.stone_map.present: # stone pos_dir vector_size += num_axes if self.potion_map.present: # potion dir_map vector_size += num_axes # potion dim_map # TODO(b/173787297): make dim_map factorized; uncomment below # vector_size += num_axes * num_axes vector_size += math.factorial(num_axes) if self.graph.present: # edges in a cube of dimension num_axes vector_size += graphs.num_edges_in_cube() if self.rotation.present: vector_size += len(stones_and_potions.possible_rotations()) return vector_size def initialise_precomputed(self) -> None: """Loads precomputed maps if necessary.""" if isinstance(self.precomputed, str): # For the purpose of seeing the chemistry precomputed with and without # shaping are equivalent and we do not store precomputed for levels # with shaping so just use the precomputed for the level without precomputed = self.precomputed.replace('_w_shaping', '').replace( '_shaping', '') self.precomputed = precomputed_maps.load_from_level_name(precomputed) if not self.precomputed: raise ValueError( 'Could not load precomputed maps for ' + self.precomputed + '.') def __repr__(self) -> str: return ('SeeChemistry(groups={groups}, potion_map={potion_map}, ' 'stone_map={stone_map}, graph={graph}, rotation={rotation}, ' 'precomputed={precomputed})'.format( groups=repr(self.groups), potion_map=repr(self.potion_map), stone_map=repr(self.stone_map), graph=repr(self.graph), rotation=repr(self.rotation), precomputed=repr(self.precomputed))) class SlotBasedAction: """Represents an action using the stone and potion slot indices.""" def __init__( self, end_trial: bool = False, no_op: bool = False, stone_ind: Optional[int] = None, cauldron: bool = False, potion_ind: Optional[int] = None): self.end_trial = end_trial self.no_op = no_op self.cauldron = cauldron self.stone_ind = stone_ind self.potion_ind = potion_ind def _valid(self) -> bool: """Action is valid if exactly one of these things is true.""" put_stone_in_cauldron = self.cauldron and self.using_stone put_stone_in_potion = self.using_potion and self.using_stone return [self.end_trial, self.no_op, put_stone_in_cauldron, put_stone_in_potion].count(True) == 1 @property def using_stone(self) -> bool: return self.stone_ind is not None @property def using_potion(self) -> bool: return self.potion_ind is not None def __repr__(self): return ( 'SlotBasedAction(end_trial={end_trial}, no_op={no_op}, ' 'stone_ind={stone_ind}, cauldron={cauldron}, potion_ind={potion_ind})'. 
format(end_trial=self.end_trial, no_op=self.no_op, stone_ind=self.stone_ind, cauldron=self.cauldron, potion_ind=self.potion_ind)) class TypeBasedAction: """Represents an action using the stone and potion types.""" def __init__( self, end_trial: bool = False, no_op: bool = False, stone: Optional[stones_and_potions.PerceivedStone] = None, cauldron: bool = False, potion: Optional[stones_and_potions.PerceivedPotion] = None): self.end_trial = end_trial self.no_op = no_op self.cauldron = cauldron self.perceived_stone = stone self.perceived_potion = potion def _valid(self) -> bool: """Action is valid if exactly one of these things is true.""" put_stone_in_cauldron = self.cauldron and self.using_stone put_stone_in_potion = self.using_potion and self.using_stone return [self.end_trial, self.no_op, put_stone_in_cauldron, put_stone_in_potion].count(True) == 1 @property def using_stone(self) -> bool: return self.perceived_stone is not None @property def using_potion(self) -> bool: return self.perceived_potion is not None def __repr__(self): return ( 'TypeBasedAction(end_trial={end_trial}, no_op={no_op}, stone={stone}, ' 'cauldron={cauldron}, potion={potion})'.format( end_trial=self.end_trial, no_op=self.no_op, stone=self.perceived_stone, cauldron=self.cauldron, potion=self.perceived_potion)) def type_based_action_from_ints( aligned_stone_index: stones_and_potions.AlignedStoneIndex, perceived_potion_index: stones_and_potions.PerceivedPotionIndex, rotation: np.ndarray ) -> TypeBasedAction: """Converts from int specification of action to type based.""" if aligned_stone_index == helpers.END_TRIAL: return TypeBasedAction(end_trial=True) perceived_stone = stones_and_potions.unalign( stones_and_potions.aligned_stone_from_index(aligned_stone_index), rotation) if perceived_potion_index == stones_and_potions.CAULDRON: return TypeBasedAction(stone=perceived_stone, cauldron=True) perceived_potion = stones_and_potions.perceived_potion_from_index( perceived_potion_index) return TypeBasedAction(stone=perceived_stone, potion=perceived_potion)
dm_alchemy-master
dm_alchemy/types/utils.py
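A minimal usage sketch for the types in this file (the values are arbitrary and the imports assume the package layout shown in the file paths): TrialItems converts latent stones and potions to their realised counterparts, and SlotBasedAction addresses items by slot index.

from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils

# Take a couple of latent stones and one latent potion; TrialItems converts
# them to Stone/Potion instances internally.
latent_stones = list(stones_and_potions.possible_latent_stones())[:2]
latent_potions = list(stones_and_potions.possible_latent_potions())[:1]
trial = utils.TrialItems(potions=latent_potions, stones=latent_stones)
assert trial.num_stones == 2 and trial.num_potions == 1

# Put the stone in slot 0 into the potion in slot 0.
action = utils.SlotBasedAction(stone_ind=0, potion_ind=0)
assert action.using_stone and action.using_potion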
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Unpack events from the environment.""" import copy from typing import List, Sequence, Tuple from dm_alchemy.protos import alchemy_pb2 from dm_alchemy.types import stones_and_potions from dm_alchemy.types import unity_python_conversion from dm_alchemy.protos import events_pb2 def unpack_chemistry_and_rotation( event: events_pb2.WorldEvent ) -> Tuple[alchemy_pb2.Chemistry, alchemy_pb2.RotationMapping]: chem_created = alchemy_pb2.ChemistryCreated() event.detail.Unpack(chem_created) return chem_created.chemistry, chem_created.rotation_mapping def unpack_potion_used(event: events_pb2.WorldEvent) -> Tuple[int, int]: potion_used = alchemy_pb2.PotionUsed() event.detail.Unpack(potion_used) return potion_used.potion_instance_id, potion_used.stone_instance_id def unpack_stone_used(event: events_pb2.WorldEvent) -> int: stone_used = alchemy_pb2.StoneUsed() event.detail.Unpack(stone_used) return stone_used.stone_instance_id def potions_used_on_step( events: Sequence[events_pb2.WorldEvent] ) -> List[Tuple[int, int]]: return [unpack_potion_used(event) for event in events if 'Alchemy/PotionUsed' in event.name] def stones_used_on_step(events: Sequence[events_pb2.WorldEvent]) -> List[int]: return [unpack_stone_used(event) for event in events if 'Alchemy/StoneUsed' in event.name] def get_stones( creation_events: Sequence[events_pb2.WorldEvent] ) -> List[Tuple[stones_and_potions.PerceivedStone, int]]: """Gets a list of Stone objects from creation events.""" stones = [] for event in creation_events: if 'StoneCreated' in event.name: stone_event = alchemy_pb2.StoneCreated() event.detail.Unpack(stone_event) perceived_stone = unity_python_conversion.unity_to_perceived_stone( stone_event.stone_properties) stones.append((perceived_stone, stone_event.stone_instance_id)) return stones def get_potions( creation_events: Sequence[events_pb2.WorldEvent] ) -> List[Tuple[stones_and_potions.PerceivedPotion, int]]: """Gets a list of Potion objects from creation events.""" potions = [] for event in creation_events: if 'PotionCreated' in event.name: potion_event = alchemy_pb2.PotionCreated() event.detail.Unpack(potion_event) perceived_potion = unity_python_conversion.unity_to_perceived_potion( potion_event.potion_properties) potions.append((perceived_potion, potion_event.potion_instance_id)) return potions def get_bottlenecks_and_rotation( creation_events: Sequence[events_pb2.WorldEvent] ) -> Tuple[alchemy_pb2.Chemistry, alchemy_pb2.RotationMapping]: """Gets the chemistry constraints from creation_events.""" # search through trajectory for ChemistryCreated event chemistry_events = [] for event in creation_events: if 'ChemistryCreated' in event.name: chem_event = alchemy_pb2.ChemistryCreated() event.detail.Unpack(chem_event) chemistry_events.append(chem_event) assert chemistry_events, 'Chemistry not found' return 
chemistry_events[0].chemistry, chemistry_events[0].rotation_mapping def events_per_trial( trajectory: Sequence[Sequence[events_pb2.WorldEvent]] ) -> List[List[events_pb2.WorldEvent]]: """Split the events for the trajectory by trial.""" per_trial_events = [] trial_events = [] for step in trajectory: for event in step: if 'TrialEnded' in event.name: trial_events = [] elif 'TrialStarted' in event.name: per_trial_events.append(copy.deepcopy(trial_events)) else: trial_events.append(event) return per_trial_events
dm_alchemy-master
dm_alchemy/types/event_unpacking.py
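A sketch of how the unpacking helpers above might be chained; the helper name and the trajectory argument are ours, and the trajectory is assumed to be a sequence of per-step WorldEvent lists collected from the environment.

from dm_alchemy.types import event_unpacking

def first_trial_creations(trajectory):
  """Returns (perceived stone, id) and (perceived potion, id) pairs for trial 0."""
  per_trial = event_unpacking.events_per_trial(trajectory)
  creation_events = per_trial[0]
  return (event_unpacking.get_stones(creation_events),
          event_unpacking.get_potions(creation_events))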
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Types and functions for graphs in alchemy.""" import collections import copy import itertools from typing import Any, Dict, List, Mapping, MutableSequence, Optional, Sequence, Set, Tuple, Union import dataclasses from dm_alchemy.types import helpers from dm_alchemy.types import stones_and_potions import numpy as np NO_EDGE = 0 KNOWN_EDGE = 1 _POSS_EDGE_VALS = [helpers.UNKNOWN, NO_EDGE, KNOWN_EDGE] _EDGE_VAL_INDICES = {edge_val: ind for ind, edge_val in enumerate(_POSS_EDGE_VALS)} # TODO(b/173785778): This information is contained in stones_and_potions. Remove # this copy. _TOTAL_VERTICES = 8 def num_edges_in_cube() -> int: num_axes = stones_and_potions.get_num_axes() assert num_axes >= 1 # pytype cannot tell that num edges is an int because this depends on num axes # being >= 1 so we tell it that it is an int. num_edges = num_axes * 2 ** (num_axes - 1) # type: int return num_edges def edge_val_to_index(edge_val: int) -> int: return _EDGE_VAL_INDICES[edge_val] def index_to_edge_val(ind: int) -> int: return _POSS_EDGE_VALS[ind] # Each constraint is a list with 3 entries, 1 for each axis in latent space. # For axis i the entry is a list of 3 strings which can be either 1, -1, * or X. # The value in string j shows the value that a stone must have for its jth # latent space dimension to move in axis i (a * means any value). # When i == j the string is always an X (this just helps with readability so you # can see which axis is being moved along). # For example the constraint [['X', '*', '1'], ['*', 'X', '*'], ['*', '*', 'X']] # means that a stone can move on the y axis regardless of the value of x and z, # and similarly can move on the z axis regardless of the value of x and y, but # can only move on the x axis if z = 1, but y can be any value. 
Constraint = List[List[str]] # go from topology to nodes class Node: """A node within a graph, for calculating shortest paths.""" def __init__(self, idx: int, coords: Sequence[int]): self.idx = idx self.coords = coords def __str__(self) -> str: return 'Node ' + str(self.idx) + '(' + str(self.coords) + ')' def __hash__(self) -> int: return hash((self.idx, tuple(self.coords))) def __lt__(self, other) -> bool: return (self.idx, tuple(self.coords)) < (other.idx, tuple(other.coords)) class NodeList: """A list of nodes.""" def __init__(self, nodes: MutableSequence[Node] = None): if nodes is None: self.nodes = [] else: self.nodes = nodes def add_node(self, node: Node) -> None: self.nodes.append(node) def remove(self, node: Node) -> None: self.nodes.remove(node) def _get_node_by(self, feature: str, value: Any) -> Optional[Node]: matching_nodes = [n for n in self.nodes if getattr(n, feature) == value] if matching_nodes: assert len(matching_nodes) == 1, 'There should only be 1 node with ' return matching_nodes[0] return None def get_node_by_idx(self, idx: int) -> Optional[Node]: return self._get_node_by('idx', idx) def get_node_by_coords(self, coords: Sequence[int]) -> Optional[Node]: return self._get_node_by('coords', coords) def filter_nodes(self, dimension: int, value: int) -> 'NodeList': # Return a list of nodes filtered by coordinates on a specific dimension return NodeList([n for n in self.nodes if n.coords[dimension] == value]) def get_highest_value_node( self, reward_weights: stones_and_potions.RewardWeights ) -> Node: reward_vals = self.get_node_values(reward_weights) return self.nodes[np.argmax(reward_vals)] def get_node_values( self, reward_weights: stones_and_potions.RewardWeights ) -> List[int]: return [reward_weights(n.coords) for n in self.nodes] def get_nodes_by_value( self, reward_weights: stones_and_potions.RewardWeights, value: int ) -> List[Node]: reward_vals = self.get_node_values(reward_weights) return [n for (v, n) in zip(reward_vals, self.nodes) if v == value] def __hash__(self) -> int: return hash(tuple(self.nodes)) Edge = Tuple[int, Optional[stones_and_potions.Potion]] ConnectedComponent = Set[Node] class EdgeList: """An edge list that contains connections between nodes.""" def __init__(self): self.edges: Dict[Node, Dict[Node, Edge]] = dict() # only gets weakly connected components self._connected_components: List[ConnectedComponent] = [] def __str__(self) -> str: to_print: List[str] = [] for k, v in self.edges.items(): to_print.append('From') to_print.append(str(k)) for k2, v2 in v.items(): to_print.extend(['to', str(k2), 'weight', str(v2[0]), 'using potion', str(v2[1]), '\n']) return ' '.join(to_print) def add_edge( self, startnode: Node, endnode: Node, potion: Optional[stones_and_potions.Potion] = None, weight: int = 1) -> None: """Adds an edge defined by 2 nodes.""" if startnode not in self.edges: self.edges[startnode] = dict() if endnode in self.edges[startnode]: prev_edge = self.edges[startnode][endnode] self.edges[startnode][endnode] = prev_edge[0] + weight, prev_edge[1] else: self.edges[startnode][endnode] = weight, potion # check if either startnode or endnode are in a component, otherwise # create a new one if not self._connected_components: # no components at all, so start the first one self._connected_components.append({startnode, endnode}) else: startnode_in = [startnode in c for c in self._connected_components] endnode_in = [endnode in c for c in self._connected_components] either_in = [s or e for s, e in zip(startnode_in, endnode_in)] if not any(either_in): # 
node not in either component, so start a new one self._connected_components.append({startnode, endnode}) elif not any(startnode_in): # add startnode to component endnode is in target_component = [i for i, x in enumerate(endnode_in) if x] assert len(target_component) == 1 to_component = target_component[0] self._connected_components[to_component].add(startnode) elif not any(endnode_in): # add startnode to component startnode is in target_component = [i for i, x in enumerate(startnode_in) if x] assert len(target_component) == 1 to_component = target_component[0] self._connected_components[to_component].add(endnode) else: # if both in different components, combine components target_component1 = [i for i, x in enumerate(endnode_in) if x] assert len(target_component1) == 1 target_component2 = [i for i, x in enumerate(startnode_in) if x] assert len(target_component2) == 1 to_component1 = target_component1[0] to_component2 = target_component2[0] if to_component1 != to_component2: self._connected_components[to_component1].update( self._connected_components[to_component2]) del self._connected_components[to_component2] def get_target_nodes(self, startnode: Node) -> Dict[Node, Edge]: # match on ID rather than instance matching_node = [n for n in self.edges if n.idx == startnode.idx][0] return self.edges[matching_node] def get_edge(self, startnode: Node, endnode: Node) -> Optional[Edge]: """If the edge exists returns it otherwise returns None.""" matching_starts = [n for n in self.edges if n.idx == startnode.idx] if not matching_starts: return None matching_start = matching_starts[0] start = self.edges[matching_start] matching_ends = [n for n in start if n.idx == endnode.idx] if not matching_ends: return None matching_end = matching_ends[0] return self.edges[matching_start][matching_end] def has_edge(self, startnode: Node, endnode: Node) -> bool: return self.get_edge(startnode, endnode) is not None def get_connected_components(self) -> List[ConnectedComponent]: return self._connected_components def __hash__(self) -> int: return hash(tuple(sorted([(k, tuple(sorted([(k2, tuple(v2)) for k2, v2 in v.items()]))) for k, v in self.edges.items()]))) @dataclasses.dataclass class Graph: """An alchemy graph.""" node_list: NodeList edge_list: EdgeList def __hash__(self) -> int: return hash((self.node_list, self.edge_list)) def __eq__(self, other): return constraint_from_graph(self) == constraint_from_graph(other) def all_nodes_in_graph() -> NodeList: """Gets a node list with all the nodes that can appear in an alchemy graph.""" node_list = NodeList() for i in range(_TOTAL_VERTICES): coord = list(reversed([2 * int(x) - 1 for x in format(i, '03b')])) node_list.add_node(Node(i, coord)) return node_list def create_graph_from_constraint( constraint: Constraint, potions: Optional[Sequence[stones_and_potions.Potion]] = None ) -> Graph: """Creates the graph from the constraint string and the existing potions. Args: constraint: list of list of strings describing how stones can be transformed by potions (further explanation above where Constraint type is defined). potions: list of Potion objects; if None, assume all possible potions are available. 
Returns: A Graph object """ node_list = all_nodes_in_graph() edge_list = EdgeList() def add_edge(potion: stones_and_potions.Potion): """Adds an edge defined by a potion.""" dimension = potion.dimension direction = potion.direction startnodes = node_list.filter_nodes(dimension, -direction) dimension_constraints = [(i, x) for i, x in enumerate(constraint[dimension]) if x == '1' or x == '-1'] for dimension_constraint in dimension_constraints: startnodes = startnodes.filter_nodes( dimension_constraint[0], int(dimension_constraint[1])) for node in startnodes.nodes: endnode_coord = copy.copy(node.coords) # type: MutableSequence[int] endnode_coord[dimension] = direction endnode = node_list.get_node_by_coords(endnode_coord) edge_list.add_edge(node, endnode, potion) if potions: for potion in potions: add_edge(potion) else: count = 0 for latent_potion in stones_and_potions.possible_latent_potions(): add_edge(stones_and_potions.Potion( count, latent_potion.latent_dim, latent_potion.latent_dir)) count += 1 return Graph(node_list, edge_list) def constraint_from_graph(graph: Graph) -> Constraint: """Creates the constraint string corresponding to the graph passed in.""" # For each axis consider the possible values on the other axes and check which # of the edges exist. constraint = [] for axis in range(3): other_can_be = [set(), set()] for other_vals in itertools.product([-1, 1], [-1, 1]): coords_start = list(other_vals[:axis]) + [-1] + list(other_vals[axis:]) coords_end = list(other_vals[:axis]) + [1] + list(other_vals[axis:]) node_start = graph.node_list.get_node_by_coords(coords_start) node_end = graph.node_list.get_node_by_coords(coords_end) if graph.edge_list.has_edge(node_start, node_end): other_can_be[0].add(other_vals[0]) other_can_be[1].add(other_vals[1]) axis_constraint = ['*' if len(can_be) > 1 else str(list(can_be)[0]) for can_be in other_can_be] constraint.append(axis_constraint[:axis] + ['X'] + axis_constraint[axis:]) return constraint def convert_graph_to_adj_mat(graph: Graph) -> np.ndarray: """Converts from node_list, edge list to adjacency matrix. Args: graph: a graphs.Graph object. Returns: An adjacency matrix indicating which nodes are connected. The value at row i and column j indicates that node i is connected to node j using a potion with index equal to the value - 1. """ node_coords = [s.coords for s in graph.node_list.nodes] num_nodes = len(node_coords) adj_mat = NO_EDGE * np.ones((num_nodes, num_nodes), int) for start_node, edge_list in graph.edge_list.edges.items(): for end_node, potion_list in edge_list.items(): from_ind = start_node.idx to_ind = end_node.idx adj_mat[from_ind, to_ind] = potion_list[1].as_index + 1 return adj_mat def convert_adj_mat_to_graph( adj_mat: np.ndarray, init_potions: Optional[Sequence[stones_and_potions.Potion]] = None ) -> Graph: """Converts from adjacency matrix and init_potions to graph. Args: adj_mat: a numpy matrix indicating which nodes are connected. The value at row i and column j indicates that node i is connected to node j using a potion with index equal to the value - 1, with NO_EDGE indicating no connection. init_potions: a list of Potion objects. 
Returns: A Graph object """ node_list = all_nodes_in_graph() node_coords = [s.coords for s in node_list.nodes] edge_list = EdgeList() from_inds, to_inds = np.where(adj_mat != NO_EDGE) for row, column in zip(from_inds, to_inds): from_node = node_list.get_node_by_coords(node_coords[row]) to_node = node_list.get_node_by_coords(node_coords[column]) potion_color_idx = adj_mat[row, column] - 1 potion_dir = (potion_color_idx % 2) * 2 - 1 potion_dim = potion_color_idx // 2 if init_potions is not None: matched_potions = [p for p in init_potions if p.dimension == potion_dim and p.direction == potion_dir] else: matched_potions = [stones_and_potions.Potion(-1, potion_dim, potion_dir)] for potion in matched_potions: edge_list.add_edge(from_node, to_node, potion) return Graph(node_list, edge_list) def possible_constraints() -> List[Constraint]: """Returns all lists of constraint strings regardless of graph validity. Returns: A list of constraints. """ possible_values = ['-1', '1', '*'] one_axis_constraints = [] for v1 in possible_values: for v2 in possible_values: one_axis_constraints.append([v1, v2]) def insert_x(val: List[str], pos: int) -> List[str]: return val[:pos] + ['X'] + val[pos:] constraints = [] for x in one_axis_constraints: for y in one_axis_constraints: for z in one_axis_constraints: constraints.append([insert_x(x, 0), insert_x(y, 1), insert_x(z, 2)]) all_graphs = [create_graph_from_constraint(constraint) for constraint in constraints] def all_reachable(g: Graph) -> bool: # All nodes must have an edge to them and there must be only one component. # It is necessary to check both since connected components only returns # components with at least one edge. return (len(g.edge_list.edges) == len(g.node_list.nodes) and len(g.edge_list.get_connected_components()) == 1) return [constr for graph, constr in zip(all_graphs, constraints) if all_reachable(graph)] def no_bottleneck_constraints() -> List[Constraint]: return [[['X', '*', '*'], ['*', 'X', '*'], ['*', '*', 'X']]] def bottleneck1_constraints() -> List[Constraint]: return [[['X', '-1', '1'], ['*', 'X', '*'], ['*', '*', 'X']]] def bottleneck2_constraints() -> List[Constraint]: return [[['X', '-1', '*'], ['*', 'X', '*'], ['*', '*', 'X']]] def bottleneck3_constraints() -> List[Constraint]: return [[['X', '-1', '1'], ['*', 'X', '*'], ['*', '-1', 'X']]] def get_num_constraints( constraints: Sequence[Constraint]) -> List[int]: """Returns a list of the total number of constraints across axes.""" def count_constraints(axis_constraints: List[str]) -> int: return sum(1 for e in axis_constraints if e not in ['*', 'X']) return [sum(count_constraints(e) for e in c) for c in constraints] def latent_constraint_to_stone_space( latent_constraint: Constraint, stone_map: stones_and_potions.StoneMap ) -> Constraint: """Converts a constraint specified in latent space to one in stone space.""" stone_space_constraint = copy.deepcopy(latent_constraint) axes = list(range(len(latent_constraint))) for axis in axes: for other_axis in axes[:axis] + axes[axis + 1:]: if stone_map.latent_pos_dir[other_axis] == -1: if latent_constraint[axis][other_axis] == '1': stone_space_constraint[axis][other_axis] = '-1' elif latent_constraint[axis][other_axis] == '-1': stone_space_constraint[axis][other_axis] = '1' return stone_space_constraint def graph_distr( constraints: Sequence[Constraint] ) -> Dict[Graph, float]: """Returns prior over all valid graphs.""" num_constraints = get_num_constraints(constraints) valid_graphs = [ (create_graph_from_constraint(constraint), 
constraint_count) for constraint, constraint_count in zip(constraints, num_constraints)] # Equal probability is given to the set of graphs with each valid number of # constraints. # In practice this means: # 1/4 probability for the constraint 0 case # (1/4)*(1/12) probability for each case with one constraint (12 cases) # (1/4)*(1/48) probability for each case with two constraints (48 cases) # (1/4)*(1/48) probability for each case with three constraints (48 cases) graphs_per_constraint = collections.Counter([g[1] for g in valid_graphs]) prior = [(1.0 / (len(graphs_per_constraint) * graphs_per_constraint[g[1]])) for g in valid_graphs] valid_graphs = [g[0] for g in valid_graphs] return {g: p for g, p in zip(valid_graphs, prior)} def random_graph( distr: Mapping[Graph, float], random_state: np.random.RandomState ) -> Graph: graphs = list(distr.keys()) return graphs[random_state.choice( len(graphs), p=np.array(list(distr.values()), dtype=np.float32))] def cube_edges() -> List[Tuple[int, int]]: """Gets pairs of graph indices which are edges of the cube.""" graph_nodes = all_nodes_in_graph().nodes edges = [] for (i, node_i), (j, node_j) in itertools.combinations( enumerate(graph_nodes), 2): # Nodes are 1D so the return has 1 element with the non-zero indices. diffs = np.nonzero(np.array(node_i.coords) - np.array(node_j.coords))[0] # If the nodes differ on just one dimension there may be an edge between # them. if diffs.size == 1: edges.append((i, j)) return edges def blank_known_adj_mat() -> np.ndarray: """Returns an adjacency matrix with no knowledge of what cube edges exist.""" # At first we know the structure of the cube so only 3 possible edges from # each node. # Start with no edges and add possible edge along the edges of a cube. num_nodes = len(all_nodes_in_graph().nodes) known_adj_mat = NO_EDGE * np.ones((num_nodes, num_nodes), dtype=object) for i, j in cube_edges(): known_adj_mat[i, j] = helpers.UNKNOWN known_adj_mat[j, i] = helpers.UNKNOWN return known_adj_mat def known_adj_mat_from_edge_values(edge_vals: Sequence[int]) -> np.ndarray: known_adj_mat = blank_known_adj_mat() for (start_node, end_node), val in zip(cube_edges(), edge_vals): if val not in _POSS_EDGE_VALS: raise ValueError('Invalid edge value for known adjacency matrix.') known_adj_mat[start_node, end_node] = val known_adj_mat[end_node, start_node] = val return known_adj_mat def adj_mat_from_edge_values(edge_vals: Sequence[int]) -> np.ndarray: """Gets adjacency matrix with potion indices at entries from key edge values.""" known_adj_mat = blank_known_adj_mat() for (start_node, end_node), val in zip(cube_edges(), edge_vals): if not 0 <= val <= stones_and_potions.PerceivedPotion.num_types: raise ValueError('Invalid edge value for adjacency matrix.') known_adj_mat[start_node, end_node] = val if val > 0: potion_pair = stones_and_potions.perceived_potion_from_index( stones_and_potions.PerceivedPotionIndex(val - 1)) potion_pair.perceived_dir = -potion_pair.perceived_dir known_adj_mat[end_node, start_node] = potion_pair.index() + 1 else: known_adj_mat[end_node, start_node] = 0 return known_adj_mat def edge_values_from_adj_mat(adj_mat: np.ndarray) -> List[int]: return [adj_mat[start_node, end_node] for start_node, end_node in cube_edges()] class PartialGraph: """Partial information about a graph.""" def __init__(self): self.known_adj_mat = blank_known_adj_mat() def add_edge( self, stone_start: stones_and_potions.LatentStone, stone_end: stones_and_potions.LatentStone, val: int ) -> None: """Adds an edge to the partial graph.""" # 
When we discover that an edge either exists or does not exist. # Add the edge from this stone in the direction according to potion from_ind = stone_start.index() to_ind = stone_end.index() self.known_adj_mat[from_ind, to_ind] = val # Reverse edge must have the same val self.known_adj_mat[to_ind, from_ind] = val # There is no need to deduce other knowledge from this since it is all # captured by the list of valid graphs (e.g. the exclusion of xor) def update(self, all_graphs: Sequence[Graph]) -> None: """Updates the graph by filling in edges we can deduce.""" # Get all graphs matching the partial information we have. matches = self.matching_graphs(all_graphs) edge_vals = [] adj_mats = [partial_graph_from_graph(match).known_adj_mat for match in matches] for start_node, end_node in cube_edges(): # For this edge get all possible values in matching graphs. vals = set(adj_mat[start_node, end_node] for adj_mat in adj_mats) # If there is only 1 possible value then it set it in the adjacency # matrix. if len(vals) == 1: edge_vals.append(vals.pop()) else: # Otherwise, set unknown. edge_vals.append(helpers.UNKNOWN) self.known_adj_mat = known_adj_mat_from_edge_values(edge_vals) def matching_graphs( self, all_graphs: Sequence[Graph], return_indices: bool = False ) -> Union[List[Graph], List[int]]: """Returns a list of all graphs which match the partial info we have.""" def matches(graph: Graph): """Returns whether the passed graph matches the partial info we have.""" # g matches our knowledge if all edges in g are plausible according to # what we know and if all edges we know are in g known_edges = {(v1, v2) for v1, v2 in zip(*np.where(self.known_adj_mat == KNOWN_EDGE))} for start_node, end_node_list in graph.edge_list.edges.items(): for end_node in end_node_list: from_ind = start_node.idx to_ind = end_node.idx if (from_ind, to_ind) in known_edges: known_edges.remove((from_ind, to_ind)) continue if self.known_adj_mat[from_ind, to_ind] == NO_EDGE: return False return not known_edges if return_indices: return [i for i, g in enumerate(all_graphs) if matches(g)] return [g for g in all_graphs if matches(g)] def index(self) -> int: important_edges = edge_values_from_adj_mat(self.known_adj_mat) return int(np.ravel_multi_index( tuple(edge_val_to_index(val) for val in important_edges), tuple(len(_POSS_EDGE_VALS) for _ in important_edges))) def partial_graph_from_graph(graph: Graph) -> PartialGraph: """Converts a graphs.Graph to a PartialGraph with all information known.""" known_adj_mat = NO_EDGE * np.ones( (len(graph.node_list.nodes), len(graph.node_list.nodes))) for start_node, edges in graph.edge_list.edges.items(): for end_node in edges: from_ind = start_node.idx to_ind = end_node.idx known_adj_mat[from_ind, to_ind] = KNOWN_EDGE partial_graph = PartialGraph() partial_graph.known_adj_mat = known_adj_mat return partial_graph def partial_graph_from_index(ind: int) -> PartialGraph: """Converts from the integer representation to the full type.""" important_edges = np.unravel_index( ind, tuple(len(_POSS_EDGE_VALS) for _ in range(num_edges_in_cube()))) partial_graph = PartialGraph() partial_graph.known_adj_mat = known_adj_mat_from_edge_values( [index_to_edge_val(int(e)) for e in important_edges]) return partial_graph def partial_graph_from_possibles(possibles: np.ndarray) -> PartialGraph: """Constructs a partial graph from the list of possible graphs.""" partial_graph = PartialGraph() if possibles.size == 0: return partial_graph graph_nodes = possibles[0].node_list.nodes # Go through each possible graph and check 
the status of the important edges adj_mats = tuple(partial_graph_from_graph(p).known_adj_mat for p in possibles) stacked_adj_mats = np.stack(adj_mats) uniques = [[np.unique(stacked_adj_mats[:, i, j]) for i in range(len( graph_nodes))] for j in range(len(graph_nodes))] for i in range(len(graph_nodes)): for j in range(len(graph_nodes)): u = uniques[i][j] if len(u) == 1: partial_graph.known_adj_mat[i, j] = u[0] else: partial_graph.known_adj_mat[i, j] = helpers.UNKNOWN return partial_graph def graph_with_potions( graph: Graph, potions: Sequence[stones_and_potions.Potion] ) -> Graph: """Makes a copy of a graph with edges only where we have potions.""" reduced_edge_list = EdgeList() for start_node, edges in graph.edge_list.edges.items(): for end_node, edge in edges.items(): potions_required = [ (i, ec - sc) for i, (sc, ec) in enumerate(zip( start_node.coords, end_node.coords)) if sc != ec] assert len(potions_required) == 1 if potions_required[0] in [ (p.dimension, 2 * p.direction) for p in potions]: reduced_edge_list.add_edge(start_node, end_node, edge[1], edge[0]) return Graph(copy.deepcopy(graph.node_list), reduced_edge_list)
dm_alchemy-master
dm_alchemy/types/graphs.py
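A round-trip sketch for the constraint and adjacency-matrix utilities above (Graph equality is constraint equality, per Graph.__eq__ in this file); shown purely for illustration.

from dm_alchemy.types import graphs

constraint = graphs.bottleneck1_constraints()[0]  # [['X', '-1', '1'], ...]
graph = graphs.create_graph_from_constraint(constraint)
assert graphs.constraint_from_graph(graph) == constraint

adj_mat = graphs.convert_graph_to_adj_mat(graph)
assert graphs.convert_adj_mat_to_graph(adj_mat) == graph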
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper functions and global variables for ideal observer.""" import math from typing import List, Sequence import numpy as np UNKNOWN = -1000 END_TRIAL = -2 def str_np_array_construct(a: np.ndarray) -> str: return 'np.' + repr(a) def perm_to_index(perm: Sequence[int], perm_index_to_index: np.ndarray) -> int: """Converts a permutation to an integer. We first treat the permutation as a tuple of integers which can be any value between 0 and len(perm) - 1. Then we use the precomputed perm_index_to_index to convert from this to indices between 0 and len(perm)!. For example if the permutation is [0, 1, 2] this maps to 0 * 3^2 + 1 * 3^1 + 2 * 3^0 = 5 Then we look up perm_index_to_index[5] which is 0. Args: perm: A permutation. perm_index_to_index: A matrix which converts valid permutations of length 3 to indices between 0 and 3!. Returns: An integer representing the permutation. """ return perm_index_to_index[np.ravel_multi_index( tuple(perm), tuple(len(perm) for _ in range(len(perm))))] def perm_from_index( ind: int, num_elements, index_to_perm_index: np.ndarray) -> List[int]: # Do the inverse of perm_to_index. return [int(i) for i in np.unravel_index( index_to_perm_index[ind], tuple(num_elements for _ in range(num_elements)))] def partial_perm_to_index( partial_perm: Sequence[int], perm_index_to_index: np.ndarray) -> int: """Converts permutation of length 3 with potentially unknown values to an int.""" # We cannot have just 1 unknown value because knowing the others mean it is # determined. Therefore with a length 3 sequence we either have 0, 1 or 3 # knowns. # To make this work for permutations of lengths other than 3 we would have to # consider all cases where the number of knowns is 0, 1, .... n - 2, n. # If the number of knowns is m there are m! ways to order them, n choose m # ways to select the known values and n choose m ways to place them in the # permutation. Since we only need to deal with permutations of length 3 we # just deal with that special case here. if len(partial_perm) != 3: raise ValueError('Function only deals with permutations of length 3.') first_unknown = UNKNOWN first_known = UNKNOWN known_val = UNKNOWN for i, p in enumerate(partial_perm): if p == UNKNOWN: if first_unknown == UNKNOWN: first_unknown = i else: if first_known == UNKNOWN: first_known = i known_val = p # If we have 0 unknowns encode as normal. if first_unknown == UNKNOWN: return perm_to_index(partial_perm, perm_index_to_index) num_axes = len(partial_perm) num_simple_perms = math.factorial(num_axes) # If we have 0 knowns use the next value. if first_known == UNKNOWN: return num_simple_perms # If we have 2 unknowns then we can encode this using the position and value # of the first (and only) known element. 
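  # For example, with 3 elements the partial permutation [UNKNOWN, 2, UNKNOWN]
  # has its only known value, 2, at position 1, so it encodes as
  # 3! + 1 + ravel_multi_index((1, 2), (3, 3)) = 6 + 1 + 5 = 12.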
return num_simple_perms + 1 + int(np.ravel_multi_index( (first_known, known_val), (num_axes, num_axes))) def partial_perm_from_index( ind: int, num_elements: int, index_to_perm_index: np.ndarray ) -> List[int]: """Converts int to permutation of length 3 with potentially unknown values.""" num_simple_perms = math.factorial(num_elements) if ind < num_simple_perms: return perm_from_index(ind, num_elements, index_to_perm_index) none_known = [UNKNOWN for _ in range(num_elements)] if ind == num_simple_perms: return none_known known_pos, known_val = np.unravel_index( ind - num_simple_perms - 1, (num_elements, num_elements)) # pylint: disable=unbalanced-tuple-unpacking none_known[known_pos] = int(known_val) return none_known
dm_alchemy-master
dm_alchemy/types/helpers.py
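A sketch of the permutation encodings above in use; it assumes the precomputed conversion tables load from the packaged data via precomputed_maps.get_perm_index_conversion (the same call used by the tests elsewhere in this package).

from dm_alchemy.ideal_observer import precomputed_maps
from dm_alchemy.types import helpers

perm_index_to_index, index_to_perm_index = (
    precomputed_maps.get_perm_index_conversion())

# Full permutations round trip through their integer index.
ind = helpers.perm_to_index([0, 2, 1], perm_index_to_index)
assert helpers.perm_from_index(ind, 3, index_to_perm_index) == [0, 2, 1]

# Partially known permutations (one known value, two unknowns) do too.
partial = [1, helpers.UNKNOWN, helpers.UNKNOWN]
partial_ind = helpers.partial_perm_to_index(partial, perm_index_to_index)
assert helpers.partial_perm_from_index(
    partial_ind, 3, index_to_perm_index) == partial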
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for converting between unity and python representations.""" import functools from typing import List from absl.testing import absltest from absl.testing import parameterized from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.protos import alchemy_pb2 from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import unity_python_conversion from dm_alchemy.types import utils import numpy as np def _make_tuple(i): return i, def _unity_potions_given_constraint( constraint: graphs.Constraint) -> List[alchemy_pb2.PotionProperties]: """Return list of unity potion properties which encode the passed constraint.""" graph = graphs.create_graph_from_constraint(constraint) # Use any potion map, it doesn't matter since we only care about reactions. pm = stones_and_potions.PotionMap(dim_map=[0, 1, 2], dir_map=[1, 1, 1]) perceived_and_latent = [(pm.apply_inverse(l), l) for l in stones_and_potions.possible_latent_potions()] return [unity_python_conversion.to_potion_unity_properties(p, l, graph) for p, l in perceived_and_latent] def get_potion_tests(): """Test cases for converting between potions and unity potion properties.""" potion_tests = [] for pm in stones_and_potions.possible_potion_maps( precomputed_maps.get_perm_index_conversion()[1]): potion_tests.append( ([(pm.apply_inverse(l), l) for l in stones_and_potions.possible_latent_potions()], functools.partial( unity_python_conversion.to_potion_unity_properties, # It shouldn't matter what graph we use for testing this part. 
graph=graphs.create_graph_from_constraint( graphs.no_bottleneck_constraints()[0])), unity_python_conversion._potions_from_potion_unity_properties, lambda x: x, _make_tuple)) return potion_tests def from_stone_unity_properties(stone_properties, rotation): perceived_stone, _, latent_stone = ( unity_python_conversion._from_stone_unity_properties( stone_properties, rotation)) return perceived_stone, latent_stone def get_stone_tests(): """Test cases for converting between stones and unity stone properties.""" stone_tests = [] for rotation in stones_and_potions.possible_rotations(): for sm in stones_and_potions.possible_stone_maps(): stone_tests.append( ([(stones_and_potions.unalign(sm.apply_inverse(l), rotation), l) for l in stones_and_potions.possible_latent_stones()], unity_python_conversion.to_stone_unity_properties, functools.partial(from_stone_unity_properties, rotation=rotation), lambda x: x, _make_tuple)) return stone_tests def all_graphs(): return [graphs.create_graph_from_constraint(g) for g in graphs.possible_constraints()] def test_chemistries(): """Return a subset of chemistries to test conversion.""" chems = [ utils.Chemistry( potion_map=stones_and_potions.all_fixed_potion_map(), stone_map=stones_and_potions.all_fixed_stone_map(), graph=graphs.create_graph_from_constraint( graphs.no_bottleneck_constraints()[0]), rotation=np.eye(3)), utils.Chemistry( potion_map=stones_and_potions.PotionMap([1, 0, 2], [1, 1, -1]), stone_map=stones_and_potions.StoneMap(np.array([-1, 1, -1])), graph=graphs.create_graph_from_constraint( graphs.bottleneck1_constraints()[0]), rotation=stones_and_potions.possible_rotations()[-1])] for r in stones_and_potions.possible_rotations(): for sm in stones_and_potions.possible_stone_maps(): chems.append(utils.Chemistry( potion_map=stones_and_potions.all_fixed_potion_map(), stone_map=sm, graph=graphs.create_graph_from_constraint( graphs.no_bottleneck_constraints()[0]), rotation=r)) return chems class UnityPythonConversionTest(parameterized.TestCase): @parameterized.parameters( [(stones_and_potions.possible_latent_stones(), unity_python_conversion.latent_stone_to_unity, unity_python_conversion._unity_to_latent_stone, _make_tuple, _make_tuple), (stones_and_potions.possible_latent_potions(), unity_python_conversion.latent_potion_to_unity, unity_python_conversion._unity_to_latent_potion, _make_tuple, _make_tuple), (stones_and_potions.possible_rotations(), unity_python_conversion.rotation_to_unity, unity_python_conversion.rotation_from_unity, _make_tuple, _make_tuple, lambda lhs, rhs: stones_and_potions.rotations_equal(*lhs, *rhs)), (test_chemistries(), unity_python_conversion.to_unity_chemistry, unity_python_conversion.from_unity_chemistry, _make_tuple, lambda x: x), # Test all graphs while keeping the potions constant # Compare constraint equality since graphs only compare equal if they use # the same node and edge objects. (all_graphs(), _unity_potions_given_constraint, unity_python_conversion.graphs_from_potion_unity_properties, lambda x: _make_tuple(graphs.constraint_from_graph(x)), _make_tuple)] # Test all potions while keeping the graph constant + get_potion_tests() # Test conversion of stones + get_stone_tests() ) def test_back_and_forth( self, items, transform, inverse, post_process_i, post_process_transformed, equality=lambda lhs, rhs: lhs == rhs): """Test that transforming to and from unity types does not change anything. Since some of the transforms to test take or return multiple arguments we assume that all of them do. 
For functions which do not we use the post processing functions to make them tuples. Args: items: Set of items to transform. transform: Function which transforms them to unity type. inverse: Function which transforms unity type to python type. post_process_i: Function to post process the python type before passing it to the transform and before comparing it. This must return a tuple. post_process_transformed: Function to post process the unity type before passing it to the transform and before comparing it. This must return a tuple. equality: Function to check 2 python types are equal. """ for i in items: # We assume these are all tuples after post processing so we can treat # them all the same. i = post_process_i(i) transformed = post_process_transformed(transform(*i)) inverted = post_process_i(inverse(*transformed)) self.assertTrue(equality(inverted, i)) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/types/unity_python_conversion_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import itertools from absl.testing import absltest from absl.testing import parameterized from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import stones_and_potions import numpy as np class StonesAndPotionsTest(parameterized.TestCase): perm_index_to_index = None index_to_perm_index = None @classmethod def setUpClass(cls): super(StonesAndPotionsTest, cls).setUpClass() # Load a precomputed map. cls.perm_index_to_index, cls.index_to_perm_index = ( precomputed_maps.get_perm_index_conversion()) @parameterized.parameters( (stones_and_potions.aligned_stone_from_index, stones_and_potions.AlignedStone.num_types), (stones_and_potions.perceived_potion_from_index, stones_and_potions.PerceivedPotion.num_types), (stones_and_potions.latent_stone_from_index, stones_and_potions.LatentStone.num_types), (stones_and_potions.latent_potion_from_index, stones_and_potions.LatentPotion.num_types), (stones_and_potions.stone_map_from_index, stones_and_potions.StoneMap.num_types), (stones_and_potions.potion_map_from_index, stones_and_potions.PotionMap.num_types, lambda x: {'index_to_perm_index': x.index_to_perm_index}, lambda x: {'perm_index_to_index': x.perm_index_to_index}), (stones_and_potions.partial_stone_map_from_index, stones_and_potions.PartialStoneMap.num_types), (stones_and_potions.partial_potion_map_from_index, ( stones_and_potions.PartialPotionMap.num_axis_assignments, stones_and_potions.PartialPotionMap.num_dir_assignments), lambda x: {'index_to_perm_index': x.index_to_perm_index}, lambda x: {'perm_index_to_index': x.perm_index_to_index}) ) def test_index( self, from_index, num_indices, from_index_precomputed_args=None, to_index_precomputed_args=None): """Tests all valid indices converting to and from their type.""" if from_index_precomputed_args is None: from_index_precomputed_args = lambda _: {} if to_index_precomputed_args is None: to_index_precomputed_args = lambda _: {} if isinstance(num_indices, tuple): unique_indices = itertools.product(*[range(i) for i in num_indices]) expected_instances = np.prod(num_indices) else: unique_indices = range(num_indices) expected_instances = num_indices instances = set() for i in unique_indices: instance = from_index(i, **from_index_precomputed_args(self)) instances.add(instance) back_to_index = instance.index(**to_index_precomputed_args(self)) self.assertEqual(back_to_index, i) # All instances should be unique self.assertLen(instances, expected_instances) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/types/stones_and_potions_test.py
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Functions for converting to and from precomputed maps protos.""" import base64 import math import os from typing import List, Optional, Sequence, Tuple from dm_alchemy import io from dm_alchemy.encode import precomputed_maps_pb2 from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions import numpy as np def _int_to_string(i: int, bytes_per_int: int) -> str: return base64.b64encode( i.to_bytes(bytes_per_int, 'big', signed=False)).decode('ascii') def _string_to_int(s: str) -> int: return int.from_bytes(base64.b64decode(s.encode( 'ascii')), 'big', signed=False) def _write_to_bin( data: np.ndarray, folder: str, name: str, proto_type, entries_from_list=lambda l: l ) -> None: proto = proto_type( entries=entries_from_list(data.ravel().tolist()), shape=data.shape) io.write_proto(os.path.join(folder, name), proto.SerializeToString()) def write_int_array(data: np.ndarray, folder: str, name: str) -> None: _write_to_bin(data, folder, name, precomputed_maps_pb2.IntArray) def write_float_array(data: np.ndarray, folder: str, name: str) -> None: _write_to_bin(data, folder, name, precomputed_maps_pb2.FloatArray) def _bitfield_entries_from_list(l: Sequence[int]) -> List[str]: max_new_list_int = max(l) if l else 0 num_bytes_needed = math.ceil(max_new_list_int.bit_length() / 8) return [_int_to_string(i, num_bytes_needed) for i in l] def write_bitfield_array(data: np.ndarray, folder: str, name: str) -> None: _write_to_bin( data, folder, name, precomputed_maps_pb2.BitfieldArray, _bitfield_entries_from_list) def _list_int_entries_from_list( l: Sequence[Optional[Sequence[int]]] ) -> List[precomputed_maps_pb2.ListIntsArray.Entry]: return [precomputed_maps_pb2.ListIntsArray.Entry( list_present=e is not None, entries=e) for e in l] def write_list_ints_array(data: np.ndarray, folder: str, name: str) -> None: _write_to_bin( data, folder, name, precomputed_maps_pb2.ListIntsArray, _list_int_entries_from_list) def _possible_latent_dir_entries_from_list( l: Sequence[Tuple[bool, Sequence[int]]] ) -> List[precomputed_maps_pb2.PossibleLatentDirs.Entry]: return [precomputed_maps_pb2.PossibleLatentDirs.Entry( could_be_unchanged=e[0], plausible_latent_dirs=e[1]) for e in l] def write_possible_latent_dirs( data: np.ndarray, folder: str, name: str ) -> None: _write_to_bin( data, folder, name, precomputed_maps_pb2.PossibleLatentDirs, _possible_latent_dir_entries_from_list) def _partial_potion_map_update_entries_from_list( l: Sequence[Tuple[int, int]] ) -> List[int]: max_vals = (stones_and_potions.PartialPotionMap.num_axis_assignments + 1, stones_and_potions.PartialPotionMap.num_dir_assignments + 1) return [int(np.ravel_multi_index((e[0] + 1, e[1] + 1), max_vals)) for e in l] def write_partial_potion_map_update( data: np.ndarray, folder: str, name: str ) -> None: _write_to_bin( data, folder, name, 
precomputed_maps_pb2.IntArray, _partial_potion_map_update_entries_from_list) def graph_to_proto( graph: graphs.Graph ) -> precomputed_maps_pb2.GraphArray.Entry: adj_mat = graphs.convert_graph_to_adj_mat(graph) edge_values = graphs.edge_values_from_adj_mat(adj_mat) return precomputed_maps_pb2.GraphArray.Entry(edge_present=edge_values) def _graph_entries_from_list( l: Sequence[graphs.Graph] ) -> List[precomputed_maps_pb2.GraphArray.Entry]: return [graph_to_proto(graph) for graph in l] def write_graph_array(data: np.ndarray, folder: str, name: str) -> None: _write_to_bin( data, folder, name, precomputed_maps_pb2.GraphArray, _graph_entries_from_list) def _load_proto( folder: str, name: str, proto_type, proto_entry_to_array_entry=lambda e: e ) -> np.ndarray: """Loads serialized proto file representing a numpy array.""" serialized = io.read_proto(os.path.join(folder, name)) proto = proto_type.FromString(serialized) def pyfunc(i): return proto_entry_to_array_entry(proto.entries[i]) return np.frompyfunc(pyfunc, 1, 1)(np.reshape(np.arange(len( proto.entries)), proto.shape)) def load_int_array(folder: str, name: str) -> np.ndarray: return _load_proto(folder, name, precomputed_maps_pb2.IntArray) def load_float_array(folder: str, name: str) -> np.ndarray: return _load_proto(folder, name, precomputed_maps_pb2.FloatArray) def load_bitfield_array(folder: str, name: str) -> np.ndarray: return _load_proto( folder, name, precomputed_maps_pb2.BitfieldArray, _string_to_int) def _list_int_proto_to_array( entry: precomputed_maps_pb2.ListIntsArray.Entry ) -> Optional[List[int]]: return None if not entry.list_present else list(entry.entries) def load_list_ints_array(folder: str, name: str) -> np.ndarray: return _load_proto( folder, name, precomputed_maps_pb2.ListIntsArray, _list_int_proto_to_array) def _possible_latent_dir_proto_to_array( entry: precomputed_maps_pb2.PossibleLatentDirs.Entry ) -> Tuple[bool, List[int]]: return entry.could_be_unchanged, list(entry.plausible_latent_dirs) def load_possible_latent_dirs(folder: str, name: str) -> np.ndarray: return _load_proto( folder, name, precomputed_maps_pb2.PossibleLatentDirs, _possible_latent_dir_proto_to_array) def _partial_potion_map_update_proto_to_array(entry: int) -> Tuple[int, int]: max_vals = (stones_and_potions.PartialPotionMap.num_axis_assignments + 1, stones_and_potions.PartialPotionMap.num_dir_assignments + 1) partial_potion_map_index = np.unravel_index(entry, max_vals) return partial_potion_map_index[0] - 1, partial_potion_map_index[1] - 1 def load_partial_potion_map_update(folder: str, name: str) -> np.ndarray: return _load_proto( folder, name, precomputed_maps_pb2.IntArray, _partial_potion_map_update_proto_to_array) def proto_to_graph( entry: precomputed_maps_pb2.GraphArray.Entry ) -> graphs.Graph: return graphs.convert_adj_mat_to_graph(graphs.adj_mat_from_edge_values( entry.edge_present)) def load_graph_array(folder: str, name: str) -> np.ndarray: return _load_proto( folder, name, precomputed_maps_pb2.GraphArray, proto_to_graph)
dm_alchemy-master
dm_alchemy/encode/precomputed_maps_proto_conversion.py
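The writer and loader pairs in the file above are symmetric, so an array pushed through the proto format should come back with the same shape and values. The snippet below is a minimal usage sketch, not part of the library: the output folder is hypothetical and assumed to already exist, and io.write_proto / io.read_proto are assumed to accept a plain filesystem path as they are used above.

import numpy as np

from dm_alchemy.encode import precomputed_maps_proto_conversion

folder = '/tmp/precomputed_maps_example'  # hypothetical folder, assumed to exist
original = np.arange(12).reshape(3, 4)

# Serialise the array to an IntArray proto on disk, then read it back.
precomputed_maps_proto_conversion.write_int_array(original, folder, 'example')
restored = precomputed_maps_proto_conversion.load_int_array(folder, 'example')

# The loader rebuilds the array element-wise with np.frompyfunc, so the dtype
# is object, but the shape and the values should match the original.
assert restored.shape == original.shape
assert all(int(r) == int(o)
           for r, o in zip(restored.ravel(), original.ravel()))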
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Functions for converting to and from chemistry protos.""" from typing import List, Sequence, Tuple from dm_alchemy import io from dm_alchemy.encode import chemistries_pb2 from dm_alchemy.encode import precomputed_maps_proto_conversion from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils def chemistry_to_proto( chemistry: utils.Chemistry, ) -> chemistries_pb2.Chemistry: perm_index_to_index, _ = precomputed_maps.get_perm_index_conversion() return chemistries_pb2.Chemistry( potion_map=chemistry.potion_map.index(perm_index_to_index), stone_map=chemistry.stone_map.index(), graph=precomputed_maps_proto_conversion.graph_to_proto(chemistry.graph), rotation=stones_and_potions.rotation_to_angles(chemistry.rotation)) def proto_to_chemistry( proto: chemistries_pb2.Chemistry ) -> utils.Chemistry: _, index_to_perm_index = precomputed_maps.get_perm_index_conversion() return utils.Chemistry( potion_map=stones_and_potions.potion_map_from_index( stones_and_potions.PotionMapIndex(proto.potion_map), index_to_perm_index), stone_map=stones_and_potions.stone_map_from_index( stones_and_potions.StoneMapIndex(proto.stone_map)), graph=precomputed_maps_proto_conversion.proto_to_graph(proto.graph), rotation=stones_and_potions.rotation_from_angles(proto.rotation)) def trial_items_to_proto( trial_items: utils.TrialItems ) -> chemistries_pb2.TrialItems: return chemistries_pb2.TrialItems( stones=[s.latent_stone().index() for s in trial_items.stones], potions=[p.latent_potion().index() for p in trial_items.potions]) def proto_to_trial_items( proto: chemistries_pb2.TrialItems ) -> utils.TrialItems: return utils.TrialItems( stones=[stones_and_potions.latent_stone_from_index( stones_and_potions.LatentStoneIndex(s)) for s in proto.stones], potions=[stones_and_potions.latent_potion_from_index( stones_and_potions.LatentPotionIndex(p)) for p in proto.potions]) def episode_items_to_proto( episode_items: utils.EpisodeItems ) -> chemistries_pb2.EpisodeItems: return chemistries_pb2.EpisodeItems( trial_items=[trial_items_to_proto(t) for t in episode_items.trials]) def proto_to_episode_items( proto: chemistries_pb2.EpisodeItems ) -> utils.EpisodeItems: items = utils.EpisodeItems(stones=[], potions=[]) items.trials = [proto_to_trial_items(p) for p in proto.trial_items] return items def chemistry_and_items_to_proto( chemistry: utils.Chemistry, episode_items: utils.EpisodeItems, ) -> chemistries_pb2.ChemistryAndItems: return chemistries_pb2.ChemistryAndItems( chemistry=chemistry_to_proto(chemistry), items=episode_items_to_proto(episode_items)) def proto_to_chemistry_and_items( proto: chemistries_pb2.ChemistryAndItems ) -> Tuple[utils.Chemistry, utils.EpisodeItems]: return (proto_to_chemistry(proto.chemistry), proto_to_episode_items(proto.items)) def 
chemistries_and_items_to_proto( chemistries_and_items: Sequence[Tuple[utils.Chemistry, utils.EpisodeItems]], ) -> chemistries_pb2.ChemistriesAndItems: return chemistries_pb2.ChemistriesAndItems( chemistries=[chemistry_and_items_to_proto(chemistry, episode_items) for chemistry, episode_items in chemistries_and_items]) def proto_to_chemistries_and_items( proto: chemistries_pb2.ChemistriesAndItems ) -> List[Tuple[utils.Chemistry, utils.EpisodeItems]]: return [(proto_to_chemistry(chem.chemistry), proto_to_episode_items(chem.items)) for chem in proto.chemistries] def write_chemistries_and_items( chemistries_and_items: Sequence[Tuple[utils.Chemistry, utils.EpisodeItems]], filename: str ) -> None: proto = chemistries_and_items_to_proto(chemistries_and_items) io.write_proto(filename, proto.SerializeToString()) def load_chemistries_and_items( filename: str ) -> List[Tuple[utils.Chemistry, utils.EpisodeItems]]: serialized = io.read_proto(filename) proto = chemistries_pb2.ChemistriesAndItems.FromString(serialized) return proto_to_chemistries_and_items(proto)
dm_alchemy-master
dm_alchemy/encode/chemistries_proto_conversion.py
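Because proto_to_chemistry inverts chemistry_to_proto, a single Chemistry can be pushed through the proto representation and back. The sketch below mirrors the fixtures used in the ideal observer tests further down (all-fixed potion and stone maps, a no-bottleneck graph, identity rotation); it is an illustrative usage sketch only and assumes those constructors behave as they do in the tests.

import numpy as np

from dm_alchemy.encode import chemistries_proto_conversion
from dm_alchemy.types import graphs
from dm_alchemy.types import stones_and_potions
from dm_alchemy.types import utils

# A fully fixed chemistry, as constructed in the ideal observer tests below.
chemistry = utils.Chemistry(
    potion_map=stones_and_potions.all_fixed_potion_map(),
    stone_map=stones_and_potions.all_fixed_stone_map(),
    graph=graphs.create_graph_from_constraint(
        graphs.no_bottleneck_constraints()[0]),
    rotation=np.eye(3))

# Round trip through the proto representation; the stone map index should be
# preserved since it is stored directly in the proto.
proto = chemistries_proto_conversion.chemistry_to_proto(chemistry)
restored = chemistries_proto_conversion.proto_to_chemistry(proto)
assert restored.stone_map.index() == chemistry.stone_map.index()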
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Functions for converting to and from sets of symbolic actions protos.""" from typing import List, Sequence from dm_alchemy import event_tracker from dm_alchemy.encode import precomputed_maps_pb2 from dm_alchemy.encode import symbolic_actions_pb2 import numpy as np def trial_events_to_proto( trial_events: event_tracker.MatrixEventTracker, ) -> precomputed_maps_pb2.IntArray: return precomputed_maps_pb2.IntArray( shape=trial_events.events.shape, entries=trial_events.events.ravel().tolist()) def proto_to_trial_events( proto: precomputed_maps_pb2.IntArray, ) -> event_tracker.MatrixEventTracker: trial_tracker = event_tracker.MatrixEventTracker( num_stones=proto.shape[0], num_potions=proto.shape[1] - 1) trial_tracker.events = np.array(proto.entries, dtype=int).reshape( proto.shape) return trial_tracker def episode_events_to_proto( episode_events: Sequence[event_tracker.MatrixEventTracker] ) -> symbolic_actions_pb2.EpisodeEvents: return symbolic_actions_pb2.EpisodeEvents( trial_events=[trial_events_to_proto(trial_events) for trial_events in episode_events]) def proto_to_episode_events( proto: symbolic_actions_pb2.EpisodeEvents, ) -> List[event_tracker.MatrixEventTracker]: return [proto_to_trial_events(trial_events_proto) for trial_events_proto in proto.trial_events] def evaluation_set_events_to_proto( evaluation_set: Sequence[Sequence[event_tracker.MatrixEventTracker]] ) -> symbolic_actions_pb2.EvaluationSetEvents: return symbolic_actions_pb2.EvaluationSetEvents( episode_events=[episode_events_to_proto(episode_events) for episode_events in evaluation_set]) def proto_to_evaluation_set_events( proto: symbolic_actions_pb2.EvaluationSetEvents, ) -> List[List[event_tracker.MatrixEventTracker]]: return [proto_to_episode_events(episode_events_proto) for episode_events_proto in proto.episode_events]
dm_alchemy-master
dm_alchemy/encode/symbolic_actions_proto_conversion.py
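A MatrixEventTracker is converted by flattening its events matrix into an IntArray proto and rebuilt by reshaping the entries, so a tracker round trips as sketched below. This is a usage sketch under the assumption that a freshly constructed tracker already exposes an all-zero events matrix; the sizes are illustrative only.

from dm_alchemy import event_tracker
from dm_alchemy.encode import symbolic_actions_proto_conversion

# Tracker for a trial with 2 stones and 3 potions. Note the events matrix has
# one more column than there are potions, matching the reconstruction in
# proto_to_trial_events, which sets num_potions = shape[1] - 1.
tracker = event_tracker.MatrixEventTracker(num_stones=2, num_potions=3)

proto = symbolic_actions_proto_conversion.trial_events_to_proto(tracker)
restored = symbolic_actions_proto_conversion.proto_to_trial_events(proto)
assert restored.events.shape == tracker.events.shape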
# Lint as python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A tree search ideal observer for alchemy.""" import collections import copy import math from typing import Any, Counter, List, Mapping, MutableMapping, Sequence, Tuple from dm_alchemy.ideal_observer import helpers from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import graphs from dm_alchemy.types import helpers as types_helpers from dm_alchemy.types import stones_and_potions import numpy as np # Alias these for readability AlignedStone = stones_and_potions.AlignedStone PerceivedStone = stones_and_potions.PerceivedStone PerceivedPotion = stones_and_potions.PerceivedPotion LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion StoneMap = stones_and_potions.StoneMap PotionMap = stones_and_potions.PotionMap PartialStoneMap = stones_and_potions.PartialStoneMap PartialPotionMap = stones_and_potions.PartialPotionMap PartialGraph = graphs.PartialGraph PrecomputedMaps = precomputed_maps.PrecomputedMaps # We use indices in place of actual types for speed AlignedStoneIndex = stones_and_potions.AlignedStoneIndex PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex LatentStoneIndex = stones_and_potions.LatentStoneIndex LatentPotionIndex = stones_and_potions.LatentPotionIndex StoneMapIndex = stones_and_potions.StoneMapIndex PotionMapIndex = stones_and_potions.PotionMapIndex Action = Tuple[AlignedStoneIndex, PerceivedPotionIndex] ActionObjective = Tuple[Action, Any] SearchResults = MutableMapping[int, ActionObjective] ActionObjectiveAndSearchResults = Tuple[Action, Any, SearchResults] END_TRIAL = types_helpers.END_TRIAL def get_possible_stone_maps( stone_map_indices: Sequence[StoneMapIndex], aligned_stones: Sequence[AlignedStone] ) -> Tuple[List[int], List[StoneMapIndex]]: """Gets possible stone maps (and their indices) consistent with stones passed.""" # This doesn't need to be especially fast as it just happens once per trial. 
still_in_s = [] poss_sms = [] for i, sm in enumerate(stone_map_indices): stone_map = stones_and_potions.stone_map_from_index(sm) if stone_map.consistent_with_stones(aligned_stones): still_in_s.append(i) poss_sms.append(sm) return still_in_s, poss_sms class WorldStateDistribution: """Distribution over world states.""" def __init__( self, stone_map_distr: Mapping[StoneMapIndex, float], potion_map_distr: Mapping[PotionMapIndex, float], precomputed: PrecomputedMaps ): self.stone_map_possible = sorted(stone_map_distr.keys()) self.potion_map_possible = sorted(potion_map_distr.keys()) self.partial_potion_map_index = ( stones_and_potions.partial_potion_map_from_possibles( self.potion_map_possible, precomputed.index_to_perm_index).index( precomputed.perm_index_to_index)) self.partial_stone_map_index = ( stones_and_potions.partial_stone_map_from_possibles( self.stone_map_possible).index()) # Use a bitfield like structure otherwise it takes forever to copy self.partial_graph_possible = helpers.list_to_bitfield( range(precomputed.graphs_list.shape[0])) self.partial_graph_index = ( precomputed.partial_graph_index_to_possible_index[ graphs.partial_graph_from_possibles( precomputed.graphs_list[self.get_possible_graphs()]).index()]) self.poss_world_states = np.zeros(( len(potion_map_distr), len(stone_map_distr), precomputed.graph_index_distr.shape[0]), dtype=float) for potion_map_index, p1 in enumerate(potion_map_distr.values()): for stone_map_index, p2 in enumerate(stone_map_distr.values()): for graph_index, p3 in enumerate(precomputed.graph_index_distr): p = p1 * p2 * p3 self.poss_world_states[ potion_map_index, stone_map_index, graph_index] = p self.observed_no_effect_bits = 0 def new_trial( self, aligned_stone_indices: Counter[AlignedStoneIndex] ) -> None: """Updates the world state distribution given the stones perceived. The reward indicator on the stones allows us to limit the possible maps from stone space to latent space. For example if we see a stone with reward 3 then it must be at [1, 1, 1] in latent space. Args: aligned_stone_indices: The stones seen in this trial. """ # The stones seen when we start a new trial could eliminate stone map # possibilities if there are multiple. aligned_stones = [ stones_and_potions.aligned_stone_from_index(aligned_stone_index) for aligned_stone_index in aligned_stone_indices] still_in_s, poss_sms = get_possible_stone_maps( self.stone_map_possible, aligned_stones) assert still_in_s, 'Stones seen in trial are impossible.' self.stone_map_possible = poss_sms self.partial_stone_map_index = ( stones_and_potions.partial_stone_map_from_possibles( self.stone_map_possible).index()) self.poss_world_states = self.poss_world_states[:, still_in_s, :] total_prob = self.poss_world_states.sum() self.poss_world_states /= total_prob def get_possible_graphs(self) -> List[int]: return helpers.bitfield_to_list(self.partial_graph_possible) def potions_equivalent( self, p1: PerceivedPotionIndex, p2: PerceivedPotionIndex, s: AlignedStoneIndex, precomputed: PrecomputedMaps ) -> bool: """If the potions effect on the stone are equivalent in this belief state. This is the case if we have the same knowledge about the potions effect and the same number of these potions and their counterparts on the same dimension. In this case the calculation of the expected reward must be exactly the same. This does not imply that the actual effect of the potions will be the same. 
For example, if we have no knowledge of the perceptual mapping and graph and we have one red potion and one green potion then the calculation will include terms for the probability that the red potion acts on each of the directed edges in latent space. The calculation for the green potion would have all of the same terms. Args: p1: The first potion. p2: The second potion. s: The stone they will be applied to. precomputed: Precomputed maps used for speed. Returns: True if they are equivalent. """ latent_dims = precomputed.possible_latent_dims[ p1, self.partial_potion_map_index[0]] latent_dims2 = precomputed.possible_latent_dims[ p2, self.partial_potion_map_index[0]] if latent_dims != latent_dims2: return False for latent_dim in latent_dims: this_could_stay_still1, latent_dirs1 = precomputed.possible_latent_dirs[ self.partial_potion_map_index[1], self.partial_stone_map_index, latent_dim, p1, s] this_could_stay_still2, latent_dirs2 = precomputed.possible_latent_dirs[ self.partial_potion_map_index[1], self.partial_stone_map_index, latent_dim, p2, s] if this_could_stay_still1 != this_could_stay_still2: return False if latent_dirs1 != latent_dirs2: return False return True def update_possible( self, stone_index: AlignedStoneIndex, potion_index: PerceivedPotionIndex, result_index: AlignedStoneIndex, precomputed: PrecomputedMaps ) -> None: """Updates which possibilities are consistent with this observation. Args: stone_index: The initial stone. potion_index: The potion applied to the stone. result_index: The resulting stone. precomputed: Precomputed maps used for speed. """ # Update which potion maps are possible poss_p = precomputed.poss_p_maps[stone_index, potion_index, result_index] if poss_p is not None: self.partial_potion_map_index = precomputed.partial_potion_map_update[ stone_index, potion_index, result_index, self.partial_potion_map_index[0], self.partial_potion_map_index[1]] self.potion_map_possible, still_in_p = helpers.sorted_intersection( self.potion_map_possible, poss_p) self.poss_world_states = self.poss_world_states[still_in_p, :, :] # Update which stone maps are possible poss_s = precomputed.poss_s_maps[stone_index, potion_index, result_index] if poss_s is not None: self.partial_stone_map_index = precomputed.partial_stone_map_update[ stone_index, result_index, self.partial_stone_map_index] self.stone_map_possible, still_in_s = helpers.sorted_intersection( self.stone_map_possible, poss_s) self.poss_world_states = self.poss_world_states[:, still_in_s, :] # Update which graphs are possible update_graphs_possible = False if stone_index != result_index: update_graphs_possible = True self.partial_graph_index = precomputed.partial_graph_update[ precomputed.drop_reward[stone_index], precomputed.drop_reward[result_index]][self.partial_graph_index] if not update_graphs_possible: missing_edge = precomputed.missing_edge_no_change[ self.partial_stone_map_index, self.partial_potion_map_index[0], self.partial_potion_map_index[1], potion_index, precomputed.drop_reward[stone_index]] if missing_edge != -1: update_graphs_possible = True self.partial_graph_index = precomputed.update_partial_graph_no_change[ self.partial_graph_index, missing_edge] if update_graphs_possible: new_graphs_possible = precomputed.partial_graph_to_matching_graphs[ self.partial_graph_index] remaining_graphs_possible = (self.partial_graph_possible & new_graphs_possible) # Work out the position of the eliminated slices in poss_world_states. 
still_in_g = [] ind = 0 for i in range(precomputed.graphs_list.shape[0]): check = 1 << i poss_check = self.partial_graph_possible & check if remaining_graphs_possible & check and poss_check: still_in_g.append(ind) if poss_check: ind += 1 self.partial_graph_possible = remaining_graphs_possible self.poss_world_states = self.poss_world_states[:, :, still_in_g] # If stone map is known then get info about which actions will have no # effect because they take the stone out of the latent cube stone_map_index = precomputed.partial_stone_map_to_stone_map[ self.partial_stone_map_index] if stone_map_index != -1: self.observed_no_effect_bits |= precomputed.no_effect_from_partial_chem[ stone_map_index, self.partial_potion_map_index[0], self.partial_potion_map_index[1]] def possible_outcomes( self, perceived_potion_index: PerceivedPotionIndex, aligned_stone_index: AlignedStoneIndex, precomputed: PrecomputedMaps ) -> Tuple[List[AlignedStoneIndex], bool]: """Gets a list of outcomes we could see applying this potion to this stone. Args: perceived_potion_index: The potion we apply. aligned_stone_index: The stone we apply it to. precomputed: Precomputed maps used for speed. Returns: A list of possible outcomes and a boolean saying whether one of them is the stone remaining the same. """ outcomes = [] could_stay_still = False for latent_dim in precomputed.possible_latent_dims[ perceived_potion_index, self.partial_potion_map_index[0]]: # latent_dir may not be possible if the reward for the stone is already at # max or min. If you know stone position in latent space on latent_dim # then latent_dir can only be the opposite so you don't need to consider # the reward going the other way even if you don't know what direction the # potion acts in. this_could_stay_still, latent_dirs = precomputed.possible_latent_dirs[ self.partial_potion_map_index[1], self.partial_stone_map_index, latent_dim, perceived_potion_index, aligned_stone_index] # Could stay still due to going outside the cube. could_stay_still |= this_could_stay_still for latent_dir in latent_dirs: result = precomputed.react_result[ aligned_stone_index, latent_dim, (latent_dir + 1) // 2, self.partial_graph_index] if result != helpers.IMPOSSIBLE: outcomes.append(result) if latent_dirs: # Could stay still due to edge not existing this_edge_exists = precomputed.edge_exists[ self.partial_graph_index, precomputed.drop_reward[ aligned_stone_index], latent_dim] # If either we know the edge isn't there or we are not sure if the edge # is there then could stay still. if this_edge_exists != graphs.KNOWN_EDGE: could_stay_still = True if could_stay_still: outcomes.append(aligned_stone_index) return outcomes, could_stay_still def action_and_outcome( self, stone_index: AlignedStoneIndex, potion_index: PerceivedPotionIndex, result_index: AlignedStoneIndex, precomputed: PrecomputedMaps, bit_mask: int ) -> float: """Updates the world state distribution given we saw this observation.""" # Eliminate whole slices of the world state distribution if possible given # the new information. self.update_possible(stone_index, potion_index, result_index, precomputed) # If the stone changed as a result of applying the potion then all # information gained removes whole slices otherwise we must remove # combinations of potion map, stone map and graph. 
if stone_index == result_index: for potion_i, potion_map_index in enumerate(self.potion_map_possible): for stone_i, stone_map_index in enumerate(self.stone_map_possible): graphs_bitfield = precomputed.graphs_with_edge[ stone_map_index, potion_map_index, stone_index, potion_index] # If there are no graphs with an edge between the stone and result # then continue. if graphs_bitfield == 0: continue # Graphs in the list are not possible because they contain the edge so # the stone should have changed but didn't. not_possible = [] ind = 0 for i in range(precomputed.graphs_list.shape[0]): check = 1 << i poss_check = self.partial_graph_possible & check if graphs_bitfield & check and poss_check: not_possible.append(ind) if poss_check: ind += 1 self.poss_world_states[potion_i, stone_i, not_possible] = 0.0 self.observed_no_effect_bits |= bit_mask # Re-normalise total_prob = self.poss_world_states.sum() if total_prob > 0.0: self.poss_world_states /= total_prob return total_prob def __len__(self): return len(self.poss_world_states) def update_stone_map(self, new_to_old: StoneMap) -> None: """If we assumed the wrong rotation we may need to swap stone map dims.""" # Change partial stone map. partial_stone_map = stones_and_potions.partial_stone_map_from_index( self.partial_stone_map_index) # If the partial stone map is not completely known at this point then it is # completely unknown since any 2 bits of information would be enough to # completely determine the rotation. if any(c == types_helpers.UNKNOWN for c in partial_stone_map.latent_pos_dir): assert all(c == types_helpers.UNKNOWN for c in partial_stone_map.latent_pos_dir) else: partial_stone_map.chain(new_to_old) self.partial_stone_map_index = partial_stone_map.index() # Change poss stone maps. old_stone_map_possible = copy.deepcopy(self.stone_map_possible) old_stone_map_to_new_stone_map = {} for stone_map_index in self.stone_map_possible: stone_map = stones_and_potions.stone_map_from_index(stone_map_index) stone_map.chain(new_to_old) old_stone_map_to_new_stone_map[stone_map_index] = stone_map.index() self.stone_map_possible = sorted( old_stone_map_to_new_stone_map[stone_map_index] for stone_map_index in self.stone_map_possible) new_stone_map_to_index = { stone_map_index: i for i, stone_map_index in enumerate(self.stone_map_possible)} old_index_to_new_index = [ new_stone_map_to_index[old_stone_map_to_new_stone_map[stone_map]] for stone_map in old_stone_map_possible] # Change poss world states. old_poss_world_states = copy.deepcopy(self.poss_world_states) for old_index, new_index in enumerate(old_index_to_new_index): self.poss_world_states[new_index] = old_poss_world_states[old_index] # Change observed no effect. 
old_observed_no_effect_bits = copy.deepcopy(self.observed_no_effect_bits) self.observed_no_effect_bits = 0 for old_index in range(stones_and_potions.AlignedStone.num_dir_assignments): aligned_stone = stones_and_potions.aligned_stone_from_index( AlignedStoneIndex(old_index)) new_index = new_to_old.apply(aligned_stone).index() for potion_index in range(stones_and_potions.PerceivedPotion.num_types): old_mask = 1 << (old_index * PerceivedPotion.num_types) + potion_index masked = old_observed_no_effect_bits & old_mask if masked: new_mask = 1 << (new_index * PerceivedPotion.num_types) + potion_index self.observed_no_effect_bits |= new_mask def init_world_state_distribution( precomputed: PrecomputedMaps ) -> WorldStateDistribution: """Creates an initial world state distribution from observed stones.""" # Initialise the ideal observer based on the stones and potions you can see return WorldStateDistribution( stones_and_potions.stone_map_distr(precomputed.stone_maps), stones_and_potions.potion_map_distr(precomputed.potion_maps), precomputed) def stone_potion_bit_mask( stone_index: AlignedStoneIndex, potion_index: PerceivedPotionIndex, precomputed: PrecomputedMaps ) -> int: """Returns a mask for the bit representing a stone potion pair.""" stone_part = precomputed.drop_reward[stone_index] * PerceivedPotion.num_types return 1 << (stone_part + potion_index) class BeliefState: """Belief the ideal observer has about stones, potions, world and reward. The belief state consists of a set of perceived stones, a set of perceived potions, a distribution over world states, and a reward so far. """ possible_partial_graph_num_bits = None def __init__(self, precomputed: PrecomputedMaps): # These should be set by calling new_trial self.aligned_stones: Counter[AlignedStoneIndex] = collections.Counter() self.perceived_potions: Counter[PerceivedPotionIndex] = ( collections.Counter()) self.world_state_distribution = init_world_state_distribution(precomputed) def representative_potions( self, stone_index: AlignedStoneIndex, precomputed: PrecomputedMaps ) -> List[PerceivedPotionIndex]: """Gets a representative set of potions for this stone and belief state. Some potions will be equivalent if we don't know what they do and we have the same number of them and their counterparts on the same perceptual dimension. For each equivalence set we return one potion as a representative of the set. Args: stone_index: The stone to apply potions to. precomputed: Precomputed maps used for speed. Returns: A representative set of potions. 
""" potion_to_count = [0 for _ in range(PerceivedPotion.num_types)] for p1 in self.perceived_potions: p2 = precomputed.potion_to_pair[p1] c1 = self.perceived_potions[p1] c2 = self.perceived_potions[p2] potion_to_count[p1] = (c1, c2) representative_potions = [] for p2 in self.perceived_potions: equiv = False for p1 in representative_potions: b1 = (self.world_state_distribution.observed_no_effect_bits & precomputed.potion_masks[p1]) >> p1 b2 = (self.world_state_distribution.observed_no_effect_bits & precomputed.potion_masks[p2]) >> p2 if (potion_to_count[p1] == potion_to_count[p2] and (b1 == b2) and self.world_state_distribution.potions_equivalent( p1, p2, stone_index, precomputed)): equiv = True break if not equiv: representative_potions.append(p2) return representative_potions def _remove_stone(self, stone_index: AlignedStoneIndex) -> None: if stone_index in self.aligned_stones: self.aligned_stones[stone_index] -= 1 if self.aligned_stones[stone_index] == 0: del self.aligned_stones[stone_index] def _add_stone(self, stone_index: AlignedStoneIndex) -> None: self.aligned_stones.update([stone_index]) def _remove_potion(self, potion_index: PerceivedPotionIndex) -> None: self.perceived_potions.subtract([potion_index]) if self.perceived_potions[potion_index] == 0: del self.perceived_potions[potion_index] def possible_actions( self, precomputed: PrecomputedMaps ) -> List[Tuple[AlignedStoneIndex, PerceivedPotionIndex]]: """Gets representative list of possible actions which have an effect.""" # Use -1, -1 to represent ending and putting all stones in the cauldron or # throwing them away. If we consider this action first then in the event # that it has the same expected reward as using a potion we will take this # action instead. This gives more intuitive behaviour, for example if we # have the best stone we won't transform it to something less good and then # transform it back. poss_actions = [(AlignedStoneIndex(END_TRIAL), PerceivedPotionIndex(END_TRIAL))] for s in self.aligned_stones: # Don't consider potions if we have observed that they have no effect. potions = [p for p in self.representative_potions(s, precomputed) if not (self.world_state_distribution.observed_no_effect_bits & stone_potion_bit_mask(s, p, precomputed))] poss_actions.extend([(s, p) for p in potions]) return poss_actions def use_potion( self, stone_index: AlignedStoneIndex, potion_index: PerceivedPotionIndex, result_index: AlignedStoneIndex ) -> None: """Uses the potion on the current stone. Args: stone_index: The stone used in the potion. potion_index: The potion applied to the stone. result_index: The result observed. """ # Remove the used potion self._remove_potion(potion_index) # If the stone has not changed then we don't need to do anything else. if stone_index == result_index: return # Remove the initial stone and replace with the result self._remove_stone(stone_index) self._add_stone(result_index) def action_and_outcome( self, stone_index: AlignedStoneIndex, potion_index: PerceivedPotionIndex, result_index: AlignedStoneIndex, precomputed: PrecomputedMaps, bit_mask: int ) -> float: """Updates the belief state given the action and observation. Args: stone_index: The stone used in the potion. potion_index: The potion in which the stone is used. result_index: The resulting stone. precomputed: Precomputed maps used for speed. bit_mask: Mask on observed no effect for the stone and potion passed. Returns: The probability given our prior belief state of this observation. 
""" self.use_potion(stone_index, potion_index, result_index) # Update the world state distribution total_prob = self.world_state_distribution.action_and_outcome( stone_index, potion_index, result_index, precomputed, bit_mask) return total_prob def new_trial( self, aligned_stones: Counter[AlignedStoneIndex], perceived_potions: Counter[PerceivedPotionIndex] ) -> None: self.aligned_stones = copy.deepcopy(aligned_stones) self.perceived_potions = copy.deepcopy(perceived_potions) self.world_state_distribution.new_trial(aligned_stones) def to_bitfield(self) -> int: """Converts to a bitfield to cache results.""" def perceived_potions_to_bits( perceived_potions: Mapping[PerceivedPotionIndex, int] ) -> Tuple[int, int]: """Converts the set of perceived potions to a bitfield.""" local_int_rep = 0 for potion_type, count in perceived_potions.items(): local_int_rep |= (count << ( PerceivedPotion.count_num_bits * potion_type)) if count > PerceivedPotion.max_present: raise ValueError('Too many potions present.') if potion_type >= PerceivedPotion.num_types: raise ValueError('Invalid potion type.') return local_int_rep, ( PerceivedPotion.num_types * PerceivedPotion.count_num_bits) def aligned_stones_to_bits( aligned_stones: Mapping[AlignedStoneIndex, int] ) -> Tuple[int, int]: """Converts the set of perceived stones to a bitfield.""" local_int_rep = 0 stone_number = 0 for stone_type, count in sorted(aligned_stones.items()): for _ in range(count): local_int_rep |= (stone_type << ( AlignedStone.num_bits * stone_number)) stone_number += 1 if stone_type >= stones_and_potions.AlignedStone.num_types: raise ValueError('Invalid stone type') if stone_number > AlignedStone.max_present: raise ValueError('Too many stones present.') return local_int_rep, AlignedStone.max_present * AlignedStone.num_bits all_things = [ perceived_potions_to_bits(self.perceived_potions), aligned_stones_to_bits(self.aligned_stones), (self.world_state_distribution.observed_no_effect_bits, LatentStone.num_types * PerceivedPotion.num_types), (self.world_state_distribution.partial_potion_map_index[0], PartialPotionMap.num_bits_axis), (self.world_state_distribution.partial_potion_map_index[1], PartialPotionMap.num_bits_dir), (self.world_state_distribution.partial_stone_map_index, PartialStoneMap.num_bits), (self.world_state_distribution.partial_graph_index, BeliefState.possible_partial_graph_num_bits) ] return helpers.pack_to_bitfield(all_things) def __repr__(self) -> str: # Convert the observed_no_effect bitfield to a matrix before printing. 
observed_no_effect = np.zeros( (stones_and_potions.LatentStone.num_types, stones_and_potions.PerceivedPotion.num_types)) for pe_st in range(stones_and_potions.LatentStone.num_types): for pe_po in range(stones_and_potions.PerceivedPotion.num_types): bit_num = (pe_st * stones_and_potions.PerceivedPotion.num_types) + pe_po observed_no_effect[pe_st, pe_po] = ( self.world_state_distribution.observed_no_effect_bits & (1 << bit_num)) return ( 'BeliefState(aligned_stones={aligned_stones}, ' 'perceived_potions={perceived_potions}, ' 'observed_no_effect={observed_no_effect}, ' 'poss_world_states={poss_world_states}, ' 'partial_potion_map_index={partial_potion_map_index}, ' 'partial_stone_map_index={partial_stone_map_index}, ' 'partial_graph_index={partial_graph_index}, ' 'partial_graph_possible={partial_graph_possible}, ' 'stone_map_possible={stone_map_possible}, ' 'potion_map_possible={potion_map_possible}, ' 'num_world_states = {num_world_states}, '.format( aligned_stones=self.aligned_stones, perceived_potions=self.perceived_potions, observed_no_effect=types_helpers.str_np_array_construct( observed_no_effect), poss_world_states=types_helpers.str_np_array_construct( self.world_state_distribution.poss_world_states), partial_potion_map_index=( self.world_state_distribution.partial_potion_map_index), partial_stone_map_index=( self.world_state_distribution.partial_stone_map_index), partial_graph_index=( self.world_state_distribution.partial_graph_index), partial_graph_possible=( self.world_state_distribution.partial_graph_possible), stone_map_possible=self.world_state_distribution.stone_map_possible, potion_map_possible=( self.world_state_distribution.potion_map_possible), num_world_states=len(self.world_state_distribution))) def update_stone_map( self, new_to_old: StoneMap ) -> None: """If we assumed the wrong rotation we may need to swap stone map dims.""" # Change poss stone maps. 
old_aligned_stones = { stones_and_potions.aligned_stone_from_index(stone): count for stone, count in self.aligned_stones.items()} new_aligned_stones = collections.Counter({ AlignedStone(stone.reward, new_to_old.apply( stone).latent_coords).index(): count for stone, count in old_aligned_stones.items()}) self.aligned_stones = new_aligned_stones self.world_state_distribution.update_stone_map(new_to_old) @property def num_world_states(self) -> int: return np.where(self.world_state_distribution.poss_world_states)[0].size @property def num_potion_maps(self) -> int: return len(self.world_state_distribution.potion_map_possible) @property def num_stone_maps(self) -> int: return len(self.world_state_distribution.stone_map_possible) @property def num_graphs(self) -> int: return len(self.world_state_distribution.get_possible_graphs()) def partial_potion_map( self, index_to_perm_index: np.ndarray ) -> PartialPotionMap: return stones_and_potions.partial_potion_map_from_index( self.world_state_distribution.partial_potion_map_index, index_to_perm_index) def partial_stone_map(self) -> PartialStoneMap: return stones_and_potions.partial_stone_map_from_index( self.world_state_distribution.partial_stone_map_index) def partial_graph( self, possible_partial_graph_indices: np.ndarray ) -> PartialGraph: return graphs.partial_graph_from_index( possible_partial_graph_indices[ self.world_state_distribution.partial_graph_index]) class BeliefStateWithRotation: """Belief state over chem including rotations.""" def __init__(self, precomputed: precomputed_maps.PrecomputedMaps): self.belief_state = BeliefState(precomputed) self.possible_rotations = stones_and_potions.possible_rotations() self.rotation = None # We need to know 1 stone which is consistent with the selected rotation. self._observed_stone = None self._rotation_to_angles = ( lambda rotation: tuple(stones_and_potions.rotation_to_angles(rotation))) stone_map_indices = [ sm.index() for sm in stones_and_potions.possible_stone_maps()] self._stone_maps_for_rotation = { self._rotation_to_angles(rotation): copy.deepcopy(stone_map_indices) for rotation in stones_and_potions.possible_rotations()} def _update_given_stones( self, perceived_stones: Sequence[PerceivedStone] ) -> None: """Updates the possible rotations and belief state given observed stones.""" if not perceived_stones: raise ValueError( 'Must pass perceived stones to update possible rotations.') # Given the stones we see can we eliminate some possible rotations. valid_rotations = [] for rotation in self.possible_rotations: # For a rotation to be possible all stones have to go to corners of the # cube and the change in latent variables has to be consistent with the # change in reward (i.e. at least one stone map gives the observed # rewards). aligned_stones = [] rotation_valid = True for stone in perceived_stones: valid, coords = stones_and_potions.aligns(stone, rotation) if valid: aligned_stones.append(stones_and_potions.aligned_stone_from_coords( coords, stone.reward)) else: rotation_valid = False break if rotation_valid: stone_maps = self._stone_maps_for_rotation[self._rotation_to_angles( rotation)] _, possible_stone_maps = get_possible_stone_maps( stone_maps, aligned_stones) self._stone_maps_for_rotation[self._rotation_to_angles( rotation)] = possible_stone_maps if possible_stone_maps: valid_rotations.append(rotation) assert valid_rotations, 'No rotation is valid.' 
self.possible_rotations = valid_rotations if self.rotation is None: self.rotation = self.possible_rotations[0] self._observed_stone = stones_and_potions.align( perceived_stones[0], self.rotation) elif not stones_and_potions.rotations_equal( self.rotation, self.possible_rotations[0]): new_to_old = stones_and_potions.get_new_mapping_to_old_mapping( self.rotation, self.possible_rotations[0], self._observed_stone) self.belief_state.update_stone_map(new_to_old) self.rotation = self.possible_rotations[0] self._observed_stone = stones_and_potions.align( perceived_stones[0], self.rotation) def new_trial( self, perceived_stones: Counter[PerceivedStone], perceived_potions: Counter[PerceivedPotion] ) -> None: """Updates belief state given that new trial has started.""" self._update_given_stones(list(perceived_stones.keys())) aligned_stones = collections.Counter( {stones_and_potions.align(stone, self.rotation).index(): count for stone, count in perceived_stones.items()}) perceived_potion_indices = collections.Counter( {potion.index(): count for potion, count in perceived_potions.items()}) self.belief_state.new_trial(aligned_stones, perceived_potion_indices) def action_and_outcome( self, stone: PerceivedStone, potion: PerceivedPotion, result: PerceivedStone, precomputed: PrecomputedMaps ) -> float: self._update_given_stones([result]) stone_index = stones_and_potions.align(stone, self.rotation).index() result_index = stones_and_potions.align(result, self.rotation).index() bit_mask = stone_potion_bit_mask(stone_index, potion.index(), precomputed) return self.belief_state.action_and_outcome( stone_index, potion.index(), result_index, precomputed, bit_mask) @property def num_world_states(self) -> int: return self.belief_state.num_world_states @property def num_potion_maps(self) -> int: return self.belief_state.num_potion_maps @property def num_stone_maps(self) -> int: return self.belief_state.num_stone_maps @property def num_graphs(self) -> int: return self.belief_state.num_graphs def partial_potion_map( self, index_to_perm_index: np.ndarray ) -> PartialPotionMap: return self.belief_state.partial_potion_map(index_to_perm_index) def partial_stone_map(self) -> PartialStoneMap: return self.belief_state.partial_stone_map() def partial_graph( self, possible_partial_graph_indices: np.ndarray ) -> PartialGraph: return self.belief_state.partial_graph(possible_partial_graph_indices) def search( belief_state: BeliefState, search_results: SearchResults, bonus: int, precomputed: PrecomputedMaps, depth: int = 0, minimise_world_states: bool = False ) -> ActionObjective: """Searches iteratively over actions and outcomes to find expected reward. Conducts a depth first search over the DAG of available actions and the possible outcomes. The reward for a latent stone is assumed to be the sum of the latent values plus the passed bonus if all latent values are positive. We do not deal with reward functions with arbitrary coefficient vectors and offsets. Args: belief_state: The current belief state. search_results: A cache of previously computed results mapping the belief state as a bitfield to the best action and expected reward. bonus: The extra reward we get by reaching the stone of reward 3. precomputed: Precomputed maps used for speed. depth: Number of actions taken in this search to reach this belief state. minimise_world_states: Let the objective be to minimise the number of world states at the end of the trial instead of to maximise the accumulated reward. 
Actions selected will not necessarily produce reward but will narrow down the possible chemistries, e.g. given a maximum value stone on the first trial and a potion we would use the potion to find out the effect even though it could reduce the value of the stone. Returns: The best action and maximum expected reward achievable from this state. """ # If we have searched from this belief state before return the cached result. belief_state_bitfield = belief_state.to_bitfield() if belief_state_bitfield in search_results: return search_results[belief_state_bitfield] # For all possible actions consider all possible outcomes and then search from # the outcome. action_rewards = {} for stone_index, potion_index in belief_state.possible_actions(precomputed): # This action means use or discard all stones. if potion_index == END_TRIAL: # Ending the trial so get the actual number of world states if minimise_world_states: expected_num_world_states = np.where( belief_state.world_state_distribution.poss_world_states)[0].size else: raw_reward_count = [(precomputed.stone_to_reward[s], c) for s, c in belief_state.aligned_stones.items()] reward_per_stone_type = [ 0.0 if reward < 0 else c * (reward + bonus) if reward == stones_and_potions.max_reward() else c * reward for reward, c in raw_reward_count] action_reward = sum(reward_per_stone_type) best_action_depth = depth else: bit_mask = stone_potion_bit_mask(stone_index, potion_index, precomputed) if belief_state.world_state_distribution.observed_no_effect_bits & bit_mask: continue # Using a potion on a stone could lead to a number of possible outcomes # with various probabilities. The ideal observer must calculate what # reward it can expect to obtain for each of these. if minimise_world_states: # The expected number of world states when the trial ends expected_num_world_states = 0 else: action_reward = 0.0 best_action_depth = 0.0 poss_outcomes, could_stay_still = ( belief_state.world_state_distribution.possible_outcomes( potion_index, stone_index, precomputed)) if len(poss_outcomes) == 1: # If there is only one possibility and it is staying still then there is # no need to search this action as it will have no effect. if could_stay_still: continue new_game_state = copy.deepcopy(belief_state) new_game_state.use_potion(stone_index, potion_index, poss_outcomes[0]) _, objective = search( new_game_state, search_results, bonus, precomputed, depth + 1, minimise_world_states) if minimise_world_states: expected_num_world_states -= objective else: search_reward, neg_search_action_depth = objective action_reward += search_reward best_action_depth -= neg_search_action_depth else: for outcome in poss_outcomes: new_game_state = copy.deepcopy(belief_state) prob = new_game_state.action_and_outcome( stone_index, potion_index, outcome, precomputed, bit_mask) if prob > 0: _, objective = search( new_game_state, search_results, bonus, precomputed, depth + 1, minimise_world_states) if minimise_world_states: expected_num_world_states -= prob * objective else: search_reward, neg_search_action_depth = objective action_reward += prob * search_reward best_action_depth -= prob * neg_search_action_depth # Store the expected reward and the negative search depth so that when we # maximise reward, if 2 actions have the same expected reward then we will # take the action with the minimum search depth. This prevents us taking # actions which do not harm our expected reward but do not help us. 
if minimise_world_states: action_rewards[ (stone_index, potion_index)] = -expected_num_world_states else: action_rewards[(stone_index, potion_index)] = ( action_reward, -best_action_depth) result = max(action_rewards.items(), key=lambda a: a[1]) search_results[belief_state_bitfield] = result return result def ideal_observer( init_game_state: BeliefState, search_results: SearchResults, bonus: int, precomputed: PrecomputedMaps, minimise_world_states: bool ) -> ActionObjectiveAndSearchResults: """Runs the ideal observer given a set of stones and potions. This runs an exhaustive search from the initial state over possible actions and possible outcomes of those actions. It returns which action to take, the expected reward and a set of search results for belief states encountered. Args: init_game_state: The initial belief state of the system. search_results: Previously computed action and reward for belief states. bonus: The additional reward for getting the best stone. precomputed: Precomputed maps used for speed. minimise_world_states: Let the objective be to minimise the number of world states at the end of the trial instead of to maximise the accumulated reward. Returns: The best action to take, the expected reward and a set of search results for belief states encountered. """ # Set the number of bits required to fit all possible partial graph indices. BeliefState.possible_partial_graph_num_bits = math.ceil(math.log2( len(precomputed.partial_graph_index_to_possible_index))) # Run the search over all possible next actions action, objective = search( init_game_state, search_results, bonus, precomputed, minimise_world_states=minimise_world_states) return action, objective, search_results
dm_alchemy-master
dm_alchemy/ideal_observer/ideal_observer.py
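The search entry point is driven exactly as in the tests that follow: build a BeliefState from the precomputed maps, register the first trial's stones and potions by index, then call ideal_observer to get the best first action and the expected reward. The sketch below mirrors that flow; loading the precomputed maps for a level is slow, and the level name shown is one of those used in the tests.

import collections

import numpy as np

from dm_alchemy.ideal_observer import ideal_observer
from dm_alchemy.ideal_observer import precomputed_maps
from dm_alchemy.types import stones_and_potions

# Loading the precomputed maps can take a long time.
precomputed = precomputed_maps.load_from_level_name(
    'perceptual_mapping_randomized_with_random_bottleneck')

belief_state = ideal_observer.BeliefState(precomputed)

# One stone of reward 1 at the (1, 1, 1) corner and a single potion.
aligned_stones = collections.Counter(
    {stones_and_potions.AlignedStone(1, np.array([1, 1, 1])).index(): 1})
perceived_potions = collections.Counter(
    {stones_and_potions.PerceivedPotion(0, 1).index(): 1})
belief_state.new_trial(aligned_stones, perceived_potions)

# Exhaustive search over actions and outcomes. When not minimising world
# states the objective is (expected reward, negative search depth).
action, objective, search_results = ideal_observer.ideal_observer(
    belief_state, {}, bonus=0, precomputed=precomputed,
    minimise_world_states=False)
expected_reward, _ = objective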
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the ideal observer.""" import collections import functools from absl.testing import absltest from absl.testing import parameterized from dm_alchemy import event_tracker as ev_trk from dm_alchemy import symbolic_alchemy_bots from dm_alchemy import symbolic_alchemy_trackers from dm_alchemy.ideal_observer import ideal_observer from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import graphs from dm_alchemy.types import stones_and_potions from dm_alchemy.types import utils import numpy as np Counter = collections.Counter AlignedStone = stones_and_potions.AlignedStone PerceivedPotion = stones_and_potions.PerceivedPotion PotionMap = stones_and_potions.PotionMap StoneMap = stones_and_potions.StoneMap Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion AddMatrixEventTracker = symbolic_alchemy_trackers.AddMatrixEventTracker ScoreTracker = symbolic_alchemy_trackers.ScoreTracker BeliefStateTracker = symbolic_alchemy_trackers.BeliefStateTracker _ALL_FIXED_POTION_MAP = stones_and_potions.all_fixed_potion_map() _ALL_FIXED_STONE_MAP = stones_and_potions.all_fixed_stone_map() _ALL_FIXED_GRAPH = graphs.create_graph_from_constraint( graphs.no_bottleneck_constraints()[0]) _BOTTLENECK1_CONSTRAINT = graphs.bottleneck1_constraints()[0] _BOTTLENECK1_GRAPH = graphs.create_graph_from_constraint( _BOTTLENECK1_CONSTRAINT) def add_trackers_to_env(env, reward_weights, precomputed, init_belief_state): env.add_trackers({ AddMatrixEventTracker.NAME: AddMatrixEventTracker(), ScoreTracker.NAME: ScoreTracker(reward_weights), BeliefStateTracker.NAME: BeliefStateTracker( precomputed, env, init_belief_state)}) class IdealObserverTest(parameterized.TestCase): level_name_to_precomputed = None @classmethod def setUpClass(cls): super(IdealObserverTest, cls).setUpClass() # Load all of the precomputed maps required for all tests otherwise loading # them in the tests can take too long and cause the test to time out. 
levels_used = ['all_fixed_bottleneck1', 'perceptual_mapping_randomized', 'perceptual_mapping_randomized_with_random_bottleneck'] cls.level_name_to_precomputed = { s: precomputed_maps.load_from_level_name(s) for s in levels_used} @parameterized.parameters( # 1 stone worth 3, no potions - reward should be 3 (Counter({AlignedStone(3, np.array([1, 1, 1])): 1}), Counter(), 3), # 1 stone worth 1, no potions - reward should be 1 (Counter({AlignedStone(1, np.array([1, 1, 1])): 1}), Counter(), 1), # 1 stone worth -1, no potions - reward should be 0 (since we can choose # not to put the stone in) (Counter({AlignedStone(-1, np.array([1, 1, 1])): 1}), Counter(), 0), # 1 stone worth 3, any number of potions - reward should be 3 (Counter({AlignedStone(3, np.array([1, 1, 1])): 1}), Counter({PerceivedPotion(0, 1): 1}), 3), (Counter({AlignedStone(3, np.array([1, 1, 1])): 1}), Counter({PerceivedPotion(0, 1): 1, PerceivedPotion(0, -1): 1}), 3), # If graph has no bottlenecks and we start with a stone worth 1 and # potions which go in opposite directions by using a potion we get # expected reward of 4/3 since: # 1/6 prob of going to 3 then cashing in - contributes 1/2 # 1/3 prob of going to -1 then we can definitely get back to 1 using the # other potion - contributes 1/3 # 1/2 prob of staying at 1 then it is best not to use the other potion - # contributes 1/2 (Counter({AlignedStone(1, np.array([1, 1, 1])): 1}), Counter({PerceivedPotion(0, 1): 1, PerceivedPotion(0, -1): 1}), 1.33333333, 0, 'perceptual_mapping_randomized'), # If graph has no bottlenecks and we start with a stone worth -1 and # potions on different axes then by using a potion we get # expected reward of 0.583333 since: # 1/6 prob of going to -3 - contributes 0 # 1/3 prob of going to 1 then if we use the other potion we know it # applies to a different axis so: # 1/4 prob of going to 3 - contributes (1/3) * (1/4) * 3 = 1/4 # 1/4 prob of going to -1 - contributes 0 # 1/2 prob of staying at 1 - contributes (1/3) * (1/2) * 1 = 1/6 # 1/2 prob of staying at -1 then the other potion could apply to any axis # so it gives: # 1/6 prob of going to -3 - contributes 0 # 1/3 prob of going to 1 - contributes (1/2) * (1/3) * 1 = 1/6 # 1/2 prob of staying at -1 - contributes 0 (Counter({AlignedStone(-1, np.array([1, 1, 1])): 1}), Counter({PerceivedPotion(0, 1): 1, PerceivedPotion(1, 1): 1}), 0.583333, 0, 'perceptual_mapping_randomized') ) def test_expected_reward( self, aligned_stones, perceived_potions, expected_expected_reward, bonus=0, level_name='perceptual_mapping_randomized_with_random_bottleneck' ): precomputed = IdealObserverTest.level_name_to_precomputed[level_name] # Make an initial game state using first trial information. current_game_state = ideal_observer.BeliefState(precomputed) # Start with no search results. search_results = {} aligned_stones_ind = collections.Counter({ k.index(): v for k, v in aligned_stones.items()}) perceived_potions_ind = collections.Counter({ k.index(): v for k, v in perceived_potions.items()}) current_game_state.new_trial(aligned_stones_ind, perceived_potions_ind) _, objective, _ = ideal_observer.ideal_observer( current_game_state, search_results, bonus, precomputed, False) expected_reward, _ = objective self.assertAlmostEqual(expected_reward, expected_expected_reward, 4) @parameterized.parameters( # In the first trial we have 1 stone worth -3 and the 3 potions required # to get it to 3. In the second trial we have a stone at 1 and 1 correct # potion and some potions which will take it away from the 3. 
It should # take the knowledge gained from trial 1 and apply it to trial 2. {'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1])], [Stone(0, [1, 1, -1])]], potions=[[Potion(0, 0, 1), Potion(1, 1, 1), Potion(2, 2, 1)], [Potion(0, 0, -1), Potion(1, 1, -1), Potion(2, 2, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _ALL_FIXED_GRAPH, 'expected_rewards': [15, 15]}, {'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1])], [Stone(0, [1, -1, 1])]], potions=[[Potion(0, 0, 1), Potion(1, 1, 1), Potion(2, 2, 1)], [Potion(0, 0, -1), Potion(1, 1, -1), Potion(2, 2, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _ALL_FIXED_GRAPH, 'expected_rewards': [15, 1]}, {'perceived_items': utils.EpisodeItems( stones=[ [Stone(0, [-1, -1, -1])], [Stone(0, [1, -1, 1])], [Stone(0, [-1, -1, -1])], [Stone(0, [1, 1, 1])], [Stone(0, [1, 1, -1])], [Stone(0, [1, 1, -1])], [Stone(0, [1, -1, -1])], [Stone(0, [1, -1, -1])], [Stone(0, [1, -1, -1])], [Stone(0, [-1, -1, 1])]], potions=[ [Potion(0, 0, -1), Potion(1, 1, -1), Potion(2, 0, 1), Potion(3, 2, -1), Potion(4, 0, 1), Potion(5, 0, -1)], [Potion(0, 0, 1), Potion(1, 0, -1), Potion(2, 0, -1), Potion(3, 0, -1), Potion(4, 0, 1), Potion(5, 2, -1)], [Potion(0, 0, -1), Potion(1, 1, 1), Potion(2, 2, -1), Potion(3, 1, -1), Potion(4, 2, -1), Potion(5, 2, -1)], [Potion(0, 1, -1), Potion(1, 0, -1), Potion(2, 1, -1), Potion(3, 1, -1), Potion(4, 0, -1), Potion(5, 0, -1)], [Potion(0, 1, 1), Potion(1, 0, -1), Potion(2, 1, -1), Potion(3, 2, 1), Potion(4, 1, 1), Potion(5, 0, -1)], [Potion(0, 2, -1), Potion(1, 1, 1), Potion(2, 2, 1), Potion(3, 0, -1), Potion(4, 0, -1), Potion(5, 0, 1)], [Potion(0, 0, 1), Potion(1, 0, 1), Potion(2, 0, -1), Potion(3, 2, -1), Potion(4, 2, -1), Potion(5, 1, 1)], [Potion(0, 2, -1), Potion(1, 1, 1), Potion(2, 1, -1), Potion(3, 0, 1), Potion(4, 2, -1), Potion(5, 2, -1)], [Potion(0, 2, 1), Potion(1, 2, -1), Potion(2, 2, -1), Potion(3, 2, 1), Potion(4, 1, -1), Potion(5, 1, -1)], [Potion(0, 1, -1), Potion(1, 0, -1), Potion(2, 2, 1), Potion(3, 2, 1), Potion(4, 2, -1), Potion(5, 1, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _ALL_FIXED_GRAPH, 'expected_rewards': [0, 0, 0, 15, 15, 15, 1, 1, 1, 1]}, # This is a case that the oracle fails because it has to use a long path # instead of a short one. {'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1]), Stone(1, [1, 1, -1])]], potions=[[Potion(0, 0, 1), Potion(1, 1, 1), Potion(2, 2, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _BOTTLENECK1_GRAPH, 'expected_rewards': [16], 'bonus': 12, # Give it the exact constraint, stone map and potion map so it can run # as the oracle. 'level_name': 'all_fixed_bottleneck1'}, # This is a case where without sorting on search depth for actions of # equal reward, the search_oracle takes the longer path. {'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1]), Stone(1, [-1, -1, 1])]], potions=[[Potion(0, 0, -1), Potion(1, 0, 1), Potion(2, 2, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _BOTTLENECK1_GRAPH, 'expected_rewards': [1], 'bonus': 12, # Give it the exact constraint, stone map and potion map so it can run # as the oracle. 'level_name': 'all_fixed_bottleneck1', # We expect the only events to be applying potion 1 to stone 1 and then # putting it into the cauldron. 
'expected_events': [ev_trk.OrderedEvents([ # First put stone 1 into potion 1 ev_trk.SingleEvent(1, {1}), # Then put stone 1 into the cauldron ev_trk.SingleEvent(1, {-1}) ])], 'non_events': [ev_trk.AnyOrderEvents({ # Stone 0 should not be put into any potions or the cauldron. ev_trk.SingleEvent(0, {0, 1, 2, -1}), # Stone 1 should not be put into any other potions. ev_trk.SingleEvent(1, {0, 2}), })]}, # Tests for the ideal explorer. # Ideal observer wouldn't use the potion as it minimises search depth if # it cannot get reward but ideal explorer will use it to minimise num # world states. {'minimise_world_states': False, 'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1])]], potions=[[Potion(0, 0, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _BOTTLENECK1_GRAPH, 'expected_rewards': [0], 'bonus': 12, 'non_events': [ # Do not put stone 0 into potion 0 as we are minimising search depth. ev_trk.SingleEvent(0, {0}), ]}, {'minimise_world_states': True, 'perceived_items': utils.EpisodeItems( stones=[[Stone(0, [-1, -1, -1])]], potions=[[Potion(0, 0, 1)]]), 'potion_map': _ALL_FIXED_POTION_MAP, 'stone_map': _ALL_FIXED_STONE_MAP, 'graph': _BOTTLENECK1_GRAPH, 'expected_rewards': [0], 'bonus': 12, 'expected_events': [ # Put stone 0 into potion 0 to reduce num world states. ev_trk.SingleEvent(0, {0}), ]}, ) def test_multiple_trials( self, perceived_items, potion_map, stone_map, graph, expected_rewards, bonus=12, level_name='perceptual_mapping_randomized_with_random_bottleneck', expected_events=None, non_events=None, minimise_world_states=False): precomputed = IdealObserverTest.level_name_to_precomputed[level_name] reward_weights = stones_and_potions.RewardWeights([1, 1, 1], 0, bonus) symbolic_bot_trackers_from_env = functools.partial( add_trackers_to_env, reward_weights=reward_weights, precomputed=precomputed, init_belief_state=None) results = symbolic_alchemy_bots.get_multi_trial_ideal_observer_reward( perceived_items, utils.Chemistry(potion_map, stone_map, graph, np.eye(3)), reward_weights, precomputed, minimise_world_states, symbolic_bot_trackers_from_env) per_trial = results['score']['per_trial'] event_trackers = results['matrix_event']['event_tracker'] self.assertLen(per_trial, len(expected_rewards)) for trial_reward, expected_trial_reward in zip(per_trial, expected_rewards): self.assertEqual(trial_reward, expected_trial_reward, 4) if expected_events is not None: self.assertLen(event_trackers, len(expected_events)) for event_tracker, expected_event in zip(event_trackers, expected_events): self.assertTrue(expected_event.occurs(event_tracker.events)) if non_events is not None: self.assertLen(event_trackers, len(non_events)) for event_tracker, non_event in zip(event_trackers, non_events): self.assertFalse(non_event.occurs(event_tracker.events)) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/ideal_observer/ideal_observer_test.py
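The expected-reward values in the parameterized test above are derived by hand in the comments. A minimal standalone check of that arithmetic (plain Python, independent of the dm_alchemy ideal observer, using only the probabilities stated in those comments):

# Sanity check of the hand-derived expected rewards quoted in the test
# comments above; probabilities are taken verbatim from those comments.

# Stone at 1, potions (0, +1) and (0, -1), no bottlenecks:
#   1/6 -> stone reaches 3 and is cashed in,
#   1/3 -> stone drops to -1 but the opposite potion recovers it to 1,
#   1/2 -> stone stays at 1 and the other potion is best left unused.
case_1 = (1 / 6) * 3 + (1 / 3) * 1 + (1 / 2) * 1
assert abs(case_1 - 4 / 3) < 1e-9

# Stone at -1, potions (0, +1) and (1, +1): the comment's contributions are
# 1/4 (reach 3 via 1), 1/6 (stay at 1) and 1/6 (recover to 1 later).
case_2 = 1 / 4 + 1 / 6 + 1 / 6
assert abs(case_2 - 0.583333) < 1e-6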
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for the precomputed maps used in the ideal observer.""" from absl.testing import absltest from absl.testing import parameterized from dm_alchemy.ideal_observer import precomputed_maps from dm_alchemy.types import helpers from dm_alchemy.types import stones_and_potions import numpy as np LatentStone = stones_and_potions.LatentStone PerceivedPotion = stones_and_potions.PerceivedPotion PotionMap = stones_and_potions.PotionMap PartialPotionMap = stones_and_potions.PartialPotionMap StoneMap = stones_and_potions.StoneMap PartialStoneMap = stones_and_potions.PartialStoneMap Stone = stones_and_potions.Stone Potion = stones_and_potions.Potion class PrecomputedMapsTest(parameterized.TestCase): no_effect_chem = None perm_index_to_index = None @classmethod def setUpClass(cls): super().setUpClass() cls.perm_index_to_index, index_to_perm_index = ( precomputed_maps.get_perm_index_conversion()) cls.no_effect_chem = precomputed_maps.get_no_effect_from_partial_chem( index_to_perm_index) cls.partial_stone_map_to_stone_map = ( precomputed_maps.get_partial_stone_map_to_stone_map()) @parameterized.parameters( # If we know nothing about the potion map then we should not update any # entries in observed_no_effect_bits {'stone_map': stones_and_potions.all_fixed_stone_map(), 'partial_potion_map': ( stones_and_potions.no_knowledge_partial_potion_map()), 'expected_observed_no_effect_bits': 0}, # If we know the first dimension then observed no effect bits should have # a 1 for the potion which takes the stone out of the cube and 0s # elsewhere {'stone_map': stones_and_potions.all_fixed_stone_map(), 'partial_potion_map': PartialPotionMap( [0, helpers.UNKNOWN, helpers.UNKNOWN], [1, helpers.UNKNOWN, helpers.UNKNOWN]), 'potions': [PerceivedPotion(0, 1), PerceivedPotion(0, 1), PerceivedPotion(1, 1), PerceivedPotion(2, -1), PerceivedPotion(0, 1)], 'stones': [LatentStone(np.array([1, -1, -1])), LatentStone(np.array([1, -1, 1])), LatentStone(np.array([1, -1, 1])), LatentStone(np.array([1, -1, 1])), LatentStone(np.array([-1, -1, 1]))], 'observed_no_effect_vals': [1, 1, 0, 0]}, ) def test_no_effect_from_partial_chem( self, stone_map, partial_potion_map, expected_observed_no_effect_bits=None, potions=None, stones=None, observed_no_effect_vals=None): """Test that actions with no effect are correctly computed in a few cases.""" partial_potion_map_index_0, partial_potion_map_index_1 = ( partial_potion_map.index(self.perm_index_to_index)) observed_no_effect_bits = self.no_effect_chem[ stone_map.index(), partial_potion_map_index_0, partial_potion_map_index_1] if expected_observed_no_effect_bits is not None: self.assertEqual( observed_no_effect_bits, expected_observed_no_effect_bits) if potions is not None: for potion, stone, val in zip(potions, stones, observed_no_effect_vals): bit_num = stone.index() * PerceivedPotion.num_types 
+ potion.index() bit_mask = 1 << bit_num expected_val = (observed_no_effect_bits & bit_mask) >> bit_num self.assertEqual(val, expected_val) @parameterized.parameters( # A fully specified partial stone map {'partial_stone_map': PartialStoneMap(np.array([-1, 1, -1])), 'expected_stone_map_index': StoneMap(np.array([-1, 1, -1])).index()}, # A partially specified partial stone map {'partial_stone_map': PartialStoneMap(np.array([-1, 1, helpers.UNKNOWN])), 'expected_stone_map_index': -1}, ) def test_partial_stone_map_to_stone_map( self, partial_stone_map, expected_stone_map_index): stone_map_index = self.partial_stone_map_to_stone_map[ partial_stone_map.index()] self.assertEqual(stone_map_index, expected_stone_map_index) if __name__ == '__main__': absltest.main()
dm_alchemy-master
dm_alchemy/ideal_observer/precomputed_maps_test.py
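The test above reads single bits out of observed_no_effect_bits with bit_num = stone.index() * PerceivedPotion.num_types + potion.index(). A minimal sketch of that indexing convention with illustrative numbers (the value of NUM_POTION_TYPES below is an assumption, not taken from dm_alchemy):

# Sketch of the (stone, potion) -> bit index convention used in the test
# above. NUM_POTION_TYPES = 6 is an assumption (3 axes * 2 directions).
NUM_POTION_TYPES = 6

def no_effect_bit(stone_index: int, potion_index: int) -> int:
  return stone_index * NUM_POTION_TYPES + potion_index

# Mark (stone 1, potion 2) as having no effect.
bits = 1 << no_effect_bit(1, 2)

# Read the bit back exactly as the test does.
bit_num = no_effect_bit(1, 2)
assert (bits & (1 << bit_num)) >> bit_num == 1
assert (bits & (1 << no_effect_bit(0, 2))) == 0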
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper functions and global variables for ideal observer.""" import itertools from typing import List, Sequence, Tuple, TypeVar IMPOSSIBLE = -1001 T = TypeVar('T') def sorted_intersection( list_0: Sequence[T], list_1: Sequence[T] ) -> Tuple[List[T], List[int]]: """Finds the intersection and the indices of the remaining elements in list_0. Both lists must be sorted in ascending order. Args: list_0: Sequence of elements which can be compared to each other and the elements in list_1. list_1: A second sequence of elements. Returns: A list of the intersection of the two lists. A list of the indices into list_0 for the elements in the intersection. """ in_both = [] still_in_0 = [] index_0 = 0 index_1 = 0 while index_0 < len(list_0) and index_1 < len(list_1): value_0 = list_0[index_0] value_1 = list_1[index_1] if value_0 == value_1: still_in_0.append(index_0) in_both.append(value_0) if value_0 <= value_1: index_0 += 1 if value_1 <= value_0: index_1 += 1 return in_both, still_in_0 def list_to_bitfield(l: Sequence[int]) -> int: bitfield = 0 for i in l: bitfield |= (1 << i) return bitfield def bitfield_to_list(b: int) -> List[int]: ret = [] for i in itertools.count(): mask = 1 << i if b & mask: ret.append(i) if mask > b: break return ret def pack_to_bitfield(ints_and_num_bits: Sequence[Tuple[int, int]]) -> int: """Packs a sequence of ints into a single int which acts like a bitfield. Args: ints_and_num_bits: Sequence of tuples each containing an int and the max number of bits required to represent that int. Returns: A single arbitrary precision int with all the passed ints packed into it. """ int_rep = 0 bits_count = 0 for sub_int_rep, num_bits in ints_and_num_bits: for i in range(num_bits): if sub_int_rep & (1 << i): int_rep |= (1 << (bits_count + i)) bits_count += num_bits return int_rep
dm_alchemy-master
dm_alchemy/ideal_observer/helpers.py
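A short usage sketch for the helpers defined above (the import path follows the file location; the numeric values are illustrative):

# Usage sketch for dm_alchemy.ideal_observer.helpers.
from dm_alchemy.ideal_observer import helpers

# list_to_bitfield and bitfield_to_list are inverses for sorted lists of
# non-negative ints.
assert helpers.list_to_bitfield([0, 3, 5]) == 0b101001
assert helpers.bitfield_to_list(0b101001) == [0, 3, 5]

# pack_to_bitfield concatenates fields into one arbitrary-precision int,
# least-significant field first: 0b11 in bits 0-1, 0b1 in bits 2-4.
assert helpers.pack_to_bitfield([(0b11, 2), (0b1, 3)]) == 0b00111

# sorted_intersection returns the common values and their indices in list_0.
assert helpers.sorted_intersection([1, 2, 4, 6], [2, 3, 6]) == ([2, 6], [1, 3])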
# Lint as: python3 # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Precompute variables mapping inputs to outputs for complex functions.""" import collections import copy import functools import itertools import os from typing import Any, Mapping, Optional, Sequence, Tuple from absl import logging import dataclasses from dm_alchemy import io from dm_alchemy.encode import precomputed_maps_pb2 from dm_alchemy.encode import precomputed_maps_proto_conversion from dm_alchemy.ideal_observer import helpers from dm_alchemy.types import graphs from dm_alchemy.types import helpers as types_helpers from dm_alchemy.types import stones_and_potions import frozendict import numpy as np @dataclasses.dataclass class PrecomputedMaps: """Functions to get the observations for different content types for each element.""" graphs_list: np.ndarray graph_index_distr: np.ndarray partial_graph_to_matching_graphs: np.ndarray partial_graph_update: np.ndarray stone_to_reward: np.ndarray drop_reward: np.ndarray partial_graph_index_to_possible_index: Mapping[int, int] graphs_with_edge: np.ndarray edge_exists: np.ndarray stone_maps: np.ndarray potion_maps: np.ndarray possible_latent_dims: np.ndarray poss_p_maps: np.ndarray poss_s_maps: np.ndarray react_result: np.ndarray possible_latent_dirs: np.ndarray partial_potion_map_update: np.ndarray partial_stone_map_update: np.ndarray potion_masks: np.ndarray potion_to_pair: np.ndarray perm_index_to_index: np.ndarray index_to_perm_index: np.ndarray missing_edge_no_change: np.ndarray update_partial_graph_no_change: np.ndarray partial_stone_map_to_stone_map: np.ndarray no_effect_from_partial_chem: np.ndarray def __deepcopy__(self, memo): # Don't deepcopy precomputed maps as it takes too long and uses too much # memory and the contents never change after construction so we only need 1. 
return copy.copy(self) def save(self, folder): """Saves the precomputed maps to serialized protos in the folder passed in.""" precomputed_maps_proto_conversion.write_graph_array( self.graphs_list, folder, 'graphs_list') for int_array, name in [ (self.stone_to_reward, 'stone_to_reward'), (self.drop_reward, 'drop_reward'), (self.edge_exists, 'edge_exists'), (self.stone_maps, 'stone_maps'), (self.potion_maps, 'potion_maps'), (self.react_result, 'react_result'), (self.partial_stone_map_update, 'partial_stone_map_update'), (self.potion_to_pair, 'potion_to_pair'), (self.perm_index_to_index, 'perm_index_to_index'), (self.index_to_perm_index, 'index_to_perm_index'), (self.missing_edge_no_change, 'missing_edge_no_change'), (self.update_partial_graph_no_change, 'update_partial_graph_no_change'), (self.partial_stone_map_to_stone_map, 'partial_stone_map_to_stone_map'), ]: precomputed_maps_proto_conversion.write_int_array(int_array, folder, name) for int_array, name in [ (self.partial_graph_to_matching_graphs, 'partial_graph_to_matching_graphs'), (self.graphs_with_edge, 'graphs_with_edge'), (self.potion_masks, 'potion_masks'), (self.no_effect_from_partial_chem, 'no_effect_from_partial_chem'), ]: precomputed_maps_proto_conversion.write_bitfield_array( int_array, folder, name) precomputed_maps_proto_conversion.write_float_array( self.graph_index_distr, folder, 'graph_index_distr') for int_array, name in [ (self.possible_latent_dims, 'possible_latent_dims'), (self.partial_graph_update, 'partial_graph_update'), (self.poss_p_maps, 'poss_p_maps'), (self.poss_s_maps, 'poss_s_maps'), ]: precomputed_maps_proto_conversion.write_list_ints_array( int_array, folder, name) precomputed_maps_proto_conversion.write_possible_latent_dirs( self.possible_latent_dirs, folder, 'possible_latent_dirs') precomputed_maps_proto_conversion.write_partial_potion_map_update( self.partial_potion_map_update, folder, 'partial_potion_map_update') proto = precomputed_maps_pb2.PartialGraphIndexToPossibleIndex( entries=self.partial_graph_index_to_possible_index) io.write_proto( os.path.join(folder, 'partial_graph_index_to_possible_index'), proto.SerializeToString()) def _load_from_folder(folder): """Loads precomputed maps from serialized protos in the folder passed in.""" kwargs = {'graphs_list': precomputed_maps_proto_conversion.load_graph_array( folder, 'graphs_list')} for name in [ 'stone_to_reward', 'drop_reward', 'edge_exists', 'stone_maps', 'potion_maps', 'react_result', 'partial_stone_map_update', 'potion_to_pair', 'perm_index_to_index', 'index_to_perm_index', 'missing_edge_no_change', 'update_partial_graph_no_change', 'partial_stone_map_to_stone_map']: kwargs[name] = precomputed_maps_proto_conversion.load_int_array( folder, name) for name in [ 'partial_graph_to_matching_graphs', 'graphs_with_edge', 'potion_masks', 'no_effect_from_partial_chem']: kwargs[name] = precomputed_maps_proto_conversion.load_bitfield_array( folder, name) for name in [ 'possible_latent_dims', 'poss_p_maps', 'poss_s_maps', 'partial_graph_update']: kwargs[name] = precomputed_maps_proto_conversion.load_list_ints_array( folder, name) kwargs['graph_index_distr'] = ( precomputed_maps_proto_conversion.load_float_array( folder, 'graph_index_distr')) kwargs['possible_latent_dirs'] = ( precomputed_maps_proto_conversion.load_possible_latent_dirs( folder, 'possible_latent_dirs')) kwargs['partial_potion_map_update'] = ( precomputed_maps_proto_conversion.load_partial_potion_map_update( folder, 'partial_potion_map_update')) serialized = io.read_proto(os.path.join( folder, 
'partial_graph_index_to_possible_index')) proto = precomputed_maps_pb2.PartialGraphIndexToPossibleIndex.FromString( serialized) kwargs['partial_graph_index_to_possible_index'] = proto.entries return PrecomputedMaps(**kwargs) # Alias these for readability AlignedStone = stones_and_potions.AlignedStone AlignedStoneIndex = stones_and_potions.AlignedStoneIndex PerceivedPotion = stones_and_potions.PerceivedPotion PerceivedPotionIndex = stones_and_potions.PerceivedPotionIndex LatentStone = stones_and_potions.LatentStone LatentPotion = stones_and_potions.LatentPotion StoneMap = stones_and_potions.StoneMap PotionMap = stones_and_potions.PotionMap PartialStoneMap = stones_and_potions.PartialStoneMap PartialPotionMap = stones_and_potions.PartialPotionMap PartialGraph = graphs.PartialGraph aligned_stone_from_index = stones_and_potions.aligned_stone_from_index perceived_potion_from_index = stones_and_potions.perceived_potion_from_index latent_stone_from_index = stones_and_potions.latent_stone_from_index latent_potion_from_index = stones_and_potions.latent_potion_from_index stone_map_from_index = stones_and_potions.stone_map_from_index potion_map_from_index = stones_and_potions.potion_map_from_index partial_stone_map_from_index = stones_and_potions.partial_stone_map_from_index partial_potion_map_from_index = stones_and_potions.partial_potion_map_from_index partial_graph_from_index = graphs.partial_graph_from_index _SIMPLE_TYPE_COUNT = frozendict.frozendict({ 'PotionMap': PotionMap.num_types, 'StoneMap': StoneMap.num_types, 'LatentPotion': LatentPotion.num_types, 'LatentStone': LatentStone.num_types, 'PerceivedPotion': PerceivedPotion.num_types, 'AlignedStone': AlignedStone.num_types, 'PartialPotionMap_dim': PartialPotionMap.num_axis_assignments, 'PartialPotionMap_dir': PartialPotionMap.num_dir_assignments, 'PartialStoneMap': PartialStoneMap.num_types, 'dim': stones_and_potions.get_num_axes(), 'dir': stones_and_potions.get_num_dirs(), }) _SIMPLE_TYPE_RECONSTRUCTOR = frozendict.frozendict({ 'StoneMap': stone_map_from_index, 'LatentPotion': latent_potion_from_index, 'LatentStone': latent_stone_from_index, 'PerceivedPotion': perceived_potion_from_index, 'AlignedStone': aligned_stone_from_index, 'PartialStoneMap': partial_stone_map_from_index, 'dim': lambda x: x, 'dir': stones_and_potions.index_to_dir, }) # Dict of reconstructors for indices which are passed in additional data. 
_INDICES_PASSED_IN_RECONSTRUCTOR = frozendict.frozendict({ 'graph_important_edges': lambda x: (latent_stone_from_index(x[0]), latent_stone_from_index(x[1])) , 'possible_partial_graph_indices': partial_graph_from_index, 'nodes': lambda x: x }) def partial_potion_map_part_index(data, dim=0, direction=0): return partial_potion_map_from_index( (dim, direction), data['index_to_perm_index']) _RECONSTRUCTORS_REQUIRING_DATA = frozendict.frozendict({ 'PotionMap': lambda i, data: potion_map_from_index(i, data['index_to_perm_index']), 'PartialPotionMap_dim': lambda i, data: partial_potion_map_part_index(data, dim=i), 'PartialPotionMap_dir': lambda i, data: partial_potion_map_part_index(data, direction=i), }) _TYPE_FROM_TUPLE_INDEX = frozendict.frozendict({ 'PartialPotionMap': ( ('PartialPotionMap_dim', 'PartialPotionMap_dir'), lambda i, data: partial_potion_map_part_index(data, i[0], i[1])), }) PRECOMPUTED_LEVEL_FILES_DIR = 'ideal_observer/data' def _get_type_count(current_type: str, additional_data: Mapping[str, Any]): if current_type in _SIMPLE_TYPE_COUNT: return _SIMPLE_TYPE_COUNT[current_type] return len(additional_data[current_type]) def _get_indices_and_reconstructor(current_type, additional_data): """For a given type gets valid indices and a method to reconstruct from index.""" if 'enumerated_' in current_type: index_gen, reconstructor = _get_indices_and_reconstructor( current_type.replace('enumerated_', ''), additional_data) return enumerate(index_gen), lambda x: reconstructor(x[1]) if current_type in _SIMPLE_TYPE_RECONSTRUCTOR: return (range(_SIMPLE_TYPE_COUNT[current_type]), _SIMPLE_TYPE_RECONSTRUCTOR[current_type]) if current_type in _INDICES_PASSED_IN_RECONSTRUCTOR: return (additional_data[current_type], _INDICES_PASSED_IN_RECONSTRUCTOR[current_type]) if current_type in _RECONSTRUCTORS_REQUIRING_DATA: return (range(_SIMPLE_TYPE_COUNT[current_type]), functools.partial(_RECONSTRUCTORS_REQUIRING_DATA[current_type], data=additional_data)) if current_type in _TYPE_FROM_TUPLE_INDEX: sub_types, reconstructor = _TYPE_FROM_TUPLE_INDEX[current_type] sub_indices = [] for sub_type in sub_types: index_gen, _ = _get_indices_and_reconstructor(sub_type, additional_data) sub_indices.append(index_gen) return itertools.product(*sub_indices), functools.partial( reconstructor, data=additional_data) def _reconstructed_elements( to_map: Mapping[str, str], additional_data: Mapping[str, np.ndarray]): """Generator for map from indices to elements.""" # Get one of the types in to_map and loop through all possibilities for it # recursively calling for the remaining entries. indices_and_reconstructors = [ _get_indices_and_reconstructor(current_type, additional_data) for current_type in to_map.values()] names = to_map.keys() indices = [elt[0] for elt in indices_and_reconstructors] reconstructors = [elt[1] for elt in indices_and_reconstructors] reconstructed = [] # Indices may be generators and we iterate through twice so we must make a # copy for type_indices, reconstructor in zip( copy.deepcopy(indices), reconstructors): reconstructed.append([reconstructor(i) for i in type_indices]) for current_index, current_element in zip( itertools.product(*indices), itertools.product(*reconstructed)): # We have to make a copy of the element before returning it because if it is # mutable and gets changed we don't want the change to be there for later # iterations. 
yield (collections.OrderedDict( [(name, i) for name, i in zip(names, current_index)]), collections.OrderedDict( [(name, copy.deepcopy(e)) for name, e in zip( names, current_element)])) _RESULT_TYPE_TO_EMPTY_RESULT = { # Use numpy object type to store python ints rather than numpy ints. 'int': lambda s: np.zeros(s, dtype=object), # Create an array with an empty list at each entry. 'list': lambda s: np.frompyfunc(list, 0, 1)(np.empty(s, dtype=object)), 'tuple': lambda s: np.frompyfunc(tuple, 0, 1)(np.empty(s, dtype=object)), } def _empty_result(to_map, result_type, additional_data=None): shape = [] for current_type in to_map: if current_type in _TYPE_FROM_TUPLE_INDEX: shape.extend([_get_type_count(sub_type, additional_data) for sub_type in _TYPE_FROM_TUPLE_INDEX[current_type][0]]) else: shape.append(_get_type_count(current_type, additional_data)) shape = tuple(shape) return _RESULT_TYPE_TO_EMPTY_RESULT[result_type](shape) LoopHelper = collections.namedtuple('LoopHelper', 'empty_result gen') def _precompute_loop_helper( to_map, result_type, additional_data=None, result_to_map=None): """Creates an empty results array and generator for indices and elements. Args: to_map: A list of types to map optionally with a name for the index and the element associated with each type. If no name is provided the type name itself will be used as the name. See functions below for example usages. result_type: The type of each element in the result matrix. additional_data: Additional data required to loop over the types passed in and reconstruct the elements. result_to_map: A list of types which index the result. If none is provided then it is assumed to be the same as to_map. Returns: A LoopHelper type containing an empty numpy array and a generator which will loop through all of the valid indices and elements. """ # Passing a name is optional - if no name is passed then use the type string. to_map = collections.OrderedDict( [elt if isinstance(elt, tuple) else (elt, elt) for elt in to_map]) if result_to_map is None: result_to_map = to_map.values() # Remove enumerated from result_to_map result_to_map = [elt.replace('enumerated_', '') for elt in result_to_map] empty_result = _empty_result(result_to_map, result_type, additional_data) gen = functools.partial(_reconstructed_elements, to_map, additional_data) return LoopHelper(empty_result, gen) def get_partial_graph_update( all_graphs, graph_important_edges, possible_partial_graph_indices, partial_graph_index_to_possible_index ) -> np.ndarray: """Updates partial graph after seeing that edge exists.""" # Create an array to hold results with an empty list at each entry. 
result, gen = _precompute_loop_helper( ['graph_important_edges', 'possible_partial_graph_indices'], 'list', additional_data={ 'graph_important_edges': graph_important_edges, 'possible_partial_graph_indices': possible_partial_graph_indices}, result_to_map=['LatentStone', 'LatentStone']) for indices, elements in gen(): latent_stone_index, latent_result_index = indices['graph_important_edges'] latent_stone, latent_result = elements['graph_important_edges'] partial_graph = elements['possible_partial_graph_indices'] partial_graph.add_edge(latent_stone, latent_result, graphs.KNOWN_EDGE) partial_graph.update(all_graphs) poss_index = partial_graph_index_to_possible_index[partial_graph.index()] result[latent_stone_index, latent_result_index].append(poss_index) result[latent_result_index, latent_stone_index].append(poss_index) return result def get_partial_graph_to_matching_graphs( all_graphs, possible_partial_graph_indices: np.ndarray) -> np.ndarray: """Gets list of graphs matching the partial graph.""" result, gen = _precompute_loop_helper( ['enumerated_possible_partial_graph_indices'], 'int', additional_data={ 'possible_partial_graph_indices': possible_partial_graph_indices}) for indices, elements in gen(): i, _ = indices['enumerated_possible_partial_graph_indices'] partial_graph = elements['enumerated_possible_partial_graph_indices'] matches = partial_graph.matching_graphs(all_graphs, return_indices=True) result[i] = helpers.list_to_bitfield(matches) return result def get_graphs_with_edge(valid_graphs, index_to_perm_index) -> np.ndarray: """Array of bitfields of graphs which have the given edge given the maps.""" nodes = graphs.all_nodes_in_graph() result, gen = _precompute_loop_helper( ['StoneMap', 'PotionMap', 'AlignedStone', 'PerceivedPotion'], 'int', additional_data={'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): stone_map = elements['StoneMap'] potion_map = elements['PotionMap'] aligned_stone = elements['AlignedStone'] perceived_potion = elements['PerceivedPotion'] latent_potion = potion_map.apply(perceived_potion) potion_in_stone_space = stone_map.apply_to_potion(latent_potion) start_node = nodes.get_node_by_coords(list(aligned_stone.aligned_coords)) end_node_coords = copy.deepcopy(aligned_stone.aligned_coords) end_node_coords[potion_in_stone_space.latent_dim] += ( 2 * potion_in_stone_space.latent_dir) end_node_coord = end_node_coords[potion_in_stone_space.latent_dim] if end_node_coord < -1 or end_node_coord > 1: # Not in any graph result[tuple(indices.values())] = 0 continue end_node = nodes.get_node_by_coords(list(end_node_coords)) poss_graphs = [i for i, g in enumerate(valid_graphs) if g.edge_list.has_edge(start_node, end_node)] graphs_bitfield = helpers.list_to_bitfield(poss_graphs) result[tuple(indices.values())] = graphs_bitfield return result def get_edge_exists(possible_partial_graph_indices: np.ndarray) -> np.ndarray: """Checks if an edge exists given partial graph info.""" graph_nodes = graphs.all_nodes_in_graph() result, gen = _precompute_loop_helper( ['enumerated_possible_partial_graph_indices', 'enumerated_nodes', 'dim'], 'int', additional_data={ 'possible_partial_graph_indices': possible_partial_graph_indices, 'nodes': graph_nodes.nodes}) for indices, elements in gen(): i, _ = indices['enumerated_possible_partial_graph_indices'] partial_graph = elements['enumerated_possible_partial_graph_indices'] start_node_index, start_node = indices['enumerated_nodes'] dim = indices['dim'] start_coords = start_node.coords end_coords = 
copy.deepcopy(start_coords) end_coords[dim] = -start_coords[dim] end_node_ = graph_nodes.get_node_by_coords(end_coords) assert end_node_ is not None end_node: graphs.Node = end_node_ result[i, start_node_index, dim] = partial_graph.known_adj_mat[ start_node_index, end_node.idx] return result def get_possible_partial_graph_indices( graph_important_edges: Sequence[Tuple[int, int]], graphs_list: Sequence[graphs.Graph] ) -> np.ndarray: """Calculates an exhaustive list of possible partial graphs. This is smaller than the list of partial graphs we can represent because some partial graphs are impossible. For example graphs which are known to be disconnected. It is important to use only the possible partial graphs because this makes it practical to store maps over all possibilities. Args: graph_important_edges: List of the edges which may exist in a graph. graphs_list: List of all valid graphs. Returns: The list of partial graph indices. """ def remaining_edges(g): ret = [] for edge in graph_important_edges: if g.known_adj_mat[edge] == types_helpers.UNKNOWN: ret.append(edge) return ret # TODO(b/173785715): Start with what we can deduce from the graphs_list. to_expand = [PartialGraph().index()] visited = {PartialGraph().index()} while to_expand: current_node = to_expand[0] to_expand = to_expand[1:] current_graph = partial_graph_from_index(current_node) for e in remaining_edges(current_graph): for val in [0, 1]: new_partial = copy.deepcopy(current_graph) new_partial.known_adj_mat[e] = val new_partial.known_adj_mat[e[1], e[0]] = val new_partial.update(graphs_list) new_partial_index = new_partial.index() if new_partial_index not in visited: visited.add(new_partial_index) to_expand.append(new_partial_index) return np.array(list(sorted(visited)), dtype=object) def get_poss_potion_maps_and_stone_maps( perm_index_to_index: np.ndarray ) -> Tuple[np.ndarray, np.ndarray]: """Gets a list of potion and stone maps possible given an observation.""" poss_stone_maps, gen = _precompute_loop_helper( [('stone', 'AlignedStone'), ('potion', 'PerceivedPotion'), ('result', 'AlignedStone')], 'list') # In this function we get 2 results at the same time so make another. 
poss_potion_maps = _empty_result( ['AlignedStone', 'PerceivedPotion', 'AlignedStone'], 'list') for indices, elements in gen(): potion_maps, stone_maps = stones_and_potions.one_action_outcome( elements['stone'], elements['potion'], elements['result'], perm_index_to_index) poss_potion_maps[tuple(indices.values())] = potion_maps poss_stone_maps[tuple(indices.values())] = stone_maps return poss_potion_maps, poss_stone_maps def get_possible_latent_dims( index_to_perm_index: np.ndarray ) -> np.ndarray: """Gets a list of possible latent dimensions given a potion and partial map.""" result, gen = _precompute_loop_helper( ['PerceivedPotion', 'PartialPotionMap_dim'], 'list', additional_data={'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): partial_potion_map = elements['PartialPotionMap_dim'] perceived_potion = elements['PerceivedPotion'] result[tuple(indices.values())] = ( partial_potion_map.possible_latent_dims(perceived_potion)) return result def get_react_result( possible_partial_graph_indices: np.ndarray, edge_exists: np.ndarray, drop_reward: np.ndarray ) -> np.ndarray: """Gets the resulting stone when applying a potion to a stone.""" result, gen = _precompute_loop_helper( ['AlignedStone', 'dim', 'dir', 'enumerated_possible_partial_graph_indices'], 'int', additional_data={ 'possible_partial_graph_indices': possible_partial_graph_indices}) for indices, elements in gen(): aligned_stone = elements['AlignedStone'] latent_dim = elements['dim'] latent_dir = elements['dir'] aligned_stone_index = indices['AlignedStone'] latent_dir_index = indices['dir'] partial_graph_index, _ = indices[ 'enumerated_possible_partial_graph_indices'] # If we know the edge doesn't exist do not consider the possibility # that the stone changes. if edge_exists[ partial_graph_index, drop_reward[aligned_stone_index], latent_dim] == graphs.NO_EDGE: result[aligned_stone_index, latent_dim, latent_dir_index, partial_graph_index] = helpers.IMPOSSIBLE else: result[aligned_stone_index, latent_dim, latent_dir_index, partial_graph_index] = stones_and_potions.react( aligned_stone, latent_dim, latent_dir).index() return result def get_possible_latent_dirs(index_to_perm_index: np.ndarray) -> np.ndarray: """Gets a list of possible latent dimensions given maps and stone and potion.""" result, gen = _precompute_loop_helper( ['PartialPotionMap_dir', 'PartialStoneMap', 'dim', 'PerceivedPotion', 'AlignedStone'], 'tuple', additional_data={'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): partial_potion_map = elements['PartialPotionMap_dir'] partial_stone_map = elements['PartialStoneMap'] latent_dim = elements['dim'] perceived_potion = elements['PerceivedPotion'] aligned_stone = elements['AlignedStone'] latent_dirs_stone_dirs = ( stones_and_potions.possible_latent_dirs_and_stone_dirs( perceived_potion, latent_dim, partial_potion_map, partial_stone_map)) result[tuple(indices.values())] = ( stones_and_potions.latent_dirs_on_stone( aligned_stone, latent_dim, partial_stone_map, latent_dirs_stone_dirs)) return result def get_partial_potion_map_update( index_to_perm_index: np.ndarray, perm_index_to_index: np.ndarray ) -> np.ndarray: """Updates a partial potion map given an observation.""" result, gen = _precompute_loop_helper( [('stone', 'AlignedStone'), ('potion', 'PerceivedPotion'), ('result', 'AlignedStone'), 'PartialPotionMap'], 'tuple', additional_data={'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): stone_index = indices['stone'] potion_index = indices['potion'] 
result_index = indices['result'] partial_potion_map_index = indices['PartialPotionMap'] stone = elements['stone'] potion = elements['potion'] result_stone = elements['result'] partial_potion_map = elements['PartialPotionMap'] result[stone_index, potion_index, result_index, partial_potion_map_index[0], partial_potion_map_index[1]] = ( stones_and_potions.update_partial_potion_map( stone, potion, result_stone, partial_potion_map, perm_index_to_index)) return result def get_partial_stone_map_update() -> np.ndarray: """Updates a partial stone map given an observation.""" result, gen = _precompute_loop_helper( [('stone', 'AlignedStone'), ('result', 'AlignedStone'), 'PartialStoneMap'], 'int') for indices, elements in gen(): partial_stone_map = elements['PartialStoneMap'] result[tuple(indices.values())] = ( stones_and_potions.update_partial_stone_map( elements['stone'], elements['result'], partial_stone_map)) return result def get_missing_edge_no_change( index_to_perm_index: np.ndarray, graph_important_edges: Sequence[Tuple[int, int]] ) -> np.ndarray: """Gets which edge is missing given a potion has no effect.""" result, gen = _precompute_loop_helper( ['PartialStoneMap', 'PartialPotionMap', 'PerceivedPotion', 'LatentStone'], 'int', additional_data={'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): partial_potion_map = elements['PartialPotionMap'] partial_stone_map = elements['PartialStoneMap'] potion = elements['PerceivedPotion'] aligned_stone_coords = elements['LatentStone'] partial_stone_map_index = indices['PartialStoneMap'] partial_potion_map_index_0, partial_potion_map_index_1 = indices[ 'PartialPotionMap'] potion_index = indices['PerceivedPotion'] stone_index = indices['LatentStone'] # If we can't map the potion into latent space we cannot tell which # edge is missing. if not partial_potion_map.can_map(potion): result[partial_stone_map_index, partial_potion_map_index_0, partial_potion_map_index_1, potion_index, stone_index] = -1 continue # If we can't map the potion from latent space into stone perceptual # space we cannot tell which edge is missing. latent_potion = partial_potion_map.apply(potion) if partial_stone_map.latent_pos_dir[ latent_potion.latent_dim] == types_helpers.UNKNOWN: result[partial_stone_map_index, partial_potion_map_index_0, partial_potion_map_index_1, potion_index, stone_index] = -1 continue stone_space_potion = partial_stone_map.apply_to_potion(latent_potion) # If the stone value on the dimension that the potion should change # is the opposite of the potion direction then the stone should have # changed and therefore we can eliminate graphs containing the edge. if aligned_stone_coords.latent_coords[ stone_space_potion.latent_dim] == stone_space_potion.latent_dir: result[partial_stone_map_index, partial_potion_map_index_0, partial_potion_map_index_1, potion_index, stone_index] = -1 continue # Set the result to be the index of the edge which shouldn't be # there. 
expected_end_coords = copy.deepcopy(aligned_stone_coords.latent_coords) expected_end_coords[stone_space_potion.latent_dim] = -expected_end_coords[ stone_space_potion.latent_dim] expected_end_index = stones_and_potions.LatentStone( expected_end_coords).index() missing_edge = -1 edge_start_end = sorted((stone_index, expected_end_index)) for edge_index, (i, j) in enumerate(graph_important_edges): if sorted((i, j)) == edge_start_end: missing_edge = edge_index assert missing_edge != -1, 'Missing edge doesn\'t exist' result[partial_stone_map_index, partial_potion_map_index_0, partial_potion_map_index_1, potion_index, stone_index] = missing_edge return result def get_partial_stone_map_to_stone_map() -> np.ndarray: """If a partial stone map is fully known returns stone map otherwise -1.""" result, gen = _precompute_loop_helper(['PartialStoneMap'], 'int') for indices, elements in gen(): index = indices['PartialStoneMap'] partial_stone_map = elements['PartialStoneMap'] stone_maps = partial_stone_map.fill_gaps() if len(stone_maps) != 1: result[index] = -1 else: result[index] = stone_maps[0].index() return result def get_no_effect_from_partial_chem( index_to_perm_index: np.ndarray ) -> np.ndarray: """Gets bit mask for potions known to take a stone out of the latent cube.""" result, gen = _precompute_loop_helper( ['StoneMap', 'PartialPotionMap'], 'int', additional_data={ 'index_to_perm_index': index_to_perm_index}) for indices, elements in gen(): stone_map = elements['StoneMap'] stone_map_index = indices['StoneMap'] partial_potion_map = elements['PartialPotionMap'] partial_potion_map_index_0, partial_potion_map_index_1 = indices[ 'PartialPotionMap'] # Go through perceived potion and perceived stone (without reward) and # update if we know there will be no effect. _, no_effect_gen = _precompute_loop_helper( ['PerceivedPotion', 'LatentStone'], 'int') no_effect_result = 0 for no_effect_indices, no_effect_elements in no_effect_gen(): perceived_potion = no_effect_elements['PerceivedPotion'] aligned_stone_wo_reward = no_effect_elements['LatentStone'] latent_stone = stone_map.apply(AlignedStone( 0, aligned_stone_wo_reward.latent_coords)) # If we can map the perceived potion to latent space, do so and see if it # has an effect. 
if not partial_potion_map.can_map(perceived_potion): continue latent_potion = partial_potion_map.apply(perceived_potion) if latent_potion.latent_dir == latent_stone.latent_coords[ latent_potion.latent_dim]: no_effect_result |= 1 << ( (no_effect_indices['LatentStone'] * PerceivedPotion.num_types) + no_effect_indices['PerceivedPotion']) result[stone_map_index, partial_potion_map_index_0, partial_potion_map_index_1] = no_effect_result return result def get_update_partial_graph_no_change( all_graphs, possible_partial_graph_indices: np.ndarray, partial_graph_index_to_possible_index: Mapping[int, int], graph_important_edges: Sequence[Tuple[int, int]] ) -> np.ndarray: """Given a missing edge updates the partial graph.""" graph_nodes = graphs.all_nodes_in_graph() result, gen = _precompute_loop_helper( ['enumerated_possible_partial_graph_indices', 'enumerated_graph_important_edges'], 'int', additional_data={ 'possible_partial_graph_indices': possible_partial_graph_indices, 'graph_important_edges': graph_important_edges}) for indices, elements in gen(): edge_index, (start_node, end_node) = indices[ 'enumerated_graph_important_edges'] poss_index, _ = indices[ 'enumerated_possible_partial_graph_indices'] partial_graph = elements['enumerated_possible_partial_graph_indices'] start_stone = stones_and_potions.LatentStone(np.array( graph_nodes.nodes[start_node].coords)) end_stone = stones_and_potions.LatentStone(np.array( graph_nodes.nodes[end_node].coords)) partial_graph.add_edge(start_stone, end_stone, graphs.NO_EDGE) partial_graph.update(all_graphs) result[poss_index, edge_index] = partial_graph_index_to_possible_index[ partial_graph.index()] return result def get_perm_index_conversion() -> Tuple[np.ndarray, np.ndarray]: """Gets maps to convert between different indices representing permutations. We make a map from an index computed by treating each entry in the permutation as being between 0 and len(perm) - 1 (of which there are len(perm) ^ len(perm)) to an index between 0 and len(perm)! - 1. len(perm) is 3 so this is not large. Returns: Map from index which treats entries as independent to compact index, the inverse. """ num_axes = stones_and_potions.get_num_axes() # Use numpy object type to store python ints rather than numpy ints. perm_index_to_index = np.array([-1 for _ in range(num_axes ** num_axes)], dtype=object) for i, perm in enumerate(itertools.permutations(range(num_axes))): perm_index_to_index[np.ravel_multi_index( tuple(perm), tuple(num_axes for _ in range(num_axes)))] = i # Make the inverse map. index_to_perm_index = np.array( [int(np.ravel_multi_index( tuple(perm), tuple(num_axes for _ in range(num_axes)))) for perm in itertools.permutations(range(3))], dtype=object) return perm_index_to_index, index_to_perm_index def constraints_to_filename( constraints: Sequence[graphs.Constraint], poss_stone_maps: Sequence[stones_and_potions.StoneMap], poss_potion_maps: Sequence[stones_and_potions.PotionMap] ) -> str: """Converts a sequence of constraints and possible maps to a filename. This removes characters like * and - and ensures a list with the same number of constraints is the same length. Each constraint becomes 6 letters long with S (i.e. star) substituted for *, N (i.e. negative) substituted for -1 and P (i.e. positive) substituted for 1. Consecutive constraints are separated by a / and the sequence of constraints is lexicographically sorted to ensure that two sequences of constraints which differ only in order are represented by the same string. 
Stone and potion maps are converted to indices and the represented as a sequence of ranges, eg. 0-7/1,8,16-24,32. Args: constraints: A sequence of graphs.Constraints. poss_stone_maps: A sequence of possible stone maps. poss_potion_maps: A sequence of possible potion maps. Returns: A string with each constraint and possible stone map and potion map. """ def constraint_to_str(constraint: graphs.Constraint) -> str: remapping = {'*': 'S', '-1': 'N', '1': 'P'} all_dims = [] for i, dim in enumerate(constraint): constr = dim[:i] + dim[i + 1:] all_dims.append(''.join([remapping[c] for c in constr])) return ''.join(all_dims) def seq_ints_to_str(seq: Sequence[int]) -> str: """Convert a sequence of ints to a string.""" ranges = [] start_i, prev_i = None, None for i in seq: if start_i is None: start_i, prev_i = i, i continue if i != prev_i + 1: ranges.append((start_i, prev_i)) start_i = i prev_i = i ranges.append((start_i, prev_i)) return ','.join(str(s) + ('' if e == s else '-' + str(e)) for s, e in ranges) perm_index_to_index, _ = get_perm_index_conversion() return ('/'.join(sorted(constraint_to_str(c) for c in constraints)) + '/' + seq_ints_to_str([s.index() for s in poss_stone_maps]) + '/' + seq_ints_to_str([p.index(perm_index_to_index) for p in poss_potion_maps])) def load_from_level_name(level_name: str) -> Optional[PrecomputedMaps]: """Loads precomputed for the level name passed if it exists.""" # All levels are in alchemy and this is not included in the precomputed.pkl # file paths so remove this from the level name if it is included. if level_name.startswith('alchemy/'): level_name = level_name.replace('alchemy/', '') # Precomputed maps refer to the mapping between aligned stones and latent # stones so any rotation does not affect them so ignore it. # There are a few different ways of specifying rotation in the level name. level_name = level_name.replace('rotation_and_', '') level_name = level_name.replace('with_rotation', '') level_name = level_name.replace('fixed_with', 'fixed') level_name = level_name.replace('rotate_color_shape', '') level_name = level_name.replace('rotate_color_size', '') level_name = level_name.replace('rotate_size_shape', '') precomputed_folder = os.path.join(PRECOMPUTED_LEVEL_FILES_DIR, level_name) return _load_from_folder(precomputed_folder) def get_precomputed_maps( constraints: Optional[Sequence[graphs.Constraint]] = None, poss_stone_maps: Optional[Sequence[stones_and_potions.StoneMap]] = None, poss_potion_maps: Optional[Sequence[stones_and_potions.PotionMap]] = None, ) -> PrecomputedMaps: """Precomputes a set of maps to make running the ideal observer faster.""" # Constraints must be specified in stone perceptual space. if constraints is None: constraints = graphs.possible_constraints() if poss_stone_maps is None: poss_stone_maps = stones_and_potions.possible_stone_maps() perm_index_to_index, index_to_perm_index = get_perm_index_conversion() if poss_potion_maps is None: poss_potion_maps = stones_and_potions.possible_potion_maps( index_to_perm_index) logging.info('Computing precomputed maps.') # Everywhere below we use numpy object type to store python ints rather than # numpy ints so that we get arbitrary precision which allows us to make # bitfields easily. 
stone_maps = np.array([s.index() for s in poss_stone_maps], dtype=object) potion_maps = np.array([p.index(perm_index_to_index) for p in poss_potion_maps], dtype=object) # The graph distribution is an unordered mapping, we sort it to make debugging # easier and so that we can extract a list of graphs and a list of # probabilities for those graphs and this will be consistent across runs. graphs_distr = graphs.graph_distr(constraints) graphs_distr_as_list = list(graphs_distr.items()) graphs_distr_constraints = [graphs.constraint_from_graph(k) for k, _ in graphs_distr_as_list] graphs_distr_num_constraints = graphs.get_num_constraints( graphs_distr_constraints) graphs_distr_sorted = sorted(zip( graphs_distr_as_list, graphs_distr_num_constraints, graphs_distr_constraints), key=lambda x: (x[2], str(x[1]))) graphs_list = np.frompyfunc(graphs.Graph, 2, 1)( np.array([g[0].node_list for g, _, _ in graphs_distr_sorted], dtype=object), np.array([g[0].edge_list for g, _, _ in graphs_distr_sorted], dtype=object)) graph_index_distr = np.array([g[1] for g, _, _ in graphs_distr_sorted], dtype=object) graphs_with_edge = get_graphs_with_edge(graphs_list, index_to_perm_index) # A list of the edges which can be present in a bottleneck. # i.e. edges of the cube. graph_important_edges = graphs.cube_edges() # A list of all partial information we could have about a graph given that # we have performed some set of stone in potion experiments. possible_partial_graph_indices = get_possible_partial_graph_indices( graph_important_edges, graphs_list) # Map from a simple partial graph index which enumerates all representable # graphs to an index into the list of reachable partial graphs. partial_graph_index_to_possible_index = {ind: i for i, ind in enumerate( possible_partial_graph_indices)} # A map which goes from a partial graph to a list of graphs which are # possible given the partial info. partial_graph_to_matching_graphs = get_partial_graph_to_matching_graphs( graphs_list, possible_partial_graph_indices) # A map from a partial graph to an updated partial graph given an # observation. partial_graph_update = get_partial_graph_update( graphs_list, graph_important_edges, possible_partial_graph_indices, partial_graph_index_to_possible_index) # A map from a perceived stone to its reward. stone_to_reward = np.array( [aligned_stone_from_index(AlignedStoneIndex(i)).reward for i in range(stones_and_potions.AlignedStone.num_types)], dtype=object) # A map from a perceived stone to an index which ignores the reward. drop_reward = np.array( [aligned_stone_from_index(AlignedStoneIndex(i)).coords_only_index() for i in range(stones_and_potions.AlignedStone.num_types)], dtype=object) # Compute a list of possible outcomes (perceived stones) given a perceived # stone which we apply a potion to. possible_latent_dims = get_possible_latent_dims(index_to_perm_index) poss_p_maps, poss_s_maps = get_poss_potion_maps_and_stone_maps( perm_index_to_index) possible_latent_dirs = get_possible_latent_dirs(index_to_perm_index) partial_potion_map_update = get_partial_potion_map_update( index_to_perm_index, perm_index_to_index) partial_stone_map_update = get_partial_stone_map_update() # For each perceived potion we create a mask on the observed no effect bit # field which selects the entries for this potion. 
potion_masks_list = [] for j in reversed(range(stones_and_potions.PerceivedPotion.num_types)): binary_list = ['1' if i == j else '0' for i in range( stones_and_potions.PerceivedPotion.num_types)] mask = int(''.join([''.join(binary_list) for _ in range( stones_and_potions.LatentStone.num_types)]), 2) potion_masks_list.append(mask) potion_masks = np.array(potion_masks_list, dtype=object) perceived_potions = np.array( [perceived_potion_from_index(PerceivedPotionIndex(i)) for i in range( stones_and_potions.PerceivedPotion.num_types)], dtype=object) potion_to_pair = np.array( [stones_and_potions.PerceivedPotion( perceived_dim=p.perceived_dim, perceived_dir=-p.perceived_dir).index() for p in perceived_potions], dtype=object) edge_exists = get_edge_exists(possible_partial_graph_indices) react_result = get_react_result( possible_partial_graph_indices, edge_exists, drop_reward) missing_edge_no_change = get_missing_edge_no_change( index_to_perm_index, graph_important_edges) update_partial_graph_no_change = get_update_partial_graph_no_change( graphs_list, possible_partial_graph_indices, partial_graph_index_to_possible_index, graph_important_edges) partial_stone_map_to_stone_map = get_partial_stone_map_to_stone_map() no_effect_from_partial_chem = get_no_effect_from_partial_chem( index_to_perm_index) precomputed = PrecomputedMaps( graphs_list=graphs_list, graph_index_distr=graph_index_distr, partial_graph_to_matching_graphs=partial_graph_to_matching_graphs, partial_graph_update=partial_graph_update, stone_to_reward=stone_to_reward, drop_reward=drop_reward, partial_graph_index_to_possible_index=( partial_graph_index_to_possible_index), graphs_with_edge=graphs_with_edge, edge_exists=edge_exists, stone_maps=stone_maps, potion_maps=potion_maps, possible_latent_dims=possible_latent_dims, poss_p_maps=poss_p_maps, poss_s_maps=poss_s_maps, react_result=react_result, possible_latent_dirs=possible_latent_dirs, partial_potion_map_update=partial_potion_map_update, partial_stone_map_update=partial_stone_map_update, potion_masks=potion_masks, potion_to_pair=potion_to_pair, perm_index_to_index=perm_index_to_index, index_to_perm_index=index_to_perm_index, missing_edge_no_change=missing_edge_no_change, update_partial_graph_no_change=update_partial_graph_no_change, partial_stone_map_to_stone_map=partial_stone_map_to_stone_map, no_effect_from_partial_chem=no_effect_from_partial_chem ) return precomputed
dm_alchemy-master
dm_alchemy/ideal_observer/precomputed_maps.py
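get_perm_index_conversion above maps between a dense index over all 3**3 axis assignments and a compact index over the 3! genuine permutations. A standalone sketch of that mapping (itertools and numpy only, no dm_alchemy imports):

# Standalone sketch of the permutation index conversion built by
# get_perm_index_conversion above: 27 dense indices, 6 compact ones.
import itertools
import numpy as np

num_axes = 3
perm_index_to_index = {}
index_to_perm_index = []
for i, perm in enumerate(itertools.permutations(range(num_axes))):
  dense = int(np.ravel_multi_index(perm, (num_axes,) * num_axes))
  perm_index_to_index[dense] = i
  index_to_perm_index.append(dense)

# The identity permutation (0, 1, 2) has dense index 0*9 + 1*3 + 2 = 5 and
# compact index 0; the reversal (2, 1, 0) has dense index 21 and compact 5.
assert perm_index_to_index[5] == 0
assert index_to_perm_index[5] == 21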
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Example random agent for interacting with DeepMind Alchemy environment.""" from absl import app from absl import flags from absl import logging import dm_alchemy from dm_env import specs import numpy as np FLAGS = flags.FLAGS flags.DEFINE_string( 'docker_image_name', None, 'Name of the Docker image that contains the Alchemy environment. ' 'If None, uses the default docker image name.') flags.DEFINE_integer('seed', 123, 'Environment seed.') flags.DEFINE_string( 'level_name', 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck', 'Name of Alchemy task to run.') class RandomAgent: """Basic random agent for DeepMind Alchemy environment.""" def __init__(self, action_spec): self.action_spec = action_spec def act(self): action = {} for name, spec in self.action_spec.items(): # Uniformly sample BoundedArray actions. if isinstance(spec, specs.BoundedArray): action[name] = np.random.uniform(spec.minimum, spec.maximum, spec.shape) else: action[name] = spec.generate_value() return action def main(_): env_settings = dm_alchemy.EnvironmentSettings( seed=FLAGS.seed, level_name=FLAGS.level_name) with dm_alchemy.load_from_docker( name=FLAGS.docker_image_name, settings=env_settings) as env: agent = RandomAgent(env.action_spec()) timestep = env.reset() score = 0 while not timestep.last(): action = agent.act() timestep = env.step(action) if timestep.reward: score += timestep.reward logging.info('Total score: %1.1f, reward: %1.1f', score, timestep.reward) if __name__ == '__main__': app.run(main)
dm_alchemy-master
examples/random_agent.py
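RandomAgent.act above samples each BoundedArray action uniformly between its bounds. A minimal sketch of that sampling for a single, hypothetical spec (dm_env and numpy only):

# Sketch of uniformly sampling one BoundedArray action spec, as
# RandomAgent.act does above. The spec itself is hypothetical.
from dm_env import specs
import numpy as np

spec = specs.BoundedArray(
    shape=(2,), dtype=np.float32, minimum=-1.0, maximum=1.0, name='HAND')
sample = np.random.uniform(spec.minimum, spec.maximum, spec.shape)
assert np.all(sample >= spec.minimum) and np.all(sample <= spec.maximum)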
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Example human agent for interacting with DeepMind Alchemy.""" import multiprocessing from absl import app from absl import flags from absl import logging import dm_alchemy import numpy as np import pygame FLAGS = flags.FLAGS flags.DEFINE_list( 'screen_size', ['640', '480'], 'Screen width/height in pixels. Scales the environment RGB observations to ' 'fit the screen size.') flags.DEFINE_list( 'resolution', ['240', '200'], 'Resolution of the RGB observation presented.') flags.DEFINE_string( 'docker_image_name', None, 'Name of the Docker image that contains the Alchemy environment. ' 'If None, uses the default name "alchemy"') flags.DEFINE_integer('seed', 123, 'Environment seed.') flags.DEFINE_string( 'level_name', 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck', 'Name of Alchemy task to run.') _FRAMES_PER_SECOND = 30 _MOUSE_SENSITIVITY = 0.1 _CURSOR_COLOR = (255, 0, 0) _CURSOR_SIZE = 10 _LEFT_BUTTON = 1 _KEYS_TO_ACTION = { pygame.K_w: {'MOVE_BACK_FORWARD': .5}, pygame.K_s: {'MOVE_BACK_FORWARD': -.5}, pygame.K_a: {'STRAFE_LEFT_RIGHT': -.5}, pygame.K_d: {'STRAFE_LEFT_RIGHT': .5}, pygame.K_UP: {'LOOK_DOWN_UP': -1}, pygame.K_DOWN: {'LOOK_DOWN_UP': 1}, pygame.K_LEFT: {'LOOK_LEFT_RIGHT': -1}, pygame.K_RIGHT: {'LOOK_LEFT_RIGHT': 1}, pygame.K_i: {'LOOK_DOWN_UP': -1}, pygame.K_k: {'LOOK_DOWN_UP': 1}, pygame.K_j: {'LOOK_LEFT_RIGHT': -1}, pygame.K_l: {'LOOK_LEFT_RIGHT': 1}, pygame.K_y: {'HAND_ROTATE_AROUND_FORWARD': 1}, pygame.K_r: {'HAND_ROTATE_AROUND_FORWARD': -1}, pygame.K_t: {'HAND_ROTATE_AROUND_RIGHT': -1}, pygame.K_g: {'HAND_ROTATE_AROUND_RIGHT': 1}, pygame.K_f: {'HAND_ROTATE_AROUND_UP': -1}, pygame.K_h: {'HAND_ROTATE_AROUND_UP': 1}, pygame.K_b: {'HAND_PUSH_PULL': 5}, pygame.K_v: {'HAND_PUSH_PULL': -5}, pygame.K_SPACE: {'HAND_GRIP': 1}, } # pyformat: disable _NO_ACTION = { 'MOVE_BACK_FORWARD': 0, 'STRAFE_LEFT_RIGHT': 0, 'LOOK_DOWN_UP': 0, 'LOOK_LEFT_RIGHT': 0, 'HAND_ROTATE_AROUND_FORWARD': 0, 'HAND_ROTATE_AROUND_RIGHT': 0, 'HAND_ROTATE_AROUND_UP': 0, 'HAND_PUSH_PULL': 0, 'HAND_GRIP': 0 } def _grab_mouse(grab=True): pygame.event.set_grab(grab) pygame.mouse.set_visible(not grab) def main(_): pygame.init() try: pygame.mixer.quit() except NotImplementedError: pass pygame.display.set_caption('Alchemy Human Agent') view_width, view_height = [int(pixels) for pixels in FLAGS.resolution] env_settings = dm_alchemy.EnvironmentSettings( seed=FLAGS.seed, level_name=FLAGS.level_name, width=view_width, height=view_height) # Let rendering use all but one CPU cores, but at least 1 core. 
num_rendering_cores = max(multiprocessing.cpu_count() - 1, 1) environment_variables = {'LP_NUM_THREADS': str(num_rendering_cores)} with dm_alchemy.load_from_docker( name=FLAGS.docker_image_name, settings=env_settings, environment_variables=environment_variables) as env: screen = pygame.display.set_mode( (int(FLAGS.screen_size[0]), int(FLAGS.screen_size[1]))) rgb_spec = env.observation_spec()['RGB_INTERLEAVED'] surface = pygame.Surface((rgb_spec.shape[1], rgb_spec.shape[0])) score = 0 clock = pygame.time.Clock() while True: for event in pygame.event.get(): if event.type == pygame.QUIT: return elif event.type == pygame.KEYDOWN: if event.key == pygame.K_q: return if event.key == pygame.K_ESCAPE: _grab_mouse(not pygame.event.get_grab()) elif event.type == pygame.MOUSEBUTTONDOWN: if event.button == _LEFT_BUTTON: _grab_mouse() actions = _NO_ACTION.copy() keys = pygame.key.get_pressed() for key, key_actions in _KEYS_TO_ACTION.items(): if not keys[key]: continue for name, action in key_actions.items(): actions[name] = action if pygame.event.get_grab(): left_button_pressed, _, _ = pygame.mouse.get_pressed() if left_button_pressed: actions['HAND_GRIP'] = 1 x, y = pygame.mouse.get_rel() actions['LOOK_LEFT_RIGHT'] = _MOUSE_SENSITIVITY * x actions['LOOK_DOWN_UP'] = _MOUSE_SENSITIVITY * y timestep = env.step(actions) thisframe = np.copy(timestep.observation['RGB_INTERLEAVED']) frame = np.swapaxes(thisframe, 0, 1) pygame.surfarray.blit_array(surface, frame) pygame.transform.smoothscale(surface, screen.get_size(), screen) info = pygame.display.Info() rect_x = info.current_w // 2 rect_y = info.current_h // 2 width_line = 2 pygame.draw.line(screen, _CURSOR_COLOR, (rect_x - _CURSOR_SIZE, rect_y), (rect_x + _CURSOR_SIZE, rect_y), width_line) pygame.draw.line(screen, _CURSOR_COLOR, (rect_x, rect_y - _CURSOR_SIZE), (rect_x, rect_y + _CURSOR_SIZE), width_line) pygame.display.update() if timestep.reward: score += timestep.reward logging.info('Total score: %1.1f, reward: %1.1f', score, timestep.reward) clock.tick(_FRAMES_PER_SECOND) if __name__ == '__main__': app.run(main)
dm_alchemy-master
examples/human_agent.py
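The human agent above drives the environment through a pygame event loop. For a quick headless smoke test, the same Docker environment can be stepped by a scripted agent. A minimal sketch, assuming only the `dm_alchemy` calls already used above (`EnvironmentSettings`, `load_from_docker`, `env.step`); the constant forward-walking action and the 100-step horizon are illustrative.

```
import multiprocessing

import dm_alchemy

_LEVEL = ('alchemy/perceptual_mapping_'
          'randomized_with_rotation_and_random_bottleneck')

settings = dm_alchemy.EnvironmentSettings(
    seed=123, level_name=_LEVEL, width=240, height=200)
env_vars = {'LP_NUM_THREADS': str(max(multiprocessing.cpu_count() - 1, 1))}

# name=None falls back to the default Docker image, as with the flag above.
with dm_alchemy.load_from_docker(
    name=None, settings=settings, environment_variables=env_vars) as env:
  actions = {
      'MOVE_BACK_FORWARD': 0.5,  # Walk forward slowly.
      'STRAFE_LEFT_RIGHT': 0, 'LOOK_DOWN_UP': 0, 'LOOK_LEFT_RIGHT': 0,
      'HAND_ROTATE_AROUND_FORWARD': 0, 'HAND_ROTATE_AROUND_RIGHT': 0,
      'HAND_ROTATE_AROUND_UP': 0, 'HAND_PUSH_PULL': 0, 'HAND_GRIP': 0,
  }
  score = 0.0
  for _ in range(100):
    timestep = env.step(actions)
    if timestep.reward:
      score += timestep.reward
  print('Total score:', score)
```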
# pylint: disable=g-bad-file-header # Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Setup for pip package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from setuptools import find_packages from setuptools import setup from setuptools.command.install import install as InstallCommandBase _VERSION = '1.36' EXTRA_PACKAGES = { 'tensorflow': ['tensorflow>=1.15.0,<2.0.0'], 'tensorflow with gpu': ['tensorflow-gpu>=1.15.0,<2.0.0'], } REQUIRED_PACKAGES = [ 'tensorflow-probability>=0.8.0,<0.9.0', # Version 0.9 requires tensorflow 2 'six', 'absl-py', 'semantic_version', 'contextlib2', 'wrapt' ] setup( name='dm-sonnet', version=_VERSION, description=( 'Sonnet is a library for building neural networks in TensorFlow.'), long_description='', url='https://github.com/deepmind/sonnet', author='DeepMind', author_email='[email protected]', # Contained modules and scripts. packages=find_packages(), entry_points={}, install_requires=REQUIRED_PACKAGES, extras_require=EXTRA_PACKAGES, # Add in any packaged data. include_package_data=True, package_data={ '': ['*.txt', '*.rst'], 'sonnet': ['*.so'], }, zip_safe=False, cmdclass={ 'install': InstallCommandBase, }, # PyPI package information. classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: {}'.format( '2.7' if (sys.version[0] == '2') else sys.version[0]), 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Libraries', ], license='Apache 2.0', keywords='sonnet tensorflow tensor machine learning', )
sonnet-1
setup.py
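The `extras_require` entry above lets downstream users choose between the CPU and GPU TensorFlow builds. A small sketch, assuming a hypothetical consumer project's own setup.py, of depending on this release together with the `tensorflow` extra declared above.

```
from setuptools import setup

setup(
    name='my-project',          # hypothetical downstream package
    version='0.1',
    install_requires=[
        # Pulls in dm-sonnet 1.36 plus the 'tensorflow' extra defined in
        # EXTRA_PACKAGES above (tensorflow>=1.15.0,<2.0.0).
        'dm-sonnet[tensorflow]==1.36',
    ],
)
```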
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """This python module contains Neural Network Modules for TensorFlow. Each module is a Python object which conceptually "owns" any variables required in that part of the Neural Network. The `__call__` function on the object is used to connect that Module into the Graph, and this may be called repeatedly with sharing automatically taking place. Everything public should be imported by this top level `__init__.py` so that the library can be used as follows: ``` import sonnet as snt linear = snt.Linear(...) ``` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import importlib import sys import semantic_version def _ensure_dependency_available_at_version(package_name, min_version): """Throw helpful error if required dependencies not available.""" try: pkg = importlib.import_module(package_name) except ImportError: pip_name = package_name.replace('_', '-') raise SystemError( 'Sonnet requires %s (minimum version %s) to be installed. ' 'If using pip, run `pip install %s` or ' '`pip install %s-gpu`' % ( package_name, min_version, pip_name, pip_name)) installed_version = semantic_version.Version(pkg.__version__) version_spec = semantic_version.Spec('>=' + min_version) if not version_spec.match(installed_version): raise SystemError( '%s version %s is installed, but Sonnet requires at least version %s.' % (package_name, pkg.__version__, min_version)) _ensure_dependency_available_at_version('tensorflow', '1.8.0') _ensure_dependency_available_at_version('tensorflow_probability', '0.4.0') # Check some version of TF is available. 
from sonnet.python import custom_getters from sonnet.python.modules import nets from sonnet.python.modules.attention import AttentiveRead from sonnet.python.modules.base import AbstractModule from sonnet.python.modules.base import Module from sonnet.python.modules.base import observe_connections from sonnet.python.modules.base import Transposable from sonnet.python.modules.base_errors import DifferentGraphError from sonnet.python.modules.base_errors import Error from sonnet.python.modules.base_errors import IncompatibleShapeError from sonnet.python.modules.base_errors import ModuleInfoError from sonnet.python.modules.base_errors import NotConnectedError from sonnet.python.modules.base_errors import NotInitializedError from sonnet.python.modules.base_errors import NotSupportedError from sonnet.python.modules.base_errors import ParentNotBuiltError from sonnet.python.modules.base_errors import UnderspecifiedError from sonnet.python.modules.base_info import SONNET_COLLECTION_NAME from sonnet.python.modules.basic import AddBias from sonnet.python.modules.basic import BatchApply from sonnet.python.modules.basic import BatchFlatten from sonnet.python.modules.basic import BatchReshape from sonnet.python.modules.basic import ConcatLinear from sonnet.python.modules.basic import FlattenTrailingDimensions from sonnet.python.modules.basic import Linear from sonnet.python.modules.basic import merge_leading_dims from sonnet.python.modules.basic import MergeDims from sonnet.python.modules.basic import SelectInput from sonnet.python.modules.basic import SliceByDim from sonnet.python.modules.basic import split_leading_dim from sonnet.python.modules.basic import TileByDim from sonnet.python.modules.basic import TrainableVariable from sonnet.python.modules.basic_rnn import BidirectionalRNN from sonnet.python.modules.basic_rnn import DeepRNN from sonnet.python.modules.basic_rnn import ModelRNN from sonnet.python.modules.basic_rnn import VanillaRNN from sonnet.python.modules.batch_norm import BatchNorm from sonnet.python.modules.batch_norm_v2 import BatchNormV2 from sonnet.python.modules.clip_gradient import clip_gradient from sonnet.python.modules.conv import CAUSAL from sonnet.python.modules.conv import CausalConv1D from sonnet.python.modules.conv import CONSTANT_PADDING from sonnet.python.modules.conv import Conv1D from sonnet.python.modules.conv import Conv1DTranspose from sonnet.python.modules.conv import Conv2D from sonnet.python.modules.conv import Conv2DTranspose from sonnet.python.modules.conv import Conv3D from sonnet.python.modules.conv import Conv3DTranspose from sonnet.python.modules.conv import DepthwiseConv2D from sonnet.python.modules.conv import FULL from sonnet.python.modules.conv import InPlaneConv2D from sonnet.python.modules.conv import REFLECT_PADDING from sonnet.python.modules.conv import REVERSE_CAUSAL from sonnet.python.modules.conv import SAME from sonnet.python.modules.conv import SeparableConv1D from sonnet.python.modules.conv import SeparableConv2D from sonnet.python.modules.conv import SYMMETRIC_PADDING from sonnet.python.modules.conv import VALID from sonnet.python.modules.embed import Embed from sonnet.python.modules.gated_rnn import BatchNormLSTM from sonnet.python.modules.gated_rnn import Conv1DLSTM from sonnet.python.modules.gated_rnn import Conv2DLSTM from sonnet.python.modules.gated_rnn import GRU from sonnet.python.modules.gated_rnn import highway_core_with_recurrent_dropout from sonnet.python.modules.gated_rnn import HighwayCore from sonnet.python.modules.gated_rnn import 
LSTM from sonnet.python.modules.gated_rnn import lstm_with_recurrent_dropout from sonnet.python.modules.gated_rnn import lstm_with_zoneout from sonnet.python.modules.gated_rnn import LSTMBlockCell from sonnet.python.modules.gated_rnn import LSTMState from sonnet.python.modules.layer_norm import LayerNorm from sonnet.python.modules.moving_average import MovingAverage from sonnet.python.modules.optimization_constraints import get_lagrange_multiplier from sonnet.python.modules.optimization_constraints import OptimizationConstraints from sonnet.python.modules.pondering_rnn import ACTCore from sonnet.python.modules.relational_memory import RelationalMemory from sonnet.python.modules.residual import Residual from sonnet.python.modules.residual import ResidualCore from sonnet.python.modules.residual import SkipConnectionCore from sonnet.python.modules.rnn_core import RNNCellWrapper from sonnet.python.modules.rnn_core import RNNCore from sonnet.python.modules.rnn_core import trainable_initial_state from sonnet.python.modules.rnn_core import TrainableInitialState from sonnet.python.modules.scale_gradient import scale_gradient from sonnet.python.modules.sequential import Sequential from sonnet.python.modules.spatial_transformer import AffineGridWarper from sonnet.python.modules.spatial_transformer import AffineWarpConstraints from sonnet.python.modules.spatial_transformer import GridWarper from sonnet.python.modules.spectral_normalization import wrap_with_spectral_norm from sonnet.python.modules.util import check_initializers from sonnet.python.modules.util import check_partitioners from sonnet.python.modules.util import check_regularizers from sonnet.python.modules.util import count_variables_by_type from sonnet.python.modules.util import custom_getter_router from sonnet.python.modules.util import deprecation_warning from sonnet.python.modules.util import format_variable_map from sonnet.python.modules.util import format_variables from sonnet.python.modules.util import get_normalized_variable_map from sonnet.python.modules.util import get_saver from sonnet.python.modules.util import get_variables_in_module from sonnet.python.modules.util import get_variables_in_scope from sonnet.python.modules.util import has_variable_scope from sonnet.python.modules.util import log_variables from sonnet.python.modules.util import parse_string_to_constructor from sonnet.python.modules.util import remove_unsupported_kwargs from sonnet.python.modules.util import reuse_variables from sonnet.python.modules.util import summarize_variables from sonnet.python.modules.util import supports_kwargs from sonnet.python.modules.util import variable_map_items from sonnet.python.ops import nest from sonnet.python.ops.initializers import restore_initializer __version__ = '1.36'
sonnet-1
sonnet/__init__.py
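The module docstring above notes that a module's `__call__` can be connected repeatedly with variable sharing taking place automatically. A minimal graph-mode sketch of that behaviour, assuming illustrative shapes; it only uses `snt.Linear` as exported above.

```
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf

linear = snt.Linear(output_size=4)
inputs = tf.placeholder(tf.float32, shape=[8, 16])
outputs_a = linear(inputs)
outputs_b = linear(inputs)  # Second connection reuses linear/w and linear/b.

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  batch = np.random.randn(8, 16)
  a, b = sess.run([outputs_a, outputs_b], feed_dict={inputs: batch})
  assert np.allclose(a, b)  # Same weights, same inputs, same outputs.
```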
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Sonnet Protobuf modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function
sonnet-1
sonnet/protos/__init__.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Removes the ":0" suffix from names in a checkpoint.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf tf.app.flags.DEFINE_string("source", None, "Source checkpoint") tf.app.flags.DEFINE_string("target", None, "Target checkpoint") tf.app.flags.DEFINE_boolean("dry_run", False, "Whether to do a dry run") FLAGS = tf.app.flags.FLAGS def _build_migrated_variables(checkpoint_reader, name_value_fn): """Builds the TensorFlow variables of the migrated checkpoint. Args: checkpoint_reader: A `tf.train.NewCheckPointReader` of the checkpoint to be read from. name_value_fn: Function taking two arguments, `name` and `value`, which returns the pair of new name and value for that a variable of that name. Returns: Tuple of a dictionary with new variable names as keys and `tf.Variable`s as values, and a dictionary that maps the old variable names to the new variable names. """ names_to_shapes = checkpoint_reader.get_variable_to_shape_map() new_name_to_variable = {} name_to_new_name = {} for name in names_to_shapes: value = checkpoint_reader.get_tensor(name) new_name, new_value = name_value_fn(name, value) if new_name is None: continue name_to_new_name[name] = new_name new_name_to_variable[new_name] = tf.Variable(new_value) return new_name_to_variable, name_to_new_name def remove_colon_zero(name): return name[:-2] if name.endswith(":0") else name def main(unused_args): with tf.Graph().as_default(): reader = tf.train.NewCheckpointReader(FLAGS.source) name_value_fn = lambda name, value: (remove_colon_zero(name), value) variables, name_to_new_name = _build_migrated_variables( reader, name_value_fn=name_value_fn) if not FLAGS.dry_run: init = tf.global_variables_initializer() saver = tf.train.Saver(variables) with tf.Session() as sess: sess.run(init) saver.save(sess, FLAGS.target) return name_to_new_name if __name__ == "__main__": tf.app.run()
sonnet-1
sonnet/util/migrate_checkpoint.py
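`_build_migrated_variables` accepts any `name_value_fn`, so the same machinery can do more than drop the ":0" suffix. A sketch, assuming the helpers above are in scope, with illustrative checkpoint paths and a hypothetical "old_scope/" prefix to strip.

```
import tensorflow.compat.v1 as tf

def strip_prefix_and_colon(name, value):
  name = remove_colon_zero(name)            # Helper defined above.
  if name.startswith("old_scope/"):         # Hypothetical prefix to remove.
    name = name[len("old_scope/"):]
  return name, value

reader = tf.train.NewCheckpointReader("/tmp/source_ckpt")    # assumed path
variables, name_map = _build_migrated_variables(             # defined above
    reader, name_value_fn=strip_prefix_and_colon)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  tf.train.Saver(variables).save(sess, "/tmp/target_ckpt")   # assumed path
```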
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Sonnet Modules and custom ops in TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function
sonnet-1
sonnet/python/__init__.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.custom_getters.stop_gradient.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import sonnet as snt import tensorflow.compat.v1 as tf def _suffix_custom_getter(getter, name, *args, **kwargs): return getter(name + "_test", *args, **kwargs) class OverrideArgsTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ("override_args", snt.custom_getters.override_args), ("override_default_args", snt.custom_getters.override_default_args), ) def testUsage(self, custom_getter_fn): # Create a module with no custom getters. linear = snt.Linear(10) # Create a module within the scope of an 'override args' custom getter. local_custom_getter = custom_getter_fn( collections=[tf.GraphKeys.LOCAL_VARIABLES]) with tf.variable_scope("", custom_getter=local_custom_getter): local_linear = snt.Linear(10) # Connect both modules to the graph, creating their variables. inputs = tf.placeholder(dtype=tf.float32, shape=(7, 11)) linear(inputs) local_linear(inputs) self.assertIn(linear.w, tf.global_variables()) self.assertNotIn(linear.w, tf.local_variables()) self.assertIn(local_linear.w, tf.local_variables()) self.assertNotIn(local_linear.w, tf.global_variables()) @parameterized.named_parameters( ("override_args", snt.custom_getters.override_args), ("override_default_args", snt.custom_getters.override_default_args), ) def testNestedWithin(self, custom_getter_fn): # Create a module with an 'override args' custom getter, within the scope # of another custom getter. local_custom_getter = custom_getter_fn( collections=[tf.GraphKeys.LOCAL_VARIABLES]) with tf.variable_scope("", custom_getter=_suffix_custom_getter): local_linear = snt.Linear(10, custom_getter=local_custom_getter) # Connect the module to the graph, creating its variables. inputs = tf.placeholder(dtype=tf.float32, shape=(7, 11)) local_linear(inputs) # Both custom getters should be effective. self.assertIn(local_linear.w, tf.local_variables()) self.assertNotIn(local_linear.w, tf.global_variables()) self.assertEqual("linear/w_test", local_linear.w.op.name) @parameterized.named_parameters( ("override_args", snt.custom_getters.override_args), ("override_default_args", snt.custom_getters.override_default_args), ) def testWithNested(self, custom_getter_fn): # Create a module with a custom getter, within the scope of an # 'override args' custom getter. local_custom_getter = custom_getter_fn( collections=[tf.GraphKeys.LOCAL_VARIABLES]) with tf.variable_scope("", custom_getter=local_custom_getter): local_linear = snt.Linear(10, custom_getter=_suffix_custom_getter) # Connect the module to the graph, creating its variables. 
inputs = tf.placeholder(dtype=tf.float32, shape=(7, 11)) local_linear(inputs) # Both custom getters should be effective. self.assertIn(local_linear.w, tf.local_variables()) self.assertNotIn(local_linear.w, tf.global_variables()) self.assertEqual("linear/w_test", local_linear.w.op.name) def testExplicitArgOverridden(self): # Create a variable within the scope of an 'override args' custom getter. local_custom_getter = snt.custom_getters.override_args( collections=[tf.GraphKeys.LOCAL_VARIABLES]) with tf.variable_scope("", custom_getter=local_custom_getter): # Explicitly specify an arg that disagrees with the custom getter. v = tf.get_variable("v", (), collections=[ tf.GraphKeys.GLOBAL_VARIABLES]) # The custom getter should win. self.assertIn(v, tf.local_variables()) self.assertNotIn(v, tf.global_variables()) def testExplicitArgNotOverridden(self): # Create a variable within an 'override default args' custom getter. local_custom_getter = snt.custom_getters.override_default_args( collections=[tf.GraphKeys.LOCAL_VARIABLES]) with tf.variable_scope("", custom_getter=local_custom_getter): # Explicitly specify an arg that disagrees with the custom getter. v = tf.get_variable("v", (), collections=[ tf.GraphKeys.GLOBAL_VARIABLES]) # The custom getter should honour the explicitly specified arg. self.assertIn(v, tf.global_variables()) self.assertNotIn(v, tf.local_variables()) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/custom_getters/override_args_test.py
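Outside the test harness, the pattern from `testUsage` above reads as a short standalone snippet: construct the module inside a scope whose getter overrides `collections`, and its variables land in `LOCAL_VARIABLES`. Sizes and the scope name are illustrative.

```
import sonnet as snt
import tensorflow.compat.v1 as tf

# Force every variable created under this scope into LOCAL_VARIABLES.
local_getter = snt.custom_getters.override_args(
    collections=[tf.GraphKeys.LOCAL_VARIABLES])

with tf.variable_scope('local_net', custom_getter=local_getter):
  linear = snt.Linear(10)
  outputs = linear(tf.placeholder(tf.float32, shape=(7, 11)))

assert linear.w in tf.local_variables()
assert linear.w not in tf.global_variables()
```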
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Bayes by Backprop custom getter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import sonnet as snt import sonnet.python.custom_getters.bayes_by_backprop as bbb import tensorflow.compat.v1 as tf import tensorflow_probability as tfp def softplus(x): return np.log(1.0 + np.exp(x)) def test_diag_gaussian_builder_builder( init_loc=0.0, init_scale=0.01, dist_cls=tfp.distributions.Normal, name_append="posterior"): def diagonal_gaussian_posterior_builder(getter, name, *args, **kwargs): shape = kwargs.pop("shape") parameter_shapes = dist_cls.param_static_shapes(shape) kwargs["initializer"] = tf.constant_initializer(init_loc) loc_var = getter( name + "/{}_loc".format(name_append), shape=parameter_shapes["loc"], *args, **kwargs) kwargs["initializer"] = tf.constant_initializer(init_scale) scale_var = getter( name + "/{}_scale".format(name_append), shape=parameter_shapes["scale"], *args, **kwargs) posterior = dist_cls( loc=loc_var, scale=tf.nn.softplus(scale_var), name="{}_posterior_dist".format(name)) posterior_vars = {"loc": loc_var, "scale": scale_var} return posterior, posterior_vars return diagonal_gaussian_posterior_builder def uniform_builder( getter, name, *args, **kwargs): del kwargs["initializer"] shape = kwargs.pop("shape") parameter_shapes = tfp.distributions.Uniform.param_static_shapes(shape) low_var = getter( name + "/low", shape=parameter_shapes["low"], *args, **kwargs) hi_var = getter(name + "/hi", shape=parameter_shapes["high"], *args, **kwargs) uniform_dist = tfp.distributions.Uniform(low=low_var, high=hi_var) return uniform_dist class BBBTest(tf.test.TestCase): def test_mean_mode_is_deterministic_and_correct(self): softplus_of_three = softplus(3.0) bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=test_diag_gaussian_builder_builder(10.9, 3.0), prior_builder=bbb.fixed_gaussian_prior_builder, sampling_mode_tensor=tf.constant(bbb.EstimatorModes.mean)) with tf.variable_scope("my_scope", custom_getter=bbb_getter): my_variable = tf.get_variable("v", shape=[2], dtype=tf.float32) init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) with self.test_session() as sess: sess.run(init_op) variable_value_one = sess.run(my_variable) variable_value_two = sess.run(my_variable) variable_value_three = sess.run(my_variable) self.assertAllClose(variable_value_one, np.zeros(shape=[2]) + 10.9, atol=1e-5) self.assertAllClose(variable_value_two, np.zeros(shape=[2]) + 10.9, atol=1e-5) self.assertAllClose(variable_value_three, np.zeros(shape=[2]) + 10.9, atol=1e-5) variable_metadata = bbb.get_variable_metadata() self.assertTrue(len(variable_metadata) == 1) q_dist_sigma = variable_metadata[0].posterior.scale with self.test_session() as 
sess: sigma_res = sess.run(q_dist_sigma) self.assertAllClose(sigma_res, np.zeros(shape=[2]) + softplus_of_three, atol=1e-5) def test_sample_mode_is_stochastic_and_can_be_switched(self): use_mean = tf.constant(bbb.EstimatorModes.mean) use_sample = tf.constant(bbb.EstimatorModes.sample) sampling_mode = tf.get_variable( "bbb_sampling_mode", initializer=tf.constant_initializer(bbb.EstimatorModes.sample), dtype=tf.string, shape=(), trainable=False) set_to_mean_mode = tf.assign(sampling_mode, use_mean) set_to_sample_mode = tf.assign(sampling_mode, use_sample) softplus_of_twenty = softplus(20.0) bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=test_diag_gaussian_builder_builder(10.9, 20.0), prior_builder=bbb.fixed_gaussian_prior_builder, sampling_mode_tensor=sampling_mode) with tf.variable_scope("my_scope", custom_getter=bbb_getter): my_variable = tf.get_variable("v", shape=[10, 3], dtype=tf.float32) # Check that the distribution has the right parameters. variable_metadata = bbb.get_variable_metadata() self.assertTrue(len(variable_metadata) == 1) q_dist_mean = variable_metadata[0].posterior.loc q_dist_sigma = variable_metadata[0].posterior.scale init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) with self.test_session() as sess: sess.run(init_op) mean_res, sigma_res = sess.run([q_dist_mean, q_dist_sigma]) variable_value_one = sess.run(my_variable) variable_value_two = sess.run(my_variable) self.assertAllClose(mean_res, np.zeros(shape=[10, 3])+10.9) self.assertAllClose(sigma_res, np.zeros(shape=[10, 3]) + softplus_of_twenty) actual_distance = np.sqrt( np.sum(np.square(variable_value_one - variable_value_two))) expected_distance_minimum = 5 self.assertGreater(actual_distance, expected_distance_minimum) # Now the value should be deterministic again. with self.test_session() as sess: sess.run(set_to_mean_mode) variable_value_three = sess.run(my_variable) variable_value_four = sess.run(my_variable) variable_value_five = sess.run(my_variable) self.assertAllClose(variable_value_three, np.zeros(shape=[10, 3]) + 10.9, atol=1e-5) self.assertAllClose(variable_value_four, np.zeros(shape=[10, 3]) + 10.9, atol=1e-5) self.assertAllClose(variable_value_five, np.zeros(shape=[10, 3]) + 10.9, atol=1e-5) # Now it should be stochastic again. with self.test_session() as sess: sess.run(set_to_sample_mode) variable_value_six = sess.run(my_variable) variable_value_seven = sess.run(my_variable) actual_new_distance = np.sqrt( np.sum(np.square(variable_value_six - variable_value_seven))) self.assertGreater(actual_new_distance, expected_distance_minimum) def test_variable_sharing(self): _, x_size = input_shape = [5, 5] sample_mode = tf.constant(bbb.EstimatorModes.sample) mean_mode = tf.constant(bbb.EstimatorModes.mean) sampling_mode = tf.get_variable( "bbb_sampling_mode", initializer=tf.constant_initializer(bbb.EstimatorModes.sample), dtype=tf.string, shape=(), trainable=False) set_to_sample_mode = tf.assign(sampling_mode, sample_mode) set_to_mean_mode = tf.assign(sampling_mode, mean_mode) bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=bbb.diagonal_gaussian_posterior_builder, prior_builder=bbb.fixed_gaussian_prior_builder, kl_builder=bbb.stochastic_kl_builder, sampling_mode_tensor=sampling_mode) tf.get_variable_scope().set_custom_getter(bbb_getter) mlp = snt.nets.MLP(output_sizes=[32, x_size]) x_train = tf.placeholder(dtype=tf.float32, shape=input_shape) x_test = tf.placeholder(dtype=tf.float32, shape=input_shape) # Dummy targets. 
target_train = x_train + 3.0 target_test = x_test + 3.0 y_train = mlp(x_train) # Also, y_test should be deterministic for fixed x. y_test = mlp(x_test) # Expect there to be two parameter for w and b for each layer in the MLP, #. That's 2 * 2 * 2 = 8. But ONLY for the training set. expected_number_of_variables = 8 actual_number_of_variables = len(tf.trainable_variables()) self.assertTrue(expected_number_of_variables == actual_number_of_variables) loss_train = tf.reduce_sum(tf.square(y_train - target_train), reduction_indices=[1]) loss_train = tf.reduce_mean(loss_train, reduction_indices=[0]) loss_test = tf.reduce_sum(tf.square(y_test - target_test), reduction_indices=[1]) loss_test = tf.reduce_mean(loss_test) kl_cost = bbb.get_total_kl_cost() * 0.000001 total_train_loss = loss_train + kl_cost optimizer = tf.train.GradientDescentOptimizer(0.001) train_step = optimizer.minimize(total_train_loss) x_feed = np.random.normal(size=input_shape) fd = { x_train: x_feed, x_test: x_feed } init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) sess.run(set_to_mean_mode) y_test_res_one = sess.run(y_test, feed_dict=fd) y_test_res_two = sess.run(y_test, feed_dict=fd) sess.run(set_to_sample_mode) self.assertAllClose(y_test_res_one, y_test_res_two) n_train = 10 check_freq = 2 with self.test_session() as sess: for i in xrange(n_train): if i % check_freq == 0: sess.run(set_to_mean_mode) to_run = [y_train, y_test, loss_train, loss_test, kl_cost] else: to_run = [y_train, y_test, loss_train, loss_test, kl_cost, train_step] res = sess.run(to_run, feed_dict=fd) loss_train_res, loss_test_res = res[2:4] if i % check_freq == 0: self.assertAllClose(loss_train_res, loss_test_res) sess.run(set_to_sample_mode) def testLastSampleMode(self): """Tests that the 'last sample' estimator mode uses the last sample.""" class CustomNormal(tfp.distributions.Normal): """A custom normal distribution which implements `self.last_sample()`.""" def __init__(self, *args, **kwargs): super(CustomNormal, self).__init__(*args, **kwargs) self._noise = tf.get_variable( name=self.loc.name.replace(":", "_") + "_noise", shape=self.loc.shape, dtype=self.loc.dtype, initializer=tf.random_normal_initializer(0.0, 1.0), trainable=False) def sample(self): noise = self._noise.assign(tf.random_normal(self.loc.shape)) return self.last_sample(noise) def last_sample(self, noise=None): if noise is None: noise = self._noise return noise * self.scale + self.loc sampling_mode_tensor = tf.get_variable( name="sampling_mode", dtype=tf.string, shape=(), trainable=False, initializer=tf.constant_initializer(bbb.EstimatorModes.sample)) enter_last_sample_mode = tf.assign( sampling_mode_tensor, tf.constant(bbb.EstimatorModes.last_sample)) bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=test_diag_gaussian_builder_builder( dist_cls=CustomNormal), prior_builder=bbb.adaptive_gaussian_prior_builder, kl_builder=bbb.stochastic_kl_builder, sampling_mode_tensor=sampling_mode_tensor) with tf.variable_scope("model_scope", custom_getter=bbb_getter): model = snt.Linear(5) data = tf.placeholder(shape=(2, 4), dtype=tf.float32) outputs = model(data) # We expect there to be 8 trainable variables. # model (Linear has two variables: weight and bias). # The posterior has two variables (mu and sigma) for each variable. # So does the prior (since it's adaptive). 
self.assertEqual(len(tf.trainable_variables()), 2*2*2) init_op = tf.global_variables_initializer() x_feed = np.random.normal(size=(2, 4)) with self.test_session() as sess: sess.run(init_op) output_res_one = sess.run(outputs, feed_dict={data: x_feed}) output_res_two = sess.run(outputs, feed_dict={data: x_feed}) sess.run(enter_last_sample_mode) output_res_three = sess.run(outputs, feed_dict={data: x_feed}) output_res_four = sess.run(outputs, feed_dict={data: x_feed}) # One and two should be different samples. self.assertTrue((output_res_one != output_res_two).all()) # Two through four should be the same. self.assertAllClose(output_res_two, output_res_three) self.assertAllClose(output_res_three, output_res_four) self.assertAllClose(output_res_two, output_res_four) def testRecurrentNetSamplesWeightsOnce(self): """Test that sampling of the weights is done only once for a sequence. Test strategy: Provide an input sequence x whose value is the same at each time step. If the outputs from f_theta() are the same at each time step, this is evidence (but not proof) that theta is the same at each time step. """ seq_length = 10 batch_size = 1 input_dim = 5 output_dim = 5 bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=bbb.diagonal_gaussian_posterior_builder, prior_builder=bbb.fixed_gaussian_prior_builder, kl_builder=bbb.stochastic_kl_builder, sampling_mode_tensor=tf.constant(bbb.EstimatorModes.sample)) class NoStateLSTM(snt.LSTM): """An LSTM which ignores hidden state.""" def _build(self, inputs, state): outputs, _ = super(NoStateLSTM, self)._build(inputs, state) return outputs, state with tf.variable_scope("model", custom_getter=bbb_getter): core = NoStateLSTM(output_dim) input_seq = tf.ones(shape=(seq_length, batch_size, input_dim)) output_seq, _ = tf.nn.dynamic_rnn( core, inputs=input_seq, initial_state=core.initial_state(batch_size=batch_size), time_major=True) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) output_res_one = sess.run(output_seq) output_res_two = sess.run(output_seq) # Ensure that the sequence is the same at every time step, a necessary # but not sufficient condition for the weights to be the same. output_zero = output_res_one[0] for time_step_output in output_res_one[1:]: self.assertAllClose(output_zero, time_step_output) # Ensure that the noise is different in the second run by checking that # the output sequence is different now. for first_run_elem, second_run_elem in zip(output_res_one, output_res_two): distance = np.linalg.norm( first_run_elem.flatten() - second_run_elem.flatten()) self.assertGreater(distance, 0.001) def testFreshNoisePerConnection(self): """Test that the `fresh_noise_per_connection` flag works as advertised.""" def create_custom_getter(fresh_noise_per_connection): bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=bbb.diagonal_gaussian_posterior_builder, prior_builder=bbb.fixed_gaussian_prior_builder, kl_builder=bbb.stochastic_kl_builder, sampling_mode_tensor=tf.constant(bbb.EstimatorModes.sample), fresh_noise_per_connection=fresh_noise_per_connection) return bbb_getter # 1. fresh_noise_per_connection == True. # test strategy: connect a module twice in sample mode and check that the # weights are different. fresh_noise_getter = create_custom_getter(fresh_noise_per_connection=True) with tf.variable_scope("fresh_noise", custom_getter=fresh_noise_getter): fresh_noise_mod = snt.Linear(3) x = tf.ones(shape=(3, 2)) y_fresh_one = fresh_noise_mod(x) y_fresh_two = fresh_noise_mod(x) # 2. 
fresh_noise_per_connection == False. # test strategy: connect a module twice in sample mode and check that the # weights are the same. reuse_noise_getter = create_custom_getter(fresh_noise_per_connection=False) with tf.variable_scope("reuse_noise", custom_getter=reuse_noise_getter): reuse_noise_mod = snt.Linear(3) y_reuse_one = reuse_noise_mod(x) y_reuse_two = reuse_noise_mod(x) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) yf_one, yf_two, yr_one, yr_two = sess.run([ y_fresh_one, y_fresh_two, y_reuse_one, y_reuse_two ]) self.assertAllClose(yr_one, yr_two) self.assertTrue(np.linalg.norm(yf_one - yf_two) > 0.0001) def testWeightsResampledWithKeepControlDeps(self): """Test that weights are resampled with `keep_control_dependencies=True`. Test strategy: We test the inverse of `testRecurrentNetSamplesWeightsOnce`. Provide an input sequence x whose value is the same at each time step. If the outputs from f_theta() are the different at each time step, then theta is different at each time step. In principle, it is possible that different thetas give the same outputs, but this is very unlikely. """ seq_length = 10 batch_size = 1 input_dim = 5 output_dim = 5 bbb_getter = bbb.bayes_by_backprop_getter( posterior_builder=bbb.diagonal_gaussian_posterior_builder, prior_builder=bbb.fixed_gaussian_prior_builder, kl_builder=bbb.stochastic_kl_builder, sampling_mode_tensor=tf.constant(bbb.EstimatorModes.sample), keep_control_dependencies=True) class NoStateLSTM(snt.LSTM): """An LSTM which ignores hidden state.""" def _build(self, inputs, state): outputs, _ = super(NoStateLSTM, self)._build(inputs, state) return outputs, state with tf.variable_scope("model", custom_getter=bbb_getter): core = NoStateLSTM(output_dim) input_seq = tf.ones(shape=(seq_length, batch_size, input_dim)) output_seq, _ = tf.nn.dynamic_rnn( core, inputs=input_seq, initial_state=core.initial_state(batch_size=batch_size), time_major=True) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) output_res_one = sess.run(output_seq) output_res_two = sess.run(output_seq) # Ensure that the sequence is different at every time step output_zero = output_res_one[0] for time_step_output in output_res_one[1:]: distance = np.linalg.norm( time_step_output.flatten() - output_zero.flatten()) self.assertGreater(distance, 0.001) # Ensure that the noise is different in the second run by checking that # the output sequence is different now. for first_run_elem, second_run_elem in zip(output_res_one, output_res_two): distance = np.linalg.norm( first_run_elem.flatten() - second_run_elem.flatten()) self.assertGreater(distance, 0.001) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/custom_getters/bayes_by_backprop_test.py
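`test_sample_mode_is_stochastic_and_can_be_switched` above toggles the estimator through a string-valued, non-trainable `tf.Variable`. That switching pattern, pulled out as a minimal sketch with the default posterior, prior, and KL builders; the scope name and layer sizes are illustrative.

```
import sonnet as snt
import sonnet.python.custom_getters.bayes_by_backprop as bbb
import tensorflow.compat.v1 as tf

sampling_mode = tf.get_variable(
    'bbb_sampling_mode', shape=(), dtype=tf.string, trainable=False,
    initializer=tf.constant_initializer(bbb.EstimatorModes.sample))
use_mean = tf.assign(sampling_mode, tf.constant(bbb.EstimatorModes.mean))
use_sample = tf.assign(sampling_mode, tf.constant(bbb.EstimatorModes.sample))

getter = bbb.bayes_by_backprop_getter(sampling_mode_tensor=sampling_mode)
with tf.variable_scope('net', custom_getter=getter):
  mlp = snt.nets.MLP(output_sizes=[32, 5])
  outputs = mlp(tf.placeholder(tf.float32, shape=(3, 4)))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(use_mean)    # Deterministic weights, e.g. for evaluation.
  sess.run(use_sample)  # Back to stochastic weights for training.
```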
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.custom_getters.non_trainable.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import sonnet as snt import tensorflow.compat.v1 as tf _CONV_NET_2D_KWARGS = { "output_channels": [16, 16], "kernel_shapes": [3], "strides": [2], "paddings": [snt.VALID], } _MLP_KWARGS = { "output_sizes": [16, 16], } def _identity_getter(getter, *args, **kwargs): return getter(*args, **kwargs) class NonTrainableTest(parameterized.TestCase, tf.test.TestCase): def testUsage(self): with tf.variable_scope("", custom_getter=snt.custom_getters.non_trainable): lin1 = snt.Linear(10, name="linear1") x = tf.placeholder(tf.float32, [10, 10]) lin1(x) self.assertEqual(2, len(tf.global_variables())) self.assertEqual(0, len(tf.trainable_variables())) @parameterized.named_parameters( ("NonIdentity", snt.custom_getters.non_trainable, _identity_getter), ("IdentityNon", _identity_getter, snt.custom_getters.non_trainable), ) def testNest(self, getter1, getter2): with tf.variable_scope("scope1", custom_getter=getter1): with tf.variable_scope("scope2", custom_getter=getter2): tf.get_variable("w", [10, 10], tf.float32) self.assertEqual(1, len(tf.global_variables())) self.assertEqual(0, len(tf.trainable_variables())) @parameterized.named_parameters( ("ConvNet2D", snt.nets.ConvNet2D, _CONV_NET_2D_KWARGS, [1, 13, 13, 3]), ("MLP", snt.nets.MLP, _MLP_KWARGS, [1, 16]), ) def testComplex(self, module, kwargs, input_shape): with tf.variable_scope("", custom_getter=snt.custom_getters.non_trainable): module_instance = module(**kwargs) x1 = tf.placeholder(tf.float32, input_shape) x2 = tf.placeholder(tf.float32, input_shape) module_instance(x1) module_instance(x2) self.assertNotEqual(0, len(tf.global_variables())) self.assertEqual(0, len(tf.trainable_variables())) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/custom_getters/non_trainable_test.py
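The tests above check that `non_trainable` keeps variables out of `TRAINABLE_VARIABLES` while leaving them in `GLOBAL_VARIABLES`. A common use is a frozen feature extractor under a trainable head; a minimal sketch with illustrative sizes.

```
import sonnet as snt
import tensorflow.compat.v1 as tf

# Frozen trunk: variables are saved/restored but never trained.
with tf.variable_scope(
    'frozen', custom_getter=snt.custom_getters.non_trainable):
  encoder = snt.Linear(16, name='encoder')

head = snt.Linear(4, name='head')  # Trainable as usual.

inputs = tf.placeholder(tf.float32, shape=[10, 10])
logits = head(encoder(inputs))

# Only the head's weight and bias remain trainable.
assert len(tf.trainable_variables()) == 2
```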
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Custom getter which uses snt.restore_initializer to restore all variables. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sonnet as snt import tensorflow.compat.v1 as tf def restore_initializer(filename, name_fn=None, collection=tf.GraphKeys.GLOBAL_VARIABLES): """Custom getter to restore all variables with `snt.restore_initializer`. Args: filename: The filename of the checkpoint. name_fn: A function which can map the name of the variable requested. This allows restoring variables with values having different names in the checkpoint. collection: Only set the restore initializer for variables in this collection. If `None`, it will attempt to restore all variables. By default `tf.GraphKeys.GLOBAL_VARIABLES`. Returns: A restore_initializer custom getter, which is a function taking arguments (getter, name, *args, **kwargs). """ def _restore_initializer(getter, name, *args, **kwargs): """Gets variable with restore initializer.""" # Work out what collections this variable will go in. collections = kwargs["collections"] if collections is None: collections = [tf.GraphKeys.GLOBAL_VARIABLES] if (kwargs["trainable"] and tf.GraphKeys.TRAINABLE_VARIABLES not in collections): collections += [tf.GraphKeys.TRAINABLE_VARIABLES] if collection is None or collection in collections: # We don't make use of the 'scope' argument for restore_initializer as we # might want to change the name in more complex ways, such as removing the # scope prefix as well. if name_fn is not None: var_name_in_checkpoint = name_fn(name) else: var_name_in_checkpoint = name tf.logging.info("Restoring '%s' from '%s' into variable '%s'", var_name_in_checkpoint, filename, name) kwargs["initializer"] = snt.restore_initializer( filename, var_name_in_checkpoint, scope="") return getter(name, *args, **kwargs) return _restore_initializer
sonnet-1
sonnet/python/custom_getters/restore_initializer.py
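A short usage sketch of the getter defined above: initialize new variables from values stored under different names in an existing checkpoint. The checkpoint path and the decoder-to-encoder renaming are assumptions for illustration.

```
import sonnet as snt
import tensorflow.compat.v1 as tf

checkpoint_path = '/tmp/pretrained_ckpt'   # assumed path

# Variables named 'decoder/*' are initialized from the 'encoder/*' entries
# found in the checkpoint, via name_fn.
getter = snt.custom_getters.restore_initializer(
    filename=checkpoint_path,
    name_fn=lambda name: name.replace('decoder', 'encoder'))

with tf.variable_scope('', custom_getter=getter):
  decoder = snt.Linear(10, name='decoder')
  outputs = decoder(tf.placeholder(tf.float32, [10, 10]))

with tf.Session() as sess:
  # Running the initializers pulls the values straight from the checkpoint.
  sess.run(tf.global_variables_initializer())
```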
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.custom_getters.restore_initializer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # Dependency imports import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf class RestoreInitializerTest(tf.test.TestCase): def _save_test_checkpoint(self): test_dir = tf.test.get_temp_dir() checkpoint_dir = os.path.join(test_dir, "test_path") checkpoint_path = os.path.join(checkpoint_dir, "checkpoint") g = tf.Graph() with g.as_default(): net = snt.Linear(10, name="linear1") inputs = tf.placeholder(tf.float32, [10, 10]) net(inputs) saver = tf.train.Saver() init = tf.global_variables_initializer() with self.test_session(graph=g) as sess: sess.run(init) saver.save(sess, checkpoint_path, global_step=0) expected_values = sess.run({"w": net.w, "b": net.b}) return checkpoint_dir, expected_values def testSimpleUsage(self): checkpoint_path, expected_values = self._save_test_checkpoint() checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) g = tf.Graph() with g.as_default(): custom_getter = snt.custom_getters.restore_initializer( filename=checkpoint_path) with tf.variable_scope("", custom_getter=custom_getter): inputs = tf.placeholder(tf.float32, [10, 10]) lin1 = snt.Linear(10, name="linear1") lin1(inputs) init = tf.global_variables_initializer() with self.test_session(graph=g) as sess: sess.run(init) w_value, b_value = sess.run([lin1.w, lin1.b]) self.assertAllClose(expected_values["w"], w_value) self.assertAllClose(expected_values["b"], b_value) def testNameFn(self): checkpoint_path, expected_values = self._save_test_checkpoint() checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) def name_fn(name): return name.replace("linear2", "linear1") g = tf.Graph() with g.as_default(): custom_getter = snt.custom_getters.restore_initializer( filename=checkpoint_path, name_fn=name_fn) with tf.variable_scope("", custom_getter=custom_getter): inputs = tf.placeholder(tf.float32, [10, 10]) lin1 = snt.Linear(10, name="linear2") lin1(inputs) init = tf.global_variables_initializer() with self.test_session(graph=g) as sess: sess.run(init) w_value, b_value = sess.run([lin1.w, lin1.b]) self.assertAllClose(expected_values["w"], w_value) self.assertAllClose(expected_values["b"], b_value) def testCollections(self): checkpoint_path, expected_values = self._save_test_checkpoint() checkpoint_path = tf.train.latest_checkpoint(checkpoint_path) g = tf.Graph() with g.as_default(): custom_getter = snt.custom_getters.restore_initializer( filename=checkpoint_path, collection="blah") with tf.variable_scope("", custom_getter=custom_getter): inputs = tf.placeholder(tf.float32, [10, 10]) lin1 = snt.Linear(10, name="linear1") lin1(inputs) tf.add_to_collection("blah", lin1.w) init = tf.global_variables_initializer() with 
self.test_session(graph=g) as sess: sess.run(init) w_value = sess.run(lin1.w) self.assertFalse(np.allclose(expected_values["w"], w_value)) # b is initialized to zero always. if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/custom_getters/restore_initializer_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.custom_getters.stop_gradient.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import sonnet as snt import tensorflow.compat.v1 as tf _CONV_NET_2D_KWARGS = { "output_channels": [16, 16], "kernel_shapes": [3], "strides": [2], "paddings": [snt.VALID], } _MLP_KWARGS = { "output_sizes": [16, 16], } def _identity_getter(getter, *args, **kwargs): return getter(*args, **kwargs) class StopGradientTest(parameterized.TestCase, tf.test.TestCase): def testUsage(self): with tf.variable_scope("", custom_getter=snt.custom_getters.stop_gradient): lin1 = snt.Linear(10, name="linear1") x = tf.placeholder(tf.float32, [10, 10]) y = lin1(x) variables = tf.trainable_variables() variable_names = [v.name for v in variables] self.assertEqual(2, len(variables)) self.assertIn("linear1/w:0", variable_names) self.assertIn("linear1/b:0", variable_names) grads = tf.gradients(y, variables) names_to_grads = {var.name: grad for var, grad in zip(variables, grads)} self.assertEqual(None, names_to_grads["linear1/w:0"]) self.assertEqual(None, names_to_grads["linear1/b:0"]) @parameterized.named_parameters( ("StopIdentity", snt.custom_getters.stop_gradient, _identity_getter), ("IdentityStop", _identity_getter, snt.custom_getters.stop_gradient), ) def testNest(self, getter1, getter2): with tf.variable_scope("scope1", custom_getter=getter1): with tf.variable_scope("scope2", custom_getter=getter2): w = tf.get_variable("w", [10, 10], tf.float32) grads = tf.gradients(w, tf.global_variables()) self.assertEqual(grads, [None]) @parameterized.named_parameters( ("ConvNet2D", snt.nets.ConvNet2D, _CONV_NET_2D_KWARGS, [1, 13, 13, 3]), ("MLP", snt.nets.MLP, _MLP_KWARGS, [1, 16]), ) def testComplex(self, module, kwargs, input_shape): with tf.variable_scope("", custom_getter=snt.custom_getters.stop_gradient): conv_net = module(**kwargs) x = tf.placeholder(tf.float32, input_shape) y = conv_net(x) variables = tf.global_variables() grads = tf.gradients(y, variables) self.assertEqual(grads, [None] * len(variables)) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/custom_getters/stop_gradient_test.py
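The trunk/head pattern implied by the gradient assertions above, as a standalone sketch with illustrative sizes: variables built under `stop_gradient` receive `None` gradients, while a downstream module still trains.

```
import sonnet as snt
import tensorflow.compat.v1 as tf

with tf.variable_scope(
    'trunk', custom_getter=snt.custom_getters.stop_gradient):
  trunk = snt.Linear(16)

head = snt.Linear(1)

inputs = tf.placeholder(tf.float32, [4, 8])
loss = tf.reduce_sum(head(trunk(inputs)))

trunk_grads = tf.gradients(loss, [trunk.w, trunk.b])
head_grads = tf.gradients(loss, [head.w, head.b])

assert trunk_grads == [None, None]              # Blocked by tf.stop_gradient.
assert all(g is not None for g in head_grads)   # Head still gets gradients.
```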
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Stop gradient custom getter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf def stop_gradient(getter, *args, **kwargs): """Custom getter which prevents variables being optimized. Usage like: with tf.variable_scope("", custom_getter=snt.custom_getters.stop_gradient): net = snt.Linear(num_hidden)(net) or, using the `custom_getter` constructor argument, linear = snt.Linear(num_hidden, custom_getter=snt.custom_getters.stop_gradient) net = linear(net) will result in the gradient with respect to the variables in the linear module being `None`. By default, the variables will still be in the trainable variables collection. When used with a Sonnet module, the module must be constructed inside the variable scope with the custom getter. Just building the module inside said variable scope will not use the custom getter. Args: getter: The true getter to call. *args: Arguments, in the same format as tf.get_variable. **kwargs: Keyword arguments, in the same format as tf.get_variable. Returns: The return value of `getter(*args, **kwargs)` with a tf.stop_gradient. """ return tf.stop_gradient(getter(*args, **kwargs))
sonnet-1
sonnet/python/custom_getters/stop_gradient.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Custom getters for use in TensorFlow and Sonnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from sonnet.python.custom_getters import bayes_by_backprop from sonnet.python.custom_getters.context import Context from sonnet.python.custom_getters.non_trainable import non_trainable from sonnet.python.custom_getters.override_args import override_args from sonnet.python.custom_getters.override_args import override_default_args from sonnet.python.custom_getters.restore_initializer import restore_initializer from sonnet.python.custom_getters.stop_gradient import stop_gradient
sonnet-1
sonnet/python/custom_getters/__init__.py
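The nesting tests earlier in this package suggest these getters compose when variable scopes are nested; combining two of them is not shown above, so treat this as an unverified sketch: `non_trainable` inside `stop_gradient` should yield a variable that is neither trainable nor reached by gradients.

```
import sonnet as snt
import tensorflow.compat.v1 as tf

with tf.variable_scope(
    'outer', custom_getter=snt.custom_getters.stop_gradient):
  with tf.variable_scope(
      'inner', custom_getter=snt.custom_getters.non_trainable):
    w = tf.get_variable('w', [10, 10], tf.float32)

assert not tf.trainable_variables()  # non_trainable took effect.
# stop_gradient took effect: no gradient flows back to the variable.
assert tf.gradients(tf.reduce_sum(w), tf.global_variables()) == [None]
```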
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Custom getters for Sonnet-compatible bayes by backprop. ## Algorithm Description Bayes by Backprop is an algorithm for learning a probability distribution over neural network weights. Please see https://arxiv.org/abs/1505.05424 for details. This implementation is compatible with Recurrent Neural Networks as in https://arxiv.org/abs/1704.02798. ## Usage A minimal example is demonstrated below. A full example can be found in the Bayesian RNN script here: https://github.com/deepmind/sonnet/tree/master/sonnet/examples/brnn_ptb.py. ``` import sonnet as snt import sonnet.python.custom_getters.bayes_by_backprop as bbb import tensorflow as tf # Use a custom prior builder. def custom_prior_builder(getter, name, *args, **kwargs): return tfp.distributions.Normal(0.0, 0.01) # Use pre-canned builders for diagonal gaussian posterior and stochastic KL. get_bbb_variable_fn = bbb.bayes_by_backprop_getter( prior_builder=custom_prior_builder, posterior_builder=bbb.diagonal_gaussian_posterior_builder, kl_builder=bbb.stochastic_kl_builder) # Demonstration of how to use custom_getters with variable scopes. with tf.variable_scope('network', custom_getter=get_bbb_variable_fn): model = snt.Linear(4) # This approach is compatible with all `tf.Variable`s constructed with # `tf.get_variable()`, not just those contained in sonnet modules. noisy_variable = tf.get_variable('w', shape=(5,), dtype=tf.float32) # An alternative way to use BBB with sonnet modules is to use their custom # getter argument. model2 = snt.Linear(5, custom_getter=get_bbb_variable_fn) # Proceed with the rest of the graph as usual. input_data, target_data = tf.random_normal((3, 2)), tf.random_normal((3, 4)) loss = tf.reduce_sum(tf.square(model(input_data) - target_data)) # Add the scaled KL cost to the loss. # A good choice of scaling is to divide by the number of training examples. # See https://arxiv.org/abs/1505.05424, section 3.4. num_training_examples = 1000 loss += bbb.get_total_kl_cost() / num_training_examples optimizer = tf.train.GradientDescentOptimizer(1e-3) train_op = optimizer.minimize(loss) ``` ## Reusing variables (e.g. RNNs). A unique `tf.Variable` will only count once towards the KL cost returned by `get_total_kl_cost()` regardless of how many times it is used in the graph. By default, every time a variable is retrieved by `tf.get_variable()`, new sampling noise will be used. To disable this behavior, pass the argument `fresh_noise_per_connection=False` to the `bayes_by_backprop_getter` factory. If using `tf.while_loop`, noise is *not* resampled per iteration regardless of the value of `fresh_noise_per_connection`. This is because tensors created outside a `tf.while_loop` are evaluated only once. You can disable this behaviour by passing the argument `keep_control_dependencies=True` to the `bayes_by_backprop_getter` factory. 
## Contact jmenick@ """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import weakref import tensorflow.compat.v1 as tf import tensorflow_probability as tfp _DEFAULT_SCALE_TRANSFORM = tf.nn.softplus _OK_DTYPES_FOR_BBB = (tf.float16, tf.float32, tf.float64, tf.bfloat16) _OK_PZATION_TYPE = tfp.distributions.FULLY_REPARAMETERIZED class _WeakRegistry(weakref.WeakKeyDictionary): def __getitem__(self, key): try: return weakref.WeakKeyDictionary.__getitem__(self, key) except KeyError: new_value = collections.OrderedDict() self[key] = new_value return new_value _all_var_metadata_registry = _WeakRegistry() def inverse_softplus(y): """The inverse of the softplus function. Computes the *inverse* of softplus, a function which maps an unconstrained real number to the positive reals, e.g. to squash an unconstrained neural network activation to parameterize a variance. Args: y: A positive number. Returns: The number `x` such that softplus(x) = y. """ return math.log(math.exp(y) - 1.0) def scale_variable_initializer(desired_scale): return tf.constant_initializer(inverse_softplus(desired_scale)) # pylint: disable=old-style-class class EstimatorModes: sample = "sample" mean = "mean" last_sample = "last" # pylint: enable=old-style-class _VariableMetadata = collections.namedtuple( "VariableMetadata", ["raw_variable_name", "raw_variable_shape", "scope_name", "posterior", "posterior_estimate", "prior", "kl_cost", "prior_vars", "posterior_vars"]) # pylint: disable=keyword-arg-before-vararg def diagonal_gaussian_posterior_builder( getter, name, shape=None, *args, **kwargs): """A pre-canned builder for diagonal gaussian posterior distributions. Given a true `getter` function and arguments forwarded from `tf.get_variable`, return a distribution object for a diagonal posterior over a variable of the requisite shape. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. shape: The `shape` argument passed to `tf.get_variable`. *args: See positional arguments passed to `tf.get_variable`. **kwargs: See keyword arguments passed to `tf.get_variable`. Returns: An instance of `tfp.distributions.Normal` representing the posterior distribution over the variable in question. """ # Please see the documentation for # `tfp.distributions.param_static_shapes`. parameter_shapes = tfp.distributions.Normal.param_static_shapes(shape) loc_var = getter( name + "/posterior_loc", shape=parameter_shapes["loc"], *args, **kwargs) scale_var = getter( name + "/posterior_scale", shape=parameter_shapes["scale"], *args, **kwargs) posterior = tfp.distributions.Normal( loc=loc_var, scale=tf.nn.softplus(scale_var), name="{}_posterior_dist".format(name)) return posterior # pylint: enable=keyword-arg-before-vararg # pylint: disable=keyword-arg-before-vararg def fixed_gaussian_prior_builder( getter, name, dtype=None, *args, **kwargs): """A pre-canned builder for fixed gaussian prior distributions. Given a true `getter` function and arguments forwarded from `tf.get_variable`, return a distribution object for a scalar-valued fixed gaussian prior which will be broadcast over a variable of the requisite shape. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. dtype: The `dtype` argument passed to `tf.get_variable`. 
*args: See positional arguments passed to `tf.get_variable`. **kwargs: See keyword arguments passed to `tf.get_variable`. Returns: An instance of `tfp.distributions.Normal` representing the prior distribution over the variable in question. """ del getter # Unused. del args # Unused. del kwargs # Unused. loc = tf.constant(0.0, shape=(), dtype=dtype) scale = tf.constant(0.01, shape=(), dtype=dtype) return tfp.distributions.Normal( loc=loc, scale=scale, name="{}_prior_dist".format(name)) # pylint: enable=keyword-arg-before-vararg def adaptive_gaussian_prior_builder( getter, name, *args, **kwargs): """A pre-canned builder for adaptive scalar gaussian prior distributions. Given a true `getter` function and arguments forwarded from `tf.get_variable`, return a distribution object for a scalar-valued adaptive gaussian prior which will be broadcast over a variable of the requisite shape. This prior's parameters (e.g `loc` and `scale` for a gaussian) will consist of a single learned scalar for the entire `tf.Variable` for which it serves as the prior, regardless of that `tf.Variable`'s shape. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. *args: See positional arguments passed to `tf.get_variable`. **kwargs: See keyword arguments passed to `tf.get_variable`. Returns: An instance of `tfp.distributions.Normal` representing the prior distribution over the variable in question. """ kwargs["shape"] = () loc_var = getter(name + "_prior_loc", *args, **kwargs) kwargs["initializer"] = scale_variable_initializer(0.01) scale_var = getter(name + "_prior_scale", *args, **kwargs) prior = tfp.distributions.Normal( loc=loc_var, scale=tf.nn.softplus(scale_var), name="{}_prior_dist".format(name)) return prior def stochastic_kl_builder(posterior, prior, sample): """A pre-canned builder for a ubiquitous stochastic KL estimator.""" return tf.subtract( tf.reduce_sum(posterior.log_prob(sample)), tf.reduce_sum(prior.log_prob(sample))) def analytic_kl_builder(posterior, prior, sample): """A pre-canned builder for the analytic kl divergence.""" del sample return tf.reduce_sum(tfp.distributions.kl_divergence(posterior, prior)) def bayes_by_backprop_getter( posterior_builder=diagonal_gaussian_posterior_builder, prior_builder=fixed_gaussian_prior_builder, kl_builder=stochastic_kl_builder, sampling_mode_tensor=None, fresh_noise_per_connection=True, keep_control_dependencies=False): """Creates a custom getter which does Bayes by Backprop. Please see `tf.get_variable` for general documentation on custom getters. All arguments are optional. If nothing is configued, then a diagonal gaussian posterior will be used, and a fixed N(0, 0.01) prior will be used. Please see the default `posterior_builder` and `prior_builder` for a more detailed understanding of the default settings. Args: posterior_builder: A builder function which constructs an instance of `tfp.distributions.Distribution` which shall serve as the posterior over the `tf.Variable` of interest. The builder receives the `getter` and the arguments forwarded from `tf.get_variable`. Suppose one wrote ``` tf.get_variable( 'weights', shape=(3,), initializer=tf.zeros_initializer, dtype=tf.float32) ``` then the `posterior_builder` argument would receive the `name`, `shape`, `initializer`, and `dtype` arguments passed above. The builder must return a `tfp.distributions.Distribution` object. 
Please see the `tf.get_variable` for documentation on `custom_getter` and `getter`, and see `bbb.diagonal_gaussian_posterior_builder` (the default) for an example of using this builder API. prior_builder: A builder function which constructs an instance of `tfp.distributions.Distribution` which shall serve as the prior over the `tf.Variable` of interest. Identical API to `posterior_builder`. See `bbb.fixed_gaussian_prior_builder` (the default) for an example. kl_builder: A builder function which receives the posterior distribution, prior distribution, and a sample from the posterior. It returns a scalar-shaped `tf.Tensor` representing the total KL cost for the `tf.Variable` in question. See `bbb.stochastic_kl_builder` (default) and `bbb.analytic_kl_builder` for examples. sampling_mode_tensor: A `tf.Tensor` which determines how an estimate from the posterior is produced. It must be scalar-shaped and have a `dtype` of `tf.string`. Valid values for this tensor are `bbb.EstimatorModes.sample` (which is the default), `bbb.EstimatorModes.mean`, and `bbb.EstimatorModes.last_sample`. `bbb.EstimatorModes.sample` is appropriate for training, and `bbb.EstimatorModes.mean` can be used at test time. fresh_noise_per_connection: A boolean. Indicates that each time a stochastic variable is retrieved with this custom getter, new sampling noise should be used. This is `True` by default. If this argument is set to `False`, then the same noise is used for each connection. Note that this does not apply to connections within a `tf.while_loop`; the same sampling noise is always used in different iterations of a `tf.while_loop` within one `session.run()` call. See the unit tests for details. keep_control_dependencies: A boolean. This argument should only be used by advanced users. Indicates that each time a stochastic variable is retrieved in the loop body of a `tf.while_loop` construct, new sampling noise should be used. The default behavior is `False`, so that RNNs use the same weights at each recurrent time step. This is done by removing the creation of the Variable from any existing control flow contexts. Notably, the Variables will be created outside the context of any tf.while_loop, making them fetchable. When this argument is `True`, any Variables used in the loop body of a `tf.while_loop` will be non-fetchable. If the KL cost needs to be evaluated, the Variable must *first* be used *outside* the loop body. This op using the Variable simply needs to be placed on the graph to get a stochastic estimate of the KL; it doesn't need to ever be used. Example: ``` def loop_body(i): logits = sonnet_module(queue) i = i + 1 with tf.variable_scope('bbb', custom_getter=bbb.bayes_by_backprop_getter( fresh_noise_per_connection=True, keep_control_dependencies=True)): unused_op = sonnet_module(queue) # Adds KL estimate to bbb Collection final_i = tf.while_loop(lambda i: i < 5, loop_body, tf.constant(0.)) ``` Here when we add `unused_op` to the graph, we also add a number of tensors associated with the particular stochastic variable, including its contribution to the KL cost, to a graph-level registry. These are organized in a per-stochastic-variable data structure and be accessed with `bbb.get_variable_metadata()`. Without this line, these Tensors would instead be added the first time the Variable is used in the while_loop, which would make them non-fetchable. 
In all cases, the KL cost is only added once per Variable, which is the correct behavior, since if a variable is used multiple times in a model, the KL cost should remain unaffected. Returns: A `custom_getter` function which implements Bayes by Backprop. """ if sampling_mode_tensor is None: sampling_mode_tensor = tf.constant(EstimatorModes.sample) def custom_getter(getter, name, *args, **kwargs): """The custom getter that will be returned.""" if not kwargs.get("trainable", True): return getter(name, *args, **kwargs) if kwargs["dtype"] not in _OK_DTYPES_FOR_BBB: raise ValueError("Disallowed data type {}.".format(kwargs["dtype"])) var_scope = tf.get_variable_scope() if var_scope.reuse and not fresh_noise_per_connection: # Re-use the sampling noise by returning the very same posterior sample # if configured to do so. the_match = [ x for x in get_variable_metadata() if x.raw_variable_name == name] if not the_match: raise ValueError( "Internal error. No metadata for variable {}".format(name)) if len(the_match) > 1: raise ValueError( "Multiple matches for variable {}. Matches: {}".format( name, [x.raw_variable_name for x in the_match])) return the_match[0].posterior_estimate raw_variable_shape = kwargs["shape"] def construct_subgraph(): """Constructs subgraph used to reparameterize the variable in question.""" posterior = posterior_builder( getter, name=name, *args, **kwargs) prior = prior_builder( getter, name=name, *args, **kwargs) # If the user does not return an extra dictionary of prior variables, # then fill in an empty dictionary. if isinstance(posterior, collections.Sequence): posterior_dist, posterior_vars = posterior else: posterior_dist, posterior_vars = posterior, {} if isinstance(prior, collections.Sequence): prior_dist, prior_vars = prior else: prior_dist, prior_vars = prior, {} if posterior_dist.reparameterization_type != _OK_PZATION_TYPE: raise ValueError( "Distribution {} incompatible with Bayes by Backprop.".format( posterior_dist.__class__.__name__)) posterior_estimator = _produce_posterior_estimate(posterior_dist, sampling_mode_tensor, name) kl_cost = kl_builder(posterior_dist, prior_dist, posterior_estimator) variable_metadata = _VariableMetadata( raw_variable_name=name, raw_variable_shape=raw_variable_shape, scope_name=var_scope.name, posterior=posterior_dist, posterior_estimate=posterior_estimator, prior=prior_dist, kl_cost=kl_cost, prior_vars=prior_vars, posterior_vars=posterior_vars) return posterior_estimator, variable_metadata # Entering the `tf.control_dependencies(None)` context is crucial to # provide compatibility with `tf.while_loop` and thus RNNs. The main thing # it does is making the `kl_cost` fetchable by causing these ops to be # created outside the context of any tf.while_loop. Note also that it causes # a RNN core's weights to be sampled just once when unrolled over a # sequence, rather than at every timestep. control_deps = [] if keep_control_dependencies else None with tf.control_dependencies(control_deps): posterior_estimator, var_metadata = construct_subgraph() # Only add these ops to a collection once per unique variable. # This is to ensure that KL costs are not tallied up more than once. var_with_name = _all_var_metadata_registry[tf.get_default_graph()].get(name) if var_with_name is None: _all_var_metadata_registry[tf.get_default_graph()][name] = var_metadata return posterior_estimator return custom_getter def _produce_posterior_estimate(posterior_dist, posterior_estimate_mode, raw_var_name): """Create tensor representing estimate of posterior. 
Args: posterior_dist: An instance of `tfp.distributions.Distribution`. The variational posterior from which to produce an estimate of the variable in question. posterior_estimate_mode: A `Tensor` of dtype `tf.string`, which determines the inference mode. raw_var_name: The name of the variable over which inference is done. Returns: `z_sample`, a `Tensor` representing an estimate derived from the posterior distribution. """ conds = [ tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.sample), name="equal_sample_mode"), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.mean), name="equal_mean_mode"), tf.equal(posterior_estimate_mode, tf.constant(EstimatorModes.last_sample), name="equal_last_sample_mode"), ] # pylint: disable=unnecessary-lambda results = [ lambda: posterior_dist.sample(), lambda: posterior_dist.mean(), lambda: posterior_dist.last_sample() ] def default_case_branch_raising_error(): err_msg = "Invalid posterior estimate mode." raise_err = tf.Assert(tf.constant(False), data=[tf.constant(err_msg)]) with tf.control_dependencies([raise_err]): return posterior_dist.mean() if hasattr(posterior_dist, "last_sample"): cases = [(conds[0], results[0]), (conds[1], results[1]), (conds[2], results[2])] else: cases = [(conds[0], results[0]), (conds[1], results[1])] z_sample = tf.case( cases, exclusive=True, default=default_case_branch_raising_error, name="{}_posterior_estimate".format(raw_var_name)) # pylint: enable=unnecessary-lambda return z_sample def get_total_kl_cost(name="total_kl_cost", filter_by_name_substring=None): """Get the total cost for all (or a subset of) the stochastic variables. Args: name: A name for the tensor representing the total kl cost. filter_by_name_substring: A string used to filter which variables count toward the total KL cost. By default, this argument is `None`, and all variables trained using Bayes by Backprop are included. If this argument is provided, the variables whose KL costs are summed will be all those whose name contains `filter_by_name_substring`. An example use of this would be to select all variables within a particular scope. Returns: A tensor representing the total KL cost in the ELBO loss. """ all_variable_metadata = get_variable_metadata(filter_by_name_substring) if not all_variable_metadata: tf.logging.warning("No Bayes by Backprop variables found!") return tf.constant(0.0, shape=()) return tf.add_n([md.kl_cost for md in all_variable_metadata], name=name) def get_variable_metadata(scope_name_substring=None): variable_metadata = _all_var_metadata_registry[tf.get_default_graph()] all_variable_metadata = variable_metadata.values() if scope_name_substring is not None: all_variable_metadata = [x for x in all_variable_metadata if scope_name_substring in x.scope_name] else: # Ensure all_variable_metadata is always a list. all_variable_metadata = list(all_variable_metadata) return all_variable_metadata
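

# The example below is not part of the library: it is a minimal sketch of how
# the pieces defined above fit together, switching the posterior estimate
# between sampling (training) and the posterior mean (evaluation) through
# `sampling_mode_tensor`. The scope name, layer sizes and the assumed count of
# 1000 training examples are illustrative only.
def _example_train_and_eval_modes():
  """Sketch: train with sampled weights, evaluate with the posterior mean."""
  import sonnet as snt  # Assumed available; not imported by this module.

  # A scalar string tensor selects the estimator mode at session.run time.
  sampling_mode = tf.placeholder_with_default(
      tf.constant(EstimatorModes.sample), shape=())
  getter = bayes_by_backprop_getter(
      posterior_builder=diagonal_gaussian_posterior_builder,
      prior_builder=fixed_gaussian_prior_builder,
      kl_builder=stochastic_kl_builder,
      sampling_mode_tensor=sampling_mode)

  with tf.variable_scope("bbb_example", custom_getter=getter):
    model = snt.Linear(4)

  inputs = tf.random_normal((3, 2))
  targets = tf.random_normal((3, 4))
  loss = tf.reduce_mean(tf.square(model(inputs) - targets))
  # Scale the KL term by the (assumed) number of training examples.
  loss += get_total_kl_cost() / 1000.0

  with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    # Training-style evaluation: weights are sampled from the posterior.
    train_loss = session.run(loss)
    # Evaluation: use the posterior mean instead of a fresh sample.
    eval_loss = session.run(
        loss, feed_dict={sampling_mode: EstimatorModes.mean})
  return train_loss, eval_loss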
sonnet-1
sonnet/python/custom_getters/bayes_by_backprop.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Context manager to switch a custom getter on or off.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow.compat.v1 as tf class Context(object): """Contextually switching a custom getter on. Example usage, using `snt.custom_getters.stop_gradient` with `Context` to selectively disable gradients flowing to variables for particular connections of the module: ```python custom_getter = snt.custom_getters.Context(snt.custom_getters.stop_gradient) lin = snt.Linear(10, custom_getter=custom_getter) lin(net1) # custom getter not used, gradients on with custom_getter: lin(net2) # custom getter used, gradients off ``` Warning: If the custom getter affects the way the variable is created, then switching it on or off after the variable has been created will have no effect. For example, it is not possible to selectively switch off trainability using `custom_getters.non_trainable`, since this is a creation-time attribute. It is however possible to selectively switch off gradients using `custom_getters.stop_gradient`, since this applies an operation to the variable. """ def __init__(self, getter, verbose=False, default_getter=None): """Initializes a contextual switch for a custom getter. Args: getter: The custom getter which we may want to switch on. verbose: Log out every time a variable is fetched, and whether or not `getter` is used. default_getter: The custom getter to use when this context is not active. If None, the default custom getter is used. Returns: A custom getter which can also be used as a context manager. Entering the context enables the custom getter. """ self._count = 0 self._getter = getter self._verbose = verbose self._default_getter = default_getter def __call__(self, getter, name, *args, **kwargs): if self._count: if self._verbose: tf.logging.info("Context: Fetching variable %s with custom getter.", name) return self._getter(getter, name, *args, **kwargs) else: if self._verbose: tf.logging.info("Context: Fetching variable %s with %s getter.", name, "default" if self._default_getter else "normal") if self._default_getter: return self._default_getter(getter, name, *args, **kwargs) else: return getter(name, *args, **kwargs) def __enter__(self): self._count += 1 def __exit__(self, exception_type, exception_value, exception_traceback): self._count -= 1
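

# The example below is not part of the library: a minimal sketch of toggling a
# custom getter per connection. A locally defined `stop_gradient_getter`
# stands in for `snt.custom_getters.stop_gradient`; the Sonnet import and
# layer sizes are illustrative assumptions.
def _example_context_usage():
  """Sketch: gradients to the variables flow only outside the context."""
  import sonnet as snt  # Assumed available; not imported by this module.

  def stop_gradient_getter(getter, name, *args, **kwargs):
    # Blocks gradients flowing to the fetched variable.
    return tf.stop_gradient(getter(name, *args, **kwargs))

  switchable = Context(stop_gradient_getter, verbose=True)
  lin = snt.Linear(4, custom_getter=switchable)
  inputs = tf.random_normal([2, 3])

  out_with_grads = lin(inputs)  # Context inactive: plain getter is used.
  with switchable:
    out_no_grads = lin(inputs)  # Context active: stop_gradient is applied.

  # The first connection yields a real gradient tensor; the second yields None
  # because the variable was fetched through tf.stop_gradient.
  grad_on = tf.gradients(out_with_grads, lin.w)[0]
  grad_off = tf.gradients(out_no_grads, lin.w)[0]
  return grad_on, grad_off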
sonnet-1
sonnet/python/custom_getters/context.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Non-trainable custom getter.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function def non_trainable(getter, *args, **kwargs): """Custom getter which makes a variable non-trainable. Usage like: with tf.variable_scope("", custom_getter=snt.custom_getters.non_trainable): net = snt.Linear(num_hidden)(net) or, using the `custom_getter` constructor argument, linear = snt.Linear(num_hidden, custom_getter=snt.custom_getters.non_trainable) net = linear(net) will result in the variables inside the linear having `trainable=False`, i.e. won't be added to tf.trainable_variables() and thus won't be optimized. Warning: If `reuse=True` and the variable has previously been created in the same graph with `trainable=True`, this custom getter will do nothing. Similarly if the variable is reused after being created by this custom getter it will still be non-trainable, even if `trainable=True`. When used with a Sonnet module, the module must be constructed inside the variable scope with the custom getter. Just building the module inside said variable scope will not use the custom getter. Args: getter: The true getter to call. *args: Arguments, in the same format as tf.get_variable. **kwargs: Keyword arguments, in the same format as tf.get_variable. Returns: The return value of `getter(*args, **kwargs)` except with `trainable=False` enforced. """ kwargs["trainable"] = False return getter(*args, **kwargs)
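

# The example below is not part of the library: a minimal sketch showing that
# variables created through this getter are left out of
# tf.trainable_variables(). The Sonnet import, module names and sizes are
# illustrative assumptions.
def _example_non_trainable_usage():
  """Sketch: only the unfrozen module contributes trainable variables."""
  import sonnet as snt  # Assumed available; not imported by this module.
  import tensorflow.compat.v1 as tf

  inputs = tf.random_normal([2, 5])
  frozen = snt.Linear(3, custom_getter=non_trainable, name="frozen_linear")
  regular = snt.Linear(3, name="regular_linear")
  _ = frozen(inputs), regular(inputs)

  trainable_names = [v.op.name for v in tf.trainable_variables()]
  # Only regular_linear/w and regular_linear/b appear here; an optimizer built
  # on tf.trainable_variables() therefore never updates the frozen weights.
  assert not any(n.startswith("frozen_linear") for n in trainable_names)
  return trainable_names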
sonnet-1
sonnet/python/custom_getters/non_trainable.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.custom_getters.context.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import sonnet as snt import tensorflow.compat.v1 as tf def _suffix_getter(getter, name, *args, **kwargs): """Adds a suffix to the variable name (custom getter for use in tests).""" unused_original_variable = getter(name, *args, **kwargs) kwargs['reuse'] = None kwargs['trainable'] = False return getter(name + '_custom', *args, **kwargs) class ContextTest(tf.test.TestCase): def testContextCallsCustomGetterOnlyWhenInScope(self): custom_getter = snt.custom_getters.Context(_suffix_getter, verbose=True) with tf.variable_scope('', custom_getter=custom_getter): lin = snt.Linear(10, name='linear') inputs = tf.placeholder(tf.float32, [10, 10]) _ = lin(inputs) self.assertEqual('linear/w:0', lin.w.name) with custom_getter: _ = lin(inputs) self.assertEqual('linear/w_custom:0', lin.w.name) _ = lin(inputs) self.assertEqual('linear/w:0', lin.w.name) def testNestedContextCallsCustomGetterOnlyWhenInScope(self): custom_getter = snt.custom_getters.Context(_suffix_getter) with tf.variable_scope('', custom_getter=custom_getter): lin = snt.Linear(10, name='linear') inputs = tf.placeholder(tf.float32, [10, 10]) with custom_getter: _ = lin(inputs) self.assertEqual('linear/w_custom:0', lin.w.name) with custom_getter: _ = lin(inputs) self.assertEqual('linear/w_custom:0', lin.w.name) _ = lin(inputs) self.assertEqual('linear/w_custom:0', lin.w.name) _ = lin(inputs) self.assertEqual('linear/w:0', lin.w.name) def testTwoContextsOperateIndependently(self): custom_getter1 = snt.custom_getters.Context(_suffix_getter) with tf.variable_scope('', custom_getter=custom_getter1): lin1 = snt.Linear(10, name='linear1') custom_getter2 = snt.custom_getters.Context(_suffix_getter) with tf.variable_scope('', custom_getter=custom_getter2): lin2 = snt.Linear(10, name='linear2') inputs = tf.placeholder(tf.float32, [10, 10]) _ = lin1(inputs), lin2(inputs) with custom_getter1: _ = lin1(inputs), lin2(inputs) self.assertEqual('linear1/w_custom:0', lin1.w.name) self.assertEqual('linear2/w:0', lin2.w.name) with custom_getter2: _ = lin1(inputs), lin2(inputs) self.assertEqual('linear1/w_custom:0', lin1.w.name) self.assertEqual('linear2/w_custom:0', lin2.w.name) _ = lin1(inputs), lin2(inputs) self.assertEqual('linear1/w_custom:0', lin1.w.name) self.assertEqual('linear2/w:0', lin2.w.name) _ = lin1(inputs), lin2(inputs) self.assertEqual('linear1/w:0', lin1.w.name) self.assertEqual('linear2/w:0', lin2.w.name) if __name__ == '__main__': tf.test.main()
sonnet-1
sonnet/python/custom_getters/context_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Custom getter to override specific named arguments of get_variable.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import six def override_args(**kwargs): """Creates a custom getter that applies specified named arguments. Args: **kwargs: Overriding arguments for the custom getter to use in preference the named arguments it's called with. Returns: Custom getter. """ override_kwargs = kwargs def custom_getter(getter, *args, **kwargs): """Custom getter with certain named arguments overridden. Args: getter: Underlying variable getter to invoke. *args: Arguments, compatible with those of tf.get_variable. **kwargs: Keyword arguments, compatible with those of tf.get_variable. Returns: The result of invoking `getter(*args, **kwargs)` except that certain kwargs entries may have been overridden. """ kwargs.update(override_kwargs) return getter(*args, **kwargs) return custom_getter def override_default_args(**kwargs): """Creates a custom getter that applies specified named arguments. The returned custom getter treats the specified named arguments as revised defaults, and does not override any non-`None` argument values supplied by the original get_variable call (or by a nested scope's custom getter). Args: **kwargs: Overriding arguments for the custom getter to use in preference the named arguments it's called with. Returns: Custom getter. """ override_default_kwargs = kwargs def custom_getter(getter, *args, **kwargs): """Custom getter with certain named arguments overridden. Args: getter: Underlying variable getter to invoke. *args: Arguments, compatible with those of tf.get_variable. **kwargs: Keyword arguments, compatible with those of tf.get_variable. Returns: The result of invoking `getter(*args, **kwargs)` except that certain kwargs entries may have been overridden. """ updated_kwargs = override_default_kwargs.copy() updated_kwargs.update({kw: value for kw, value in six.iteritems(kwargs) if value is not None}) return getter(*args, **updated_kwargs) return custom_getter
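

# The example below is not part of the library: a minimal sketch contrasting
# the two getters above. The scope names, shapes and the choice of dtype are
# illustrative assumptions.
def _example_override_args_usage():
  """Sketch: hard override vs. revised default for the `dtype` argument."""
  import tensorflow.compat.v1 as tf

  # override_args: the getter's dtype always wins, even over an explicit
  # dtype given at the call site.
  force_fp16 = override_args(dtype=tf.float16)
  with tf.variable_scope("forced", custom_getter=force_fp16):
    v1 = tf.get_variable("v", shape=[2], dtype=tf.float32)
  assert v1.dtype.base_dtype == tf.float16

  # override_default_args: the getter's dtype is only a fallback, so the
  # caller's explicit float64 is kept.
  default_fp16 = override_default_args(dtype=tf.float16)
  with tf.variable_scope("defaulted", custom_getter=default_fp16):
    v2 = tf.get_variable("v", shape=[2], dtype=tf.float64)
  assert v2.dtype.base_dtype == tf.float64
  return v1, v2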
sonnet-1
sonnet/python/custom_getters/override_args.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utility functions for dealing with nested structures of Tensors. These complement `nest.flatten` and `nest.pack_sequence_as` from the core TF distribution. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework nest = contrib_framework.nest _DONE_WARN = {} def with_deprecation_warning(fn, extra_message=''): """Wraps the function and prints a warn-once (per `extra_message`) warning.""" def new_fn(*args, **kwargs): if extra_message not in _DONE_WARN: tf.logging.warning( 'Sonnet nest is deprecated. Please use ' 'tf.contrib.framework.nest instead. ' + extra_message ) _DONE_WARN[extra_message] = True return fn(*args, **kwargs) return new_fn assert_same_structure = with_deprecation_warning(nest.assert_same_structure) flatten = with_deprecation_warning(nest.flatten) flatten_iterable = with_deprecation_warning( nest.flatten, 'In addition, `flatten_iterable` is renamed to `flatten`.' ) is_sequence = with_deprecation_warning(nest.is_sequence) is_iterable = with_deprecation_warning( nest.is_sequence, 'In addition, `is_iterable` is renamed to `is_sequence`.' ) pack_sequence_as = with_deprecation_warning(nest.pack_sequence_as) map = with_deprecation_warning( # pylint: disable=redefined-builtin nest.map_structure, 'In addition, `map` is renamed to `map_structure`.' ) map_up_to = with_deprecation_warning( nest.map_structure_up_to, 'In addition, `map_up_to` is renamed to `map_structure_up_to`.' ) assert_shallow_structure = with_deprecation_warning( nest.assert_shallow_structure) flatten_up_to = with_deprecation_warning(nest.flatten_up_to) flatten_dict_items = with_deprecation_warning(nest.flatten_dict_items) def pack_iterable_as(structure, flat_iterable): """See `nest.pack_sequence_as`. Provided for named-arg compatibility.""" return nest.pack_sequence_as(structure, flat_iterable) pack_iterable_as = with_deprecation_warning( pack_iterable_as, 'In addition, `pack_iterable_as` is renamed to `pack_sequence_as`.' )
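

# The example below is not part of the library: a minimal sketch showing that
# the deprecated aliases defined above simply forward to the renamed
# `tf.contrib.framework.nest` functions (and emit a one-time warning).
def _example_deprecated_aliases():
  """Sketch: flatten, map and repack a small nested structure."""
  structure = {'a': (1, 2), 'b': [3]}

  flat = flatten_iterable(structure)  # Alias of nest.flatten.
  assert flat == [1, 2, 3]

  doubled = map(lambda x: 2 * x, structure)  # Alias of nest.map_structure.
  assert doubled == {'a': (2, 4), 'b': [6]}

  repacked = pack_iterable_as(structure, flat)  # Alias of pack_sequence_as.
  assert repacked == structure
  return flat, doubled, repacked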
sonnet-1
sonnet/python/ops/nest.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Restore initializer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import re # Dependency imports import numpy as np from sonnet.python.modules import conv from sonnet.python.modules.nets import convnet from sonnet.python.ops import initializers import tensorflow.compat.v1 as tf def _checkpoint(): # Delay access to FLAGS.test_srcdir return os.path.join(os.environ['TEST_SRCDIR'], 'sonnet/sonnet/python/ops/testdata', 'restore_initializer_test_checkpoint') _ONE_CONV_LAYER = 75.901642 _TWO_CONV_LAYERS = 687.9700928 _TWO_CONV_LAYERS_RELU = 61.4554138 _TOLERANCE = 0.001 class RestoreInitializerTest(tf.test.TestCase): def testSimpleRestore(self): with tf.variable_scope('agent/conv_net_2d/conv_2d_0'): bias = tf.get_variable( 'b', shape=[16], initializer=initializers.restore_initializer(_checkpoint(), 'b')) with self.test_session() as session: session.run(tf.global_variables_initializer()) b = session.run(bias) self.assertAllClose(np.linalg.norm(b), 3.9685926, atol=_TOLERANCE) def testScopeRestore(self): c1 = conv.Conv2D( 16, 8, 4, name='conv_2d_0', padding=conv.VALID, initializers={ 'w': initializers.restore_initializer( _checkpoint(), 'w', scope='agent/conv_net_2d/conv_2d_0'), 'b': initializers.restore_initializer( _checkpoint(), 'b', scope='agent/conv_net_2d/conv_2d_0') }) inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3]) outputs = c1(inputs) init = tf.global_variables_initializer() tf.get_default_graph().finalize() with self.test_session() as session: session.run(init) o = session.run(outputs) self.assertAllClose(np.linalg.norm(o), _ONE_CONV_LAYER, atol=_TOLERANCE) def testMultipleRestore(self): g = tf.Graph() restore_initializers = { 'w': initializers.restore_initializer(_checkpoint(), 'w'), 'b': initializers.restore_initializer(_checkpoint(), 'b') } with g.as_default(): with tf.variable_scope('agent/conv_net_2d'): c1 = conv.Conv2D( 16, 8, 4, name='conv_2d_0', padding=conv.VALID, initializers=restore_initializers) c2 = conv.Conv2D( 32, 4, 2, name='conv_2d_1', padding=conv.VALID, initializers=restore_initializers) inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3]) intermediate_1 = c1(inputs) intermediate_2 = c2(tf.nn.relu(intermediate_1)) outputs = tf.nn.relu(intermediate_2) init = tf.global_variables_initializer() tf.get_default_graph().finalize() with self.test_session() as session: session.run(init) i1, i2, o = session.run([intermediate_1, intermediate_2, outputs]) self.assertAllClose(np.linalg.norm(i1), _ONE_CONV_LAYER, atol=_TOLERANCE) self.assertAllClose(np.linalg.norm(i2), _TWO_CONV_LAYERS, atol=_TOLERANCE) self.assertAllClose( np.linalg.norm(o), _TWO_CONV_LAYERS_RELU, atol=_TOLERANCE) def testMoreMultipleRestore(self): restore_initializers = { 'w': initializers.restore_initializer(_checkpoint(), 'w'), 'b': 
initializers.restore_initializer(_checkpoint(), 'b') } with tf.variable_scope('agent'): c = convnet.ConvNet2D( output_channels=(16, 32), kernel_shapes=(8, 4), strides=(4, 2), paddings=[conv.VALID], activation=tf.nn.relu, activate_final=True, initializers=restore_initializers) inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3]) outputs = c(inputs) init = tf.global_variables_initializer() tf.get_default_graph().finalize() with self.test_session() as session: session.run(init) o = session.run(outputs) self.assertAllClose( np.linalg.norm(o), _TWO_CONV_LAYERS_RELU, atol=_TOLERANCE) def testFromDifferentScope(self): sub = functools.partial(re.sub, r'^[^/]+/', 'agent/') restore_initializers = { 'w': initializers.restore_initializer(_checkpoint(), 'w', sub), 'b': initializers.restore_initializer(_checkpoint(), 'b', sub) } with tf.variable_scope('some_random_scope'): c = convnet.ConvNet2D( output_channels=(16, 32), kernel_shapes=(8, 4), strides=(4, 2), paddings=[conv.VALID], activation=tf.nn.relu, activate_final=True, initializers=restore_initializers) inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3]) outputs = c(inputs) init = tf.global_variables_initializer() tf.get_default_graph().finalize() with self.test_session() as session: session.run(init) o = session.run(outputs) self.assertAllClose( np.linalg.norm(o), _TWO_CONV_LAYERS_RELU, atol=_TOLERANCE) def testPartitionedVariable(self): save_path = os.path.join(self.get_temp_dir(), 'partitioned_variable') var_name = 'my_partitioned_var' g1 = tf.Graph() with g1.as_default(): def initializer1(shape, dtype, partition_info): _ = partition_info # Not used for creation. return tf.constant(True, dtype, shape) partitioned_var1 = list(tf.get_variable( var_name, shape=[1 << 3, 10], partitioner=tf.fixed_size_partitioner(4), initializer=initializer1, dtype=tf.bool)) with self.test_session(graph=g1) as session: with tf.device('/cpu:0'): tf.global_variables_initializer().run() pv1 = session.run(partitioned_var1) save = tf.train.Saver(partitioned_var1) save.save(session, save_path) g2 = tf.Graph() with g2.as_default(): initializer2 = initializers.restore_initializer(save_path, var_name, '') partitioned_var2 = list(tf.get_variable( var_name, shape=[1 << 3, 10], partitioner=tf.fixed_size_partitioner(4), initializer=initializer2, dtype=tf.bool)) with self.test_session(graph=g2) as session: tf.global_variables_initializer().run() pv2 = session.run(partitioned_var2) self.assertAllEqual(pv1, pv2) def testTopLevelVariable(self): save_path = os.path.join(self.get_temp_dir(), 'toplevel_variable') g1 = tf.Graph() g2 = tf.Graph() with g1.as_default(): var1 = tf.get_variable( 'var1', shape=[], initializer=tf.constant_initializer(42)) with g2.as_default(): var2 = tf.get_variable( 'var2', shape=[], initializer=initializers.restore_initializer(save_path, 'var1')) with self.test_session(graph=g1) as session: tf.global_variables_initializer().run() save = tf.train.Saver([var1]) save.save(session, save_path) with self.test_session(graph=g2) as session: tf.global_variables_initializer().run() v2 = session.run(var2) self.assertAllEqual(v2, 42) if __name__ == '__main__': tf.test.main()
sonnet-1
sonnet/python/ops/initializers_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Custom sonnet ops in TensorFlow.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function
sonnet-1
sonnet/python/ops/__init__.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """A checkpoint-restoring Tensorflow initializer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import tensorflow.compat.v1 as tf from tensorflow.python.ops import init_ops from tensorflow.python.ops import io_ops class _Restore(init_ops.Initializer): """Initializer that restores tensors from a checkpoint.""" def __init__(self, filename, var_name, scope=None): """Construct a new restoring initializer. Will read from the checkpoint from the SSTables file `filename` using the RestoreV2 Tensorflow op. The actual variable read from the checkpoint will be `scope_name` + '/' + `var_name` (or just `var_name` if `scope_name` is empty), where `scope_name` is given by one of (1) The current scope's name at the point where the initializer gets called, if the `scope` argument to this constructor is None, (2) If `scope` is callable, the result of applying it to the current scope's name, (3) Otherwise, the `scope` argument to this constructor itself. Args: filename: Name of an SSTables entry where the checkpoint is hosted. var_name: Name of the variable to restore. scope: The variable scope's name of the variable to restore, see above. """ self._filename = filename self._var_name = var_name self._scope = scope def _partition_spec(self, shape, partition_info): """Build magic (and sparsely documented) shapes_and_slices spec string.""" if partition_info is None: return '' # Empty string indicates a non-partitioned tensor. ssi = tf.Variable.SaveSliceInfo( full_name=self._var_name, full_shape=partition_info.full_shape, var_offset=partition_info.var_offset, var_shape=shape) return ssi.spec def __call__(self, shape, dtype=None, partition_info=None): # Creating different RestoreV2 ops when a single one could # output several tensors seems inefficient, but that's actually # what tf.Saver.restore_op (via tf.BaseSaverBuilder) does too. if self._scope is None: scope_name = tf.get_variable_scope().name elif callable(self._scope): scope_name = self._scope(tf.get_variable_scope().name) else: scope_name = self._scope tensor_name = self._var_name if scope_name: tensor_name = '{}/{}'.format(scope_name, tensor_name) tensor = io_ops.restore_v2( self._filename, [tensor_name], [self._partition_spec(shape, partition_info)], [dtype])[0] tensor.set_shape(shape) return tensor # pylint: disable=invalid-name restore_initializer = _Restore # pylint: enable=invalid-name
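

# The example below is not part of the library: a minimal sketch of the three
# ways the checkpoint tensor name can be derived (see the `scope` argument
# above). The checkpoint path, scope names and shapes are illustrative
# assumptions; the RestoreV2 op only reads the file when the variables are
# initialized.
def _example_restore_initializer_usage():
  """Sketch: restore variables under the three scope-resolution rules."""
  import functools
  import re

  checkpoint = '/tmp/pretrained/model.ckpt-1000'  # Hypothetical checkpoint.

  # (1) scope=None: the current scope is used, so this reads 'agent/linear/w'.
  with tf.variable_scope('agent/linear'):
    w1 = tf.get_variable(
        'w', shape=[4, 4],
        initializer=restore_initializer(checkpoint, 'w'))

  # (2) scope is callable: it rewrites the current scope name. Here the
  # leading component is mapped to 'agent/', so this also reads
  # 'agent/linear/w'.
  remap = functools.partial(re.sub, r'^[^/]+/', 'agent/')
  with tf.variable_scope('finetune/linear'):
    w2 = tf.get_variable(
        'w', shape=[4, 4],
        initializer=restore_initializer(checkpoint, 'w', remap))

  # (3) scope is a string: it is used verbatim regardless of the current
  # scope, so this reads 'pretrained/linear/w'.
  with tf.variable_scope('new_scope'):
    w3 = tf.get_variable(
        'w', shape=[4, 4],
        initializer=restore_initializer(checkpoint, 'w',
                                        scope='pretrained/linear'))
  return w1, w2, w3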
sonnet-1
sonnet/python/ops/initializers.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.ops.nest. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections # Dependency imports import numpy as np import six from sonnet.python.ops import nest import tensorflow.compat.v1 as tf typekw = "class" if six.PY3 else "type" class NestTest(tf.test.TestCase): def testStringRepeat(self): ab_tuple = collections.namedtuple("ab_tuple", "a, b") inp_a = ab_tuple(a="foo", b=("bar", "baz")) inp_b = ab_tuple(a=2, b=(1, 3)) out = nest.map(lambda string, repeats: string * repeats, inp_a, inp_b) self.assertEqual(out.a, "foofoo") self.assertEqual(out.b[0], "bar") self.assertEqual(out.b[1], "bazbazbaz") def testMapSingleCollection(self): ab_tuple = collections.namedtuple("ab_tuple", "a, b") nt = ab_tuple(a=("something", "something_else"), b="yet another thing") rev_nt = nest.map(lambda x: x[::-1], nt) # Check the output is the correct structure, and all strings are reversed. nest.assert_same_structure(nt, rev_nt) self.assertEqual(nt.a[0][::-1], rev_nt.a[0]) self.assertEqual(nt.a[1][::-1], rev_nt.a[1]) self.assertEqual(nt.b[::-1], rev_nt.b) def testMapOverTwoTuples(self): inp_a = (tf.placeholder(tf.float32, shape=[3, 4]), tf.placeholder(tf.float32, shape=[3, 7])) inp_b = (tf.placeholder(tf.float32, shape=[3, 4]), tf.placeholder(tf.float32, shape=[3, 7])) output = nest.map(lambda x1, x2: x1 + x2, inp_a, inp_b) nest.assert_same_structure(output, inp_a) self.assertShapeEqual(np.zeros((3, 4)), output[0]) self.assertShapeEqual(np.zeros((3, 7)), output[1]) feed_dict = { inp_a: (np.random.randn(3, 4), np.random.randn(3, 7)), inp_b: (np.random.randn(3, 4), np.random.randn(3, 7)) } with self.test_session() as sess: output_np = sess.run(output, feed_dict=feed_dict) self.assertAllClose(output_np[0], feed_dict[inp_a][0] + feed_dict[inp_b][0]) self.assertAllClose(output_np[1], feed_dict[inp_a][1] + feed_dict[inp_b][1]) def testStructureMustBeSame(self): inp_a = (3, 4) inp_b = (42, 42, 44) err = "The two structures don't have the same number of elements." 
with self.assertRaisesRegexp(ValueError, err): nest.map(lambda a, b: a + b, inp_a, inp_b) def testMultiNest(self): inp_a = (3, (4, 5)) inp_b = (42, (42, 44)) output = nest.map(lambda a, b: a + b, inp_a, inp_b) self.assertEqual((45, (46, 49)), output) def testNoSequences(self): with self.assertRaisesRegexp(ValueError, "Must provide at least one structure"): nest.map(lambda x: x) def testEmptySequences(self): f = lambda x: x + 1 empty_nt = collections.namedtuple("empty_nt", "") self.assertEqual((), nest.map(f, ())) self.assertEqual([], nest.map(f, [])) self.assertEqual(empty_nt(), nest.map(f, empty_nt())) # This is checking actual equality of types, empty list != empty tuple self.assertNotEqual((), nest.map(f, [])) def testFlattenAndPackIterable(self): # A nice messy mix of tuples, lists, dicts, and `OrderedDict`s. named_tuple = collections.namedtuple("A", ("b", "c")) mess = [ "z", named_tuple(3, 4), { "c": [ 1, collections.OrderedDict([ ("b", 3), ("a", 2), ]), ], "b": 5 }, 17 ] flattened = nest.flatten_iterable(mess) self.assertEqual(flattened, ["z", 3, 4, 5, 1, 2, 3, 17]) structure_of_mess = [ 14, named_tuple("a", True), { "c": [ 0, collections.OrderedDict([ ("b", 9), ("a", 8), ]), ], "b": 3 }, "hi everybody", ] unflattened = nest.pack_iterable_as(structure_of_mess, flattened) self.assertEqual(unflattened, mess) def testFlattenIterable_numpyIsNotFlattened(self): structure = np.array([1, 2, 3]) flattened = nest.flatten_iterable(structure) self.assertEqual(len(flattened), 1) def testFlattenIterable_stringIsNotFlattened(self): structure = "lots of letters" flattened = nest.flatten_iterable(structure) self.assertEqual(len(flattened), 1) def testFlatternIterable_scalarStructure(self): # Tests can call flatten_iterable with single "scalar" object. structure = "hello" flattened = nest.flatten_iterable(structure) unflattened = nest.pack_iterable_as("goodbye", flattened) self.assertEqual(structure, unflattened) def testPackIterableAs_notIterableError(self): # NOTE(taylorrobie): The second pattern is for version compatibility. with self.assertRaisesRegexp( TypeError, "(Attempted to pack value:\n bye\ninto a sequence, but found " "incompatible type `<(type|class) 'str'>` instead.)|" "(flat_sequence must be a sequence)"): nest.pack_iterable_as("hi", "bye") def testPackIterableAs_scalarStructureError(self): # NOTE(taylorrobie): The second pattern is for version compatibility. with self.assertRaisesRegexp( ValueError, "(nest cannot guarantee that it is safe to map one to the other.)|" "(Structure is a scalar)"): nest.pack_iterable_as("hi", ["bye", "twice"]) def testPackIterableAs_wrongLengthsError(self): with self.assertRaisesRegexp( ValueError, "Structure had 2 elements, but flat_sequence had 3 elements."): nest.pack_iterable_as(["hello", "world"], ["and", "goodbye", "again"]) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/ops/nest_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.base.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import inspect import pickle # Dependency imports from absl.testing import parameterized import numpy as np import six from sonnet.python.modules import base from sonnet.python.modules.base_errors import NotSupportedError import tensorflow.compat.v1 as tf from tensorflow.contrib.eager.python import tfe as contrib_eager tfe = contrib_eager logging = tf.logging class ModuleWithClassKeys(base.AbstractModule): """Dummy module that defines some keys as class attributes.""" POSSIBLE_INITIALIZER_KEYS = {"foo", "bar"} class ModuleWithNoInitializerKeys(base.AbstractModule): """Dummy module without any intiailizer keys.""" pass class ModuleWithCustomInitializerKeys(base.AbstractModule): """Dummy module that overrides get_possible_initializer_keys.""" @classmethod def get_possible_initializer_keys(cls, custom_key): return {"foo"} if custom_key else {"bar"} class IdentityModule(base.AbstractModule): """Sonnet module that builds a single `tf.identity` op.""" def _build(self, inputs): return tf.identity(inputs) class NoInitIdentityModule(base.AbstractModule): """Sonnet module that inherits `base.AbstractModule.__init__`.""" def _build(self, inputs): return tf.identity(inputs) class NoSuperInitIdentityModule(base.AbstractModule): """Sonnet module that doesn't call `base.AbstractModule.__init__`.""" def __init__(self): pass # Don't call superclass initializer. 
def _build(self, inputs): return tf.identity(inputs) class SimpleModule(base.AbstractModule): """Simple module with variables created in constructor and build.""" def __init__(self, custom_getter=None, name="simple_module"): super(SimpleModule, self).__init__(custom_getter=custom_getter, name=name) with self._enter_variable_scope(): self._b = tf.get_variable("b", dtype=tf.float32, shape=[10, 10]) def _build(self, inputs): """Connect a simple module to the graph.""" self._w = tf.get_variable("w", dtype=tf.float32, shape=[10, 10]) return self._w * inputs + self._b class ComplexModule(base.AbstractModule): """Complex module consisting of two sub modules.""" def __init__(self, custom_getter=None, name="complex_module"): super(ComplexModule, self).__init__(custom_getter=custom_getter, name=name) with self._enter_variable_scope(): self._a = SimpleModule(name="linear_1") def _build(self, inputs): self._b = SimpleModule(name="linear_2") return self._b(self._a(inputs)) # pylint: disable=not-callable class ModuleWithSubmodules(base.AbstractModule): def __init__(self, submodule_a, submodule_b, custom_getter=None, name="module_with_submodules"): super(ModuleWithSubmodules, self).__init__( custom_getter=custom_getter, name=name) self._submodule_a = submodule_a self._submodule_b = submodule_b def _build(self, inputs): c = SimpleModule(name="simple_build") d = ComplexModule(name="complex_build") return d(self._submodule_a(inputs)) + self._submodule_b(c(inputs)) # pylint: disable=not-callable @contrib_eager.run_all_tests_in_graph_and_eager_modes class AbstractModuleTest(parameterized.TestCase, tf.test.TestCase): def testInitializerKeys(self): keys = ModuleWithClassKeys.get_possible_initializer_keys() self.assertEqual(keys, {"foo", "bar"}) keys = ModuleWithNoInitializerKeys.get_possible_initializer_keys() self.assertEqual(keys, set()) if six.PY2: msg = "takes exactly 2 arguments" else: msg = "missing 1 required positional argument" self.assertRaisesRegexp( TypeError, msg, ModuleWithCustomInitializerKeys.get_possible_initializer_keys) keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(True) self.assertEqual(keys, {"foo"}) keys = ModuleWithCustomInitializerKeys.get_possible_initializer_keys(False) self.assertEqual(keys, {"bar"}) def testMultipleGraphs(self): id_mod = IdentityModule(name="identity") # gpylint incorrectly thinks IdentityModule is not callable, so disable. # pylint: disable=not-callable with tf.Graph().as_default() as graph: id_mod(tf.ones(dtype=tf.float32, shape=[42])) self.assertEqual(id_mod._graph, graph) with tf.Graph().as_default(): with self.assertRaisesRegexp(base.DifferentGraphError, "Cannot connect module"): id_mod(tf.ones(dtype=tf.float32, shape=[42])) # pylint: enable=not-callable def testNameScopeRecording(self): if tf.executing_eagerly(): self.skipTest("Name scopes are not recorded in eager mode.") id_mod = IdentityModule(name="foo") # Connect inside different name scope contexts, check that each is recorded. 
# pylint: disable=not-callable id_mod(tf.ones(dtype=tf.float32, shape=[22])) self.assertIn(id_mod.name_scopes, (("foo",), ("foo_1",))) with tf.name_scope("blah"): id_mod(tf.ones(dtype=tf.float32, shape=[23])) self.assertIn(id_mod.name_scopes, (("foo", "blah/foo"), ("foo_1", "blah/foo"))) with tf.name_scope("baz"): id_mod(tf.ones(dtype=tf.float32, shape=[24])) # pylint: enable=not-callable self.assertIn(id_mod.name_scopes, (("foo", "blah/foo", "baz/foo"), ("foo_1", "blah/foo", "baz/foo"))) def testNameScopeRecordingNotSupportedEager(self): if not tf.executing_eagerly(): self.skipTest("Name scopes are recorded in graph mode.") id_mod = IdentityModule(name="foo") id_mod(tf.ones(dtype=tf.float32, shape=[22])) with self.assertRaisesRegexp(base.NotSupportedError, "not supported in eager"): id_mod.name_scopes # pylint: disable=pointless-statement def testSubgraphsRecording(self): if tf.executing_eagerly(): self.skipTest("Subgraphs are not recorded in eager mode.") id_mod = IdentityModule(name="foo") with self.assertRaisesRegexp(base.NotConnectedError, "not instantiated yet"): id_mod.last_connected_subgraph() # pylint: disable=not-callable inputs = tf.ones(dtype=tf.float32, shape=[21]) outputs = id_mod(inputs) with tf.name_scope("blah"): blah_inputs = tf.ones(dtype=tf.float32, shape=[22]) blah_outputs = id_mod(blah_inputs) with tf.name_scope("baz"): baz_inputs = tf.ones(dtype=tf.float32, shape=[23]) baz_outputs = id_mod(baz_inputs) # pylint: enable=not-callable subgraphs = id_mod.connected_subgraphs self.assertEqual(id_mod.last_connected_subgraph.name_scope, "baz/foo") self.assertIs(id_mod.last_connected_subgraph, subgraphs[2]) self.assertIs(subgraphs[0].module, id_mod) self.assertIn(subgraphs[0].name_scope, ("foo", "foo_1")) self.assertEqual(subgraphs[1].name_scope, "blah/foo") self.assertEqual(subgraphs[2].name_scope, "baz/foo") self.assertIs(subgraphs[0].inputs["inputs"], inputs) self.assertIs(subgraphs[1].inputs["inputs"], blah_inputs) self.assertIs(subgraphs[2].inputs["inputs"], baz_inputs) self.assertIs(subgraphs[0].outputs, outputs) self.assertIs(subgraphs[1].outputs, blah_outputs) self.assertIs(subgraphs[2].outputs, baz_outputs) def testSubgraphsNotRecordedEager(self): if not tf.executing_eagerly(): self.skipTest("Subgraphs are recorded in graph mode") id_mod = IdentityModule(name="foo") with self.assertRaisesRegexp(base.NotSupportedError, "not tracked in eager mode"): id_mod.last_connected_subgraph() # pylint: disable=not-callable inputs = tf.ones(dtype=tf.float32, shape=[21]) id_mod(inputs) with tf.name_scope("blah"): blah_inputs = tf.ones(dtype=tf.float32, shape=[22]) id_mod(blah_inputs) with tf.name_scope("baz"): baz_inputs = tf.ones(dtype=tf.float32, shape=[23]) id_mod(baz_inputs) # pylint: enable=not-callable with self.assertRaisesRegexp(base.NotSupportedError, "not tracked in eager mode"): id_mod.connected_subgraphs # pylint: disable=pointless-statement def testInitNoNamedArgs(self): """Tests if calling __init__ without named args raises a ValueError.""" with self.assertRaises(ValueError): NoInitIdentityModule("foobar") def testInitInvalidTypeArgs(self): """Tests if calling __init__ without a string name raises a TypeError.""" with self.assertRaises(TypeError): NoInitIdentityModule(name=123) def testInitNoArgs(self): """Tests if calling __init__ with no args uses correct defaults.""" module = NoInitIdentityModule() self.assertEqual(module.module_name, "no_init_identity_module") def testInitNoSuper(self): """Tests if a __call__ with no __init__ raises an error.""" module = 
NoSuperInitIdentityModule() with self.assertRaises(base.NotInitializedError): module(tf.constant([1])) # pylint: disable=not-callable def testPicklingNotSupported(self): module = IdentityModule() with self.assertRaisesRegexp(base.NotSupportedError, "cannot be serialized"): # Writing the object to a string will fail. pickle.dumps(module) def testCustomGetter(self): connection_count = {"x": 0} def custom_getter(getter, name, *args, **kwargs): connection_count["x"] += 1 return getter(name, *args, **kwargs) inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) with tf.variable_scope("scope"): module = SimpleModule(name="mod1") module(inputs) # pylint: disable=not-callable self.assertEqual(0, connection_count["x"]) module = SimpleModule(custom_getter=custom_getter, name="mod2") module(inputs) # pylint: disable=not-callable self.assertEqual(2, connection_count["x"]) # w & b module = SimpleModule(custom_getter={"w": custom_getter}, name="mod3") module(inputs) # pylint: disable=not-callable self.assertEqual(3, connection_count["x"]) # w module = SimpleModule(custom_getter={"w.*": custom_getter}, name="mod3") module(inputs) # pylint: disable=not-callable self.assertEqual(4, connection_count["x"]) # w module = SimpleModule(custom_getter={".*": custom_getter}, name="mod4") module(inputs) # pylint: disable=not-callable self.assertEqual(6, connection_count["x"]) # w & b err = r"More than one custom_getter matched scope/mod5/w \(w\):.*" with self.assertRaisesRegexp(KeyError, err): module = SimpleModule( custom_getter={".*": custom_getter, "w.*": custom_getter}, name="mod5") module(inputs) # pylint: disable=not-callable err = "Given custom_getter is not callable." with self.assertRaisesRegexp(TypeError, err): module = SimpleModule(custom_getter=0, name="mod6") with self.assertRaisesRegexp(TypeError, err): module = SimpleModule(custom_getter={"w": 0}, name="mod7") def testCustomGetterNested(self): def custom_getter(getter, name, *args, **kwargs): kwargs["trainable"] = False return getter(name, *args, **kwargs) inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) with tf.variable_scope("scope"): module = ComplexModule(name="mod1") module(inputs) # pylint: disable=not-callable self.assertLen(tf.trainable_variables(), 4) module = ComplexModule(custom_getter=custom_getter, name="mod2") module(inputs) # pylint: disable=not-callable self.assertLen(tf.trainable_variables(), 4) # All variables. module = ComplexModule(custom_getter={".*/w": custom_getter}, name="mod3") module(inputs) # pylint: disable=not-callable trainable_names = [v.name for v in tf.trainable_variables()] self.assertLen(trainable_names, 6) # linear_1/w and linear_2/w. self.assertIn("scope/mod3/linear_1/b:0", trainable_names) self.assertIn("scope/mod3/linear_2/b:0", trainable_names) module = ComplexModule(custom_getter={".*/b": custom_getter}, name="mod4") module(inputs) # pylint: disable=not-callable trainable_names = [v.name for v in tf.trainable_variables()] self.assertLen(trainable_names, 8) # linear_1/b and linear_2/b. self.assertIn("scope/mod4/linear_1/w:0", trainable_names) self.assertIn("scope/mod4/linear_2/w:0", trainable_names) module = ComplexModule(custom_getter={".*": custom_getter}, name="mod5") module(inputs) # pylint: disable=not-callable self.assertLen(tf.trainable_variables(), 8) # All variables. module = ComplexModule(custom_getter={"w": custom_getter}, name="mod6") module(inputs) # pylint: disable=not-callable self.assertLen(tf.trainable_variables(), 12) # No variables. 
@parameterized.parameters( [lambda m: m.get_all_variables(), lambda m: m.variables, lambda m: m.trainable_variables] ) def testGetAllTrainableVariables(self, all_trainable_variables): inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) submodule_a = SimpleModule(name="simple_submodule") submodule_b = ComplexModule(name="complex_submodule") module = ModuleWithSubmodules( submodule_a=submodule_a, submodule_b=submodule_b) with self.assertRaisesRegexp(base.NotConnectedError, "not instantiated yet"): all_trainable_variables(module) module(inputs) # pylint: disable=not-callable # Check correct for SimpleModule. submodule_a_variables = submodule_a.get_variables() submodule_a_variable_names = sorted( [str(v.name) for v in submodule_a_variables]) submodule_a_all_variables = all_trainable_variables(submodule_a) submodule_a_all_variable_names = sorted( [str(v.name) for v in submodule_a_all_variables]) self.assertEqual(submodule_a_variable_names, submodule_a_all_variable_names) self.assertEqual([ "simple_submodule/b:0", "simple_submodule/w:0", ], submodule_a_variable_names) # Check correct for ComplexModule submodule_b_variables = all_trainable_variables(submodule_b) submodule_b_variable_names = sorted( [str(v.name) for v in submodule_b_variables]) self.assertEqual([ "complex_submodule/linear_1/b:0", "complex_submodule/linear_1/w:0", "complex_submodule/linear_2/b:0", "complex_submodule/linear_2/w:0", ], submodule_b_variable_names) all_variables = all_trainable_variables(module) all_variable_names = sorted([str(v.name) for v in all_variables]) self.assertEqual([ "complex_submodule/linear_1/b:0", "complex_submodule/linear_1/w:0", "complex_submodule/linear_2/b:0", "complex_submodule/linear_2/w:0", "module_with_submodules/complex_build/linear_1/b:0", "module_with_submodules/complex_build/linear_1/w:0", "module_with_submodules/complex_build/linear_2/b:0", "module_with_submodules/complex_build/linear_2/w:0", "module_with_submodules/simple_build/b:0", "module_with_submodules/simple_build/w:0", "simple_submodule/b:0", "simple_submodule/w:0", ], all_variable_names) self.assertEmpty(module.get_all_variables( collection=tf.GraphKeys.LOCAL_VARIABLES)) # Create another ModuleWithSubmodules with the same submodules module = ModuleWithSubmodules( submodule_a=submodule_a, submodule_b=submodule_b) module(inputs) # pylint: disable=not-callable all_variables = all_trainable_variables(module) all_variable_names = sorted([str(v.name) for v in all_variables]) self.assertEqual([ "complex_submodule/linear_1/b:0", "complex_submodule/linear_1/w:0", "complex_submodule/linear_2/b:0", "complex_submodule/linear_2/w:0", "module_with_submodules_1/complex_build/linear_1/b:0", "module_with_submodules_1/complex_build/linear_1/w:0", "module_with_submodules_1/complex_build/linear_2/b:0", "module_with_submodules_1/complex_build/linear_2/w:0", "module_with_submodules_1/simple_build/b:0", "module_with_submodules_1/simple_build/w:0", "simple_submodule/b:0", "simple_submodule/w:0", ], all_variable_names) @parameterized.parameters( [lambda m: m.get_all_variables(tf.GraphKeys.LOCAL_VARIABLES), lambda m: m.non_trainable_variables]) def testGetAllLocalVariables(self, get_non_trainable_variables): def local_custom_getter(getter, *args, **kwargs): kwargs["trainable"] = False if "collections" in kwargs and kwargs["collections"] is not None: kwargs["collections"] += [tf.GraphKeys.LOCAL_VARIABLES] else: kwargs["collections"] = [tf.GraphKeys.LOCAL_VARIABLES] return getter(*args, **kwargs) inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) # Create a new 
ModuleWithSubmodules that uses all local variables with tf.variable_scope("", custom_getter=local_custom_getter): submodule_a = SimpleModule(name="simple_submodule") submodule_b = ComplexModule(name="complex_submodule") local_module = ModuleWithSubmodules( submodule_a=submodule_a, submodule_b=submodule_b) local_module(inputs) # pylint: disable=not-callable self.assertEmpty(local_module.get_all_variables()) self.assertEmpty(tf.all_variables()) self.assertLen(tf.local_variables(), 12) all_variables = get_non_trainable_variables(local_module) all_variable_names = sorted([str(v.name) for v in all_variables]) self.assertEqual([ "complex_submodule/linear_1/b:0", "complex_submodule/linear_1/w:0", "complex_submodule/linear_2/b:0", "complex_submodule/linear_2/w:0", "module_with_submodules/complex_build/linear_1/b:0", "module_with_submodules/complex_build/linear_1/w:0", "module_with_submodules/complex_build/linear_2/b:0", "module_with_submodules/complex_build/linear_2/w:0", "module_with_submodules/simple_build/b:0", "module_with_submodules/simple_build/w:0", "simple_submodule/b:0", "simple_submodule/w:0", ], all_variable_names) def testGetAllVariablesWithConditionalConstruction(self): inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) cond = tf.constant(0.) module_a = SimpleModule(name="module_a") module_b = SimpleModule(name="module_b") _ = tf.cond(cond > 0, lambda: module_a(inputs), lambda: module_b(inputs)) # pylint: disable=not-callable if tf.executing_eagerly(): # In eager mode only the true branch is taken. msg = "module_a not instantiated yet" with self.assertRaisesRegexp(base.NotConnectedError, msg): module_a.get_all_variables() else: # check module_a all_variables = module_a.get_all_variables() all_variable_names = sorted([str(v.name) for v in all_variables]) self.assertEqual(["module_a/b:0", "module_a/w:0"], all_variable_names) # check module_b all_variables = module_b.get_all_variables() all_variable_names = sorted([str(v.name) for v in all_variables]) self.assertEqual(["module_b/b:0", "module_b/w:0"], all_variable_names) @parameterized.parameters(None, "", "complex_module") def testVariablesFromNestedModule(self, name): outer = ComplexModule(name=name) outer(tf.zeros([10, 10])) inner1 = outer._b outer(tf.zeros([10, 10])) inner2 = outer._b # Calling the outer module triggers the inner module to re-constructed. The # new inner module should have literally the same variables as the old one. 
self.assertIsNot(inner1, inner2) self.assertNotEmpty(inner1.variables) self.assertLen(inner2.variables, len(inner1.variables)) for v1, v2 in zip(inner1.variables, inner2.variables): self.assertIs(v1, v2) def testCallSignatureAndDocstring(self): my_module = SimpleModule() self.assertEqual( inspect.getargspec(my_module.__call__), inspect.getargspec(my_module._build)) self.assertEqual(my_module.__call__.__doc__, my_module._build.__doc__) def _make_model_with_params(inputs, output_size): weight_shape = [inputs.get_shape().as_list()[-1], output_size] weight = tf.get_variable("w", shape=weight_shape, dtype=inputs.dtype) return tf.matmul(inputs, weight) @contrib_eager.run_all_tests_in_graph_and_eager_modes class ModuleTest(tf.test.TestCase): def testFunctionType(self): with self.assertRaises(TypeError) as cm: base.Module(build="not_a_function") self.assertEqual(str(cm.exception), "Input 'build' must be callable.") def testSharing(self): batch_size = 3 in_size = 4 input_data = np.random.rand(batch_size, in_size) inputs1 = tf.constant(input_data) inputs2 = tf.constant(input_data) build = functools.partial(_make_model_with_params, output_size=10) model = base.Module(build) self.assertEqual(model.scope_name, "make_model_with_params") outputs1 = model(inputs1) outputs2 = model(inputs2) self.evaluate(tf.global_variables_initializer()) outputs1, outputs2 = self.evaluate([outputs1, outputs2]) self.assertAllClose(outputs1, outputs2) def testCustomGetter(self): def simple_module_build(inputs): w = tf.get_variable("w", dtype=tf.float32, shape=[10, 10]) b = tf.get_variable("b", dtype=tf.float32, shape=[10, 10]) return w * inputs + b connection_count = {"x": 0} def custom_getter(getter, name, *args, **kwargs): connection_count["x"] += 1 return getter(name, *args, **kwargs) create_module = functools.partial(base.Module, build=simple_module_build) inputs = tf.ones(dtype=tf.float32, shape=[10, 10]) with tf.variable_scope("scope"): module = create_module(name="mod1") module(inputs) # pylint: disable=not-callable self.assertEqual(0, connection_count["x"]) module = create_module(custom_getter=custom_getter, name="mod2") module(inputs) # pylint: disable=not-callable self.assertEqual(2, connection_count["x"]) # w & b module = create_module(custom_getter={"w": custom_getter}, name="mod3") module(inputs) # pylint: disable=not-callable self.assertEqual(3, connection_count["x"]) # w module = create_module(custom_getter={"w.*": custom_getter}, name="mod3") module(inputs) # pylint: disable=not-callable self.assertEqual(4, connection_count["x"]) # w module = create_module(custom_getter={".*": custom_getter}, name="mod4") module(inputs) # pylint: disable=not-callable self.assertEqual(6, connection_count["x"]) # w & b err = r"More than one custom_getter matched scope/mod5/w \(w\):.*" with self.assertRaisesRegexp(KeyError, err): module = create_module( custom_getter={".*": custom_getter, "w.*": custom_getter}, name="mod5") module(inputs) # pylint: disable=not-callable err = "Given custom_getter is not callable." with self.assertRaisesRegexp(TypeError, err): module = create_module(custom_getter=0, name="mod6") with self.assertRaisesRegexp(TypeError, err): module = create_module(custom_getter={"w": 0}, name="mod7") def testGetVariablesDifferentGraphScope(self): with tf.Graph().as_default(): inputs = tf.constant(np.random.rand(10, 10), dtype=tf.float32) simple_module = SimpleModule() simple_module(inputs) # pylint: disable=not-callable # Should have 2 variables whether queried in or out of the Graph scope. 
self.assertEqual(len(simple_module.get_variables()), 2) self.assertEqual(len(simple_module.get_variables()), 2) def testGraphProperty(self): with tf.Graph().as_default() as graph_1: id_a = IdentityModule() id_a(tf.constant(np.zeros(10))) # pylint: disable=not-callable id_b = IdentityModule() id_b(tf.constant(np.ones(5))) # pylint: disable=not-callable with tf.Graph().as_default() as graph_2: id_c = IdentityModule() id_c(tf.constant(np.eye(3))) # pylint: disable=not-callable self.assertEqual(id_a.graph, id_b.graph) self.assertEqual(id_a.graph, graph_1) self.assertNotEqual(id_a.graph, id_c.graph) self.assertEqual(id_c.graph, graph_2) class ConnectionObserverTest(tf.test.TestCase): def _connection_observer(self, subgraph): self._connected_subgraphs.append(subgraph) def setUp(self): self._inputs = tf.zeros(shape=(10, 10), dtype=tf.float32) self._connected_subgraphs = [] def testObservesWrappedFunction(self): activation_module = base.Module(tf.nn.relu) with base.observe_connections(self._connection_observer): outputs = activation_module(self._inputs) self.assertEqual(1, len(self._connected_subgraphs)) self.assertIs(activation_module, self._connected_subgraphs[0].module) self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["args"][0]) self.assertIs(self._connected_subgraphs[0].outputs, outputs) def testObservesSimpleModule(self): simple_module = SimpleModule() with base.observe_connections(self._connection_observer): outputs = simple_module(self._inputs) self.assertEqual(1, len(self._connected_subgraphs)) self.assertIs(simple_module, self._connected_subgraphs[0].module) self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["inputs"]) self.assertIs(self._connected_subgraphs[0].outputs, outputs) def testObservesComplexModule(self): complex_module = ComplexModule() with base.observe_connections(self._connection_observer): outputs = complex_module(self._inputs) self.assertEqual(3, len(self._connected_subgraphs)) self.assertIsInstance(self._connected_subgraphs[0].module, SimpleModule) self.assertIs(self._inputs, self._connected_subgraphs[0].inputs["inputs"]) self.assertIsInstance(self._connected_subgraphs[1].module, SimpleModule) self.assertIs(self._connected_subgraphs[0].outputs, self._connected_subgraphs[1].inputs["inputs"]) self.assertIs(self._connected_subgraphs[1].outputs, outputs) self.assertIs(complex_module, self._connected_subgraphs[2].module) self.assertIs(self._connected_subgraphs[2].outputs, outputs) class MatMulModule(base.AbstractModule): call_count = 0 def _build(self, x): self.call_count += 1 self.w = tf.get_variable("w", [x.shape[1], 32]) return x * self.w @contrib_eager.run_all_tests_in_graph_and_eager_modes class DefunTest(tf.test.TestCase): def testDefunWrappedProperty(self): module = MatMulModule() self.assertFalse(module.defun_wrapped) for _ in range(2): module.defun() self.assertTrue(module.defun_wrapped) def testCallWithDefun(self): module = MatMulModule() module.defun() batch_size = 10 output = module(tf.zeros([batch_size, 1])) self.assertListEqual(output.shape.as_list(), [batch_size, 32]) def testCallWithDefunTracingTwice(self): module = MatMulModule() module.defun() batch_size = 10 for _ in range(2): output = module(tf.zeros([batch_size, 1])) self.assertListEqual(output.shape.as_list(), [batch_size, 32]) self.assertEqual(module.call_count, 1) # Calling with a different batch_size causes `defun` to re-trace our module. 
    batch_size *= 2
    for _ in range(2):
      output = module(tf.zeros([batch_size, 1]))
      self.assertListEqual(output.shape.as_list(), [batch_size, 32])
    self.assertEqual(module.call_count, 2)

  def testGetVariablesDisabledWhenUsingDefun(self):
    module = MatMulModule()
    module.defun()
    module(tf.zeros([1, 1]))
    if tf.executing_eagerly():
      msg = ".*get_variables.*not supported .* wrapped with defun"
      with self.assertRaisesRegexp(NotSupportedError, msg):
        module.get_variables()
    else:
      self.assertEqual(module.get_variables(), (module.w,))


if __name__ == "__main__":
  tf.test.main()
sonnet-1
sonnet/python/modules/base_test.py
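Illustrative sketch, not part of the repository file above: it shows the `snt.Module`-wrapping-a-build-function pattern that `testSharing` exercises, i.e. repeated connections of one module reuse the same variables. The function name, sizes, and tensor values are made up for illustration, and graph-mode TF1 execution is assumed.

import sonnet as snt
import tensorflow.compat.v1 as tf


def shared_linear_build(inputs):
  # Variables created with tf.get_variable inside a build function are reused
  # on every subsequent connection of the wrapping Module instance.
  output_size = 4
  w = tf.get_variable(
      "w", shape=[inputs.get_shape().as_list()[-1], output_size],
      dtype=inputs.dtype)
  return tf.matmul(inputs, w)


model = snt.Module(shared_linear_build, name="shared_linear")
out_a = model(tf.ones([2, 3]))  # First connection creates shared_linear/w.
out_b = model(tf.ones([5, 3]))  # Second connection reuses the same variable.

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run([out_a, out_b])
print([v.name for v in model.get_variables()])  # ['shared_linear/w:0']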
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Recurrent cores in snt.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import itertools # Dependency imports from absl.testing import parameterized import mock import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import rnn as contrib_rnn from tensorflow.contrib.eager.python import tfe as contrib_eager from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import @contrib_eager.run_all_tests_in_graph_and_eager_modes class VanillaRNNTest(tf.test.TestCase): def setUp(self): super(VanillaRNNTest, self).setUp() self.batch_size = 3 self.in_size = 4 self.hidden_size = 18 def testShape(self): inputs = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size) output, next_state = vanilla_rnn(inputs, prev_state) shape = np.ndarray((self.batch_size, self.hidden_size)) self.assertShapeEqual(shape, output) self.assertShapeEqual(shape, next_state) def testVariables(self): mod_name = "rnn" inputs = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) vanilla_rnn = snt.VanillaRNN(name=mod_name, hidden_size=self.hidden_size) self.assertEqual(vanilla_rnn.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): vanilla_rnn.get_variables() vanilla_rnn(inputs, prev_state) rnn_variables = vanilla_rnn.get_variables() self.assertEqual(len(rnn_variables), 4, "RNN should have 4 variables") in_to_hidden_w = next( v for v in rnn_variables if v.name == "%s/in_to_hidden/w:0" % mod_name) in_to_hidden_b = next( v for v in rnn_variables if v.name == "%s/in_to_hidden/b:0" % mod_name) hidden_to_hidden_w = next( v for v in rnn_variables if v.name == "%s/hidden_to_hidden/w:0" % mod_name) hidden_to_hidden_b = next( v for v in rnn_variables if v.name == "%s/hidden_to_hidden/b:0" % mod_name) self.assertShapeEqual(np.ndarray((self.in_size, self.hidden_size)), tf.convert_to_tensor(in_to_hidden_w)) self.assertShapeEqual(np.ndarray(self.hidden_size), tf.convert_to_tensor(in_to_hidden_b)) self.assertShapeEqual(np.ndarray((self.hidden_size, self.hidden_size)), tf.convert_to_tensor(hidden_to_hidden_w)) self.assertShapeEqual(np.ndarray(self.hidden_size), tf.convert_to_tensor(hidden_to_hidden_b)) def testComputation(self): input_data = np.random.randn(self.batch_size, self.in_size) prev_state_data = np.random.randn(self.batch_size, self.hidden_size) inputs = tf.convert_to_tensor(input_data) prev_state = tf.convert_to_tensor(prev_state_data) vanilla_rnn = 
snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size) output, next_state = vanilla_rnn(inputs, prev_state) in_to_hid = vanilla_rnn.in_to_hidden_variables hid_to_hid = vanilla_rnn.hidden_to_hidden_variables # With random data, check the TF calculation matches the Numpy version. self.evaluate(tf.global_variables_initializer()) fetches = [output, next_state, in_to_hid[0], in_to_hid[1], hid_to_hid[0], hid_to_hid[1]] output = self.evaluate(fetches) output_v, next_state_v, in_to_hid_w, in_to_hid_b = output[:4] hid_to_hid_w, hid_to_hid_b = output[4:] real_in_to_hid = np.dot(input_data, in_to_hid_w) + in_to_hid_b real_hid_to_hid = np.dot(prev_state_data, hid_to_hid_w) + hid_to_hid_b real_output = np.tanh(real_in_to_hid + real_hid_to_hid) self.assertAllClose(real_output, output_v) self.assertAllClose(real_output, next_state_v) def testInitializers(self): inputs = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, initializers={"invalid": None}) err = "Initializer for 'w' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, initializers={"in_to_hidden": {"w": tf.zeros([10, 10])}}) # Nested initializer. valid_initializers = { "in_to_hidden": { "w": tf.ones_initializer(), }, "hidden_to_hidden": { "b": tf.ones_initializer(), } } vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, initializers=valid_initializers) vanilla_rnn(inputs, prev_state) init = tf.global_variables_initializer() self.evaluate(init) w_v, b_v = self.evaluate([ vanilla_rnn.in_to_hidden_linear.w, vanilla_rnn.hidden_to_hidden_linear.b, ]) self.assertAllClose(w_v, np.ones([self.in_size, self.hidden_size])) self.assertAllClose(b_v, np.ones([self.hidden_size])) def testPartitioners(self): if tf.executing_eagerly(): self.skipTest("Partitioned variables are not supported in eager mode.") inputs = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, partitioners={"invalid": None}) err = "Partitioner for 'w' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, partitioners={"in_to_hidden": {"w": tf.zeros([10, 10])}}) # Nested partitioners. 
valid_partitioners = { "in_to_hidden": { "w": tf.fixed_size_partitioner(num_shards=2), "b": tf.fixed_size_partitioner(num_shards=2), }, "hidden_to_hidden": { "w": tf.fixed_size_partitioner(num_shards=2), "b": tf.fixed_size_partitioner(num_shards=2), } } vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, partitioners=valid_partitioners) vanilla_rnn(inputs, prev_state) self.assertEqual(type(vanilla_rnn.in_to_hidden_linear.w), variables.PartitionedVariable) self.assertEqual(type(vanilla_rnn.in_to_hidden_linear.b), variables.PartitionedVariable) self.assertEqual(type(vanilla_rnn.hidden_to_hidden_linear.w), variables.PartitionedVariable) self.assertEqual(type(vanilla_rnn.hidden_to_hidden_linear.b), variables.PartitionedVariable) def testRegularizers(self): inputs = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, regularizers={"invalid": None}) err = "Regularizer for 'w' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, regularizers={"in_to_hidden": {"w": tf.zeros([10, 10])}}) # Nested regularizers. valid_regularizers = { "in_to_hidden": { "w": tf.nn.l2_loss, }, "hidden_to_hidden": { "b": tf.nn.l2_loss, } } vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.hidden_size, regularizers=valid_regularizers) vanilla_rnn(inputs, prev_state) regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertEqual(len(regularizers), 2) @contrib_eager.run_all_tests_in_graph_and_eager_modes class DeepRNNTest(tf.test.TestCase, parameterized.TestCase): def testShape(self): batch_size = 3 batch_size_shape = tf.TensorShape(batch_size) in_size = 2 hidden1_size = 4 hidden2_size = 5 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, in_size]) prev_state0 = tf.ones( dtype=tf.float32, shape=[batch_size, in_size]) prev_state1 = tf.ones( dtype=tf.float32, shape=[batch_size, hidden1_size]) prev_state2 = tf.ones( dtype=tf.float32, shape=[batch_size, hidden2_size]) prev_state = (prev_state0, prev_state1, prev_state2) # Test recurrent and non-recurrent cores cores = [snt.VanillaRNN(name="rnn0", hidden_size=in_size), snt.VanillaRNN(name="rnn1", hidden_size=hidden1_size), snt.VanillaRNN(name="rnn2", hidden_size=hidden2_size)] deep_rnn = snt.DeepRNN(cores, name="deep_rnn", skip_connections=True) output, next_state = deep_rnn(inputs, prev_state) output_shape = output.get_shape() output_size = in_size + hidden1_size + hidden2_size self.assertTrue( output_shape.is_compatible_with([batch_size, output_size])) self.assertTrue(output_shape.is_compatible_with( batch_size_shape.concatenate(deep_rnn.output_size))) next_state_shape = (next_state[0].get_shape(), next_state[1].get_shape(), next_state[2].get_shape()) self.assertTrue( next_state_shape[0].is_compatible_with([batch_size, in_size])) self.assertTrue( next_state_shape[1].is_compatible_with([batch_size, hidden1_size])) self.assertTrue( next_state_shape[2].is_compatible_with([batch_size, hidden2_size])) for state_shape, expected_shape in zip(next_state_shape, deep_rnn.state_size): self.assertTrue(state_shape.is_compatible_with( batch_size_shape.concatenate(expected_shape))) # Initial state should be a valid state initial_state = deep_rnn.initial_state(batch_size, tf.float32) self.assertTrue(len(initial_state), len(next_state)) 
self.assertShapeEqual(np.ndarray((batch_size, in_size)), initial_state[0]) self.assertShapeEqual(np.ndarray((batch_size, hidden1_size)), initial_state[1]) self.assertShapeEqual(np.ndarray((batch_size, hidden2_size)), initial_state[2]) def testMultiDimShape(self): batch_size = 3 num_layers = 2 input_shape = (8, 8) input_channels = 4 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) output_shape = input_shape[:-1] + (output_channels,) inputs = tf.ones(dtype=tf.float32, shape=input_shape) prev_hidden = tf.ones(dtype=tf.float32, shape=output_shape) prev_cell = tf.ones(dtype=tf.float32, shape=output_shape) prev_state = [(prev_hidden, prev_cell) for _ in range(num_layers)] def _create_lstm(): return snt.Conv2DLSTM( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1) deep_rnn = snt.DeepRNN([_create_lstm() for _ in range(num_layers)], name="deep_rnn", skip_connections=True) output, next_state = deep_rnn(inputs, prev_state) expected_output_shape = list(output_shape) expected_output_shape[-1] *= num_layers self.assertAllEqual(output.get_shape().as_list(), expected_output_shape) self.assertAllEqual(deep_rnn.output_size.as_list(), expected_output_shape[1:]) next_state_shape = [ [state[0].get_shape().as_list(), state[1].get_shape().as_list()] for state in next_state] expected_next_state_shape = ( [[list(output_shape) for _ in range(2)]] * num_layers) self.assertAllEqual(next_state_shape, expected_next_state_shape) def testIncompatibleOptions(self): in_size = 2 hidden1_size = 4 hidden2_size = 5 cores = [snt.Linear(name="linear", output_size=in_size), snt.VanillaRNN(name="rnn1", hidden_size=hidden1_size), snt.VanillaRNN(name="rnn2", hidden_size=hidden2_size)] with self.assertRaisesRegexp( ValueError, "skip_connections are enabled but not all cores are " "`snt.RNNCore`s, which is not supported"): snt.DeepRNN(cores, name="deep_rnn", skip_connections=True) cells = [contrib_rnn.BasicLSTMCell(5), contrib_rnn.BasicLSTMCell(5)] with self.assertRaisesRegexp( ValueError, "skip_connections are enabled but not all cores are " "`snt.RNNCore`s, which is not supported"): snt.DeepRNN(cells, skip_connections=True) def test_non_recurrent_mappings(self): insize = 2 hidden1_size = 4 hidden2_size = 5 seq_length = 7 batch_size = 3 # As mentioned above, non-recurrent cores are not supported with # skip connections. But test that some number of non-recurrent cores # is okay (particularly as the last core) without skip connections. 
cores1 = [snt.LSTM(hidden1_size), tf.tanh, snt.Linear(hidden2_size)] core1 = snt.DeepRNN(cores1, skip_connections=False) core1_h0 = core1.initial_state(batch_size=batch_size) cores2 = [snt.LSTM(hidden1_size), snt.Linear(hidden2_size), tf.tanh] core2 = snt.DeepRNN(cores2, skip_connections=False) core2_h0 = core2.initial_state(batch_size=batch_size) xseq = tf.random_normal(shape=[seq_length, batch_size, insize]) y1, _ = tf.nn.dynamic_rnn( core1, xseq, initial_state=core1_h0, time_major=True) y2, _ = tf.nn.dynamic_rnn( core2, xseq, initial_state=core2_h0, time_major=True) self.evaluate(tf.global_variables_initializer()) self.evaluate([y1, y2]) def testVariables(self): batch_size = 3 in_size = 2 hidden1_size = 4 hidden2_size = 5 mod_name = "deep_rnn" inputs = tf.ones(dtype=tf.float32, shape=[batch_size, in_size]) prev_state1 = tf.ones( dtype=tf.float32, shape=[batch_size, hidden1_size]) prev_state2 = tf.ones( dtype=tf.float32, shape=[batch_size, hidden1_size]) prev_state = (prev_state1, prev_state2) cores = [snt.VanillaRNN(name="rnn1", hidden_size=hidden1_size), snt.VanillaRNN(name="rnn2", hidden_size=hidden2_size)] deep_rnn = snt.DeepRNN(cores, name=mod_name) self.assertEqual(deep_rnn.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): deep_rnn.get_variables() deep_rnn(inputs, prev_state) # No variables now exposed by the DeepRNN. self.assertEqual(deep_rnn.get_variables(), ()) # Have to retrieve the modules from the cores individually. deep_rnn_variables = tuple(itertools.chain.from_iterable( [c.get_variables() for c in cores])) self.assertEqual(len(deep_rnn_variables), 4 * len(cores), # pylint: disable=g-generic-assert "Cores should have %d variables" % (4 * len(cores))) for v in deep_rnn_variables: self.assertRegexpMatches( v.name, "rnn(1|2)/(in_to_hidden|hidden_to_hidden)/(w|b):0") @parameterized.parameters((True, True), (True, False), (False, True), (False, False)) def testComputation(self, skip_connections, create_initial_state): batch_size = 3 in_size = 2 hidden1_size = 4 hidden2_size = 5 mod_name = "deep_rnn" cores = [snt.VanillaRNN(name="rnn1", hidden_size=hidden1_size), snt.VanillaRNN(name="rnn2", hidden_size=hidden2_size)] deep_rnn = snt.DeepRNN(cores, name=mod_name, skip_connections=skip_connections) inputs = tf.constant(np.random.randn(batch_size, in_size), dtype=tf.float32) if create_initial_state: prev_state = deep_rnn.initial_state(batch_size, tf.float32) else: prev_state1 = tf.constant( np.random.randn(batch_size, hidden1_size), dtype=tf.float32) prev_state2 = tf.constant( np.random.randn(batch_size, hidden2_size), dtype=tf.float32) prev_state = (prev_state1, prev_state2) output, next_state = deep_rnn(inputs, prev_state) # With random data, check the DeepRNN calculation matches the manual # stacking version. 
self.evaluate(tf.global_variables_initializer()) outputs_value = self.evaluate([output, next_state[0], next_state[1]]) output_value, next_state1_value, next_state2_value = outputs_value # Build manual computation graph output1, next_state1 = cores[0](inputs, prev_state[0]) if skip_connections: input2 = tf.concat([inputs, output1], 1) else: input2 = output1 output2, next_state2 = cores[1](input2, prev_state[1]) if skip_connections: manual_output = tf.concat([output1, output2], 1) else: manual_output = output2 manual_outputs_value = self.evaluate( [manual_output, next_state1, next_state2]) manual_output_value = manual_outputs_value[0] manual_next_state1_value = manual_outputs_value[1] manual_next_state2_value = manual_outputs_value[2] self.assertAllClose(output_value, manual_output_value) self.assertAllClose(next_state1_value, manual_next_state1_value) self.assertAllClose(next_state2_value, manual_next_state2_value) def testNonRecurrentOnly(self): batch_size = 3 in_size = 2 output1_size = 4 output2_size = 5 cores = [snt.Linear(name="linear1", output_size=output1_size), snt.Linear(name="linear2", output_size=output2_size)] # Build DeepRNN of non-recurrent components. deep_rnn = snt.DeepRNN(cores, name="deeprnn", skip_connections=False) input_data = np.random.randn(batch_size, in_size) input_ = tf.constant(input_data, dtype=tf.float32) output, _ = deep_rnn(input_, ()) # Build manual computation graph. output1 = cores[0](input_) input2 = output1 output2 = cores[1](input2) manual_output = output2 self.evaluate(tf.global_variables_initializer()) output_value = self.evaluate(output) manual_out_value = self.evaluate(manual_output) self.assertAllClose(output_value, manual_out_value) @parameterized.parameters((False, False), (False, True), (True, False), (True, True)) def testInitialState(self, trainable, use_custom_initial_value): batch_size = 3 hidden1_size = 4 hidden2_size = 5 output1_size = 6 output2_size = 7 initializer = None if use_custom_initial_value: initializer = [tf.constant_initializer(8), tf.constant_initializer(9)] # Test that the initial state of a non-recurrent DeepRNN is an empty list. non_recurrent_cores = [snt.Linear(output_size=output1_size), snt.Linear(output_size=output2_size)] dummy_deep_rnn = snt.DeepRNN(non_recurrent_cores, skip_connections=False) dummy_initial_state = dummy_deep_rnn.initial_state( batch_size, trainable=trainable) self.assertFalse(dummy_initial_state) # Test that the initial state of a recurrent DeepRNN is the same as calling # all cores' initial_state method. cores = [snt.VanillaRNN(hidden_size=hidden1_size), snt.VanillaRNN(hidden_size=hidden2_size)] deep_rnn = snt.DeepRNN(cores) initial_state = deep_rnn.initial_state(batch_size, trainable=trainable, trainable_initializers=initializer) expected_initial_state = [] for i, core in enumerate(cores): with tf.variable_scope("core-%d" % i): expected_initializer = None if initializer: expected_initializer = initializer[i] expected_initial_state.append( core.initial_state(batch_size, trainable=trainable, trainable_initializers=expected_initializer)) self.evaluate(tf.global_variables_initializer()) initial_state_value = self.evaluate(initial_state) expected_initial_state_value = self.evaluate(expected_initial_state) for expected_value, actual_value in zip(expected_initial_state_value, initial_state_value): self.assertAllEqual(actual_value, expected_value) def testInitialStateInModule(self): # Check that scopes play nicely with initial states created inside modules. 
batch_size = 6 def module_build(): core = snt.DeepRNN([snt.LSTM(4), snt.LSTM(5)]) initial_state1 = core.initial_state( batch_size, dtype=tf.float32, trainable=True) initial_state2 = core.initial_state( batch_size + 1, dtype=tf.float32, trainable=True) return initial_state1, initial_state2 initial_state_module = snt.Module(module_build) initial_state = initial_state_module() self.evaluate(tf.global_variables_initializer()) initial_state_value = self.evaluate(initial_state) self.assertEqual(initial_state_value[0][0][0].shape, (batch_size, 4)) self.assertEqual(initial_state_value[1][0][0].shape, (batch_size + 1, 4)) self.assertEqual(initial_state_value[0][0][1].shape, (batch_size, 4)) self.assertEqual(initial_state_value[1][0][1].shape, (batch_size + 1, 4)) self.assertEqual(initial_state_value[0][1][0].shape, (batch_size, 5)) self.assertEqual(initial_state_value[1][1][0].shape, (batch_size + 1, 5)) self.assertEqual(initial_state_value[0][1][1].shape, (batch_size, 5)) self.assertEqual(initial_state_value[1][1][1].shape, (batch_size + 1, 5)) def testInitialStateNames(self): if tf.executing_eagerly(): return self.skipTest("Tensor.name is meaningless in eager mode.") hidden_size_a = 3 hidden_size_b = 4 batch_size = 5 deep_rnn = snt.DeepRNN( [snt.LSTM(hidden_size_a, name="a"), snt.LSTM(hidden_size_b, name="b")]) deep_rnn_state = deep_rnn.initial_state(batch_size, trainable=True) self.assertEqual( deep_rnn_state[0][0].name, "deep_rnn_initial_state/a_initial_state/state_hidden_tiled:0") self.assertEqual( deep_rnn_state[0][1].name, "deep_rnn_initial_state/a_initial_state/state_cell_tiled:0") self.assertEqual( deep_rnn_state[1][0].name, "deep_rnn_initial_state/b_initial_state/state_hidden_tiled:0") self.assertEqual( deep_rnn_state[1][1].name, "deep_rnn_initial_state/b_initial_state/state_cell_tiled:0") other_start_state = deep_rnn.initial_state( batch_size, trainable=True, name="blah") self.assertEqual(other_start_state[0][0].name, "blah/a_initial_state/state_hidden_tiled:0") self.assertEqual(other_start_state[0][1].name, "blah/a_initial_state/state_cell_tiled:0") self.assertEqual(other_start_state[1][0].name, "blah/b_initial_state/state_hidden_tiled:0") self.assertEqual(other_start_state[1][1].name, "blah/b_initial_state/state_cell_tiled:0") def testSkipConnectionOptions(self): batch_size = 3 x_seq_shape = [10, batch_size, 2] num_hidden = 5 num_layers = 4 final_hidden_size = 9 x_seq = tf.constant(np.random.normal(size=x_seq_shape), dtype=tf.float32) cores = [snt.LSTM(num_hidden) for _ in xrange(num_layers - 1)] final_core = snt.LSTM(final_hidden_size) cores += [final_core] deep_rnn_core = snt.DeepRNN(cores, skip_connections=True, concat_final_output_if_skip=False) initial_state = deep_rnn_core.initial_state(batch_size=batch_size) output_seq, _ = tf.nn.dynamic_rnn(deep_rnn_core, x_seq, time_major=True, initial_state=initial_state, dtype=tf.float32) initial_output = output_seq[0] self.evaluate(tf.global_variables_initializer()) initial_output_res = self.evaluate(initial_output) expected_shape = (batch_size, final_hidden_size) self.assertSequenceEqual(initial_output_res.shape, expected_shape) def testMLPFinalCore(self): batch_size = 2 sequence_length = 3 input_size = 4 mlp_last_layer_size = 17 cores = [ snt.LSTM(hidden_size=10), snt.nets.MLP(output_sizes=[6, 7, mlp_last_layer_size]), ] deep_rnn = snt.DeepRNN(cores, skip_connections=False) input_sequence = tf.constant( np.random.randn(sequence_length, batch_size, input_size), dtype=tf.float32) initial_state = deep_rnn.initial_state(batch_size=batch_size) 
output, unused_final_state = tf.nn.dynamic_rnn( deep_rnn, input_sequence, initial_state=initial_state, time_major=True) self.assertEqual( output.get_shape(), tf.TensorShape([sequence_length, batch_size, mlp_last_layer_size])) def testFinalCoreHasNoSizeWarning(self): cores = [snt.LSTM(hidden_size=10), snt.Linear(output_size=42), tf.nn.relu] rnn = snt.DeepRNN(cores, skip_connections=False) with mock.patch.object(tf.logging, "warning") as mocked_logging_warning: # This will produce a warning. unused_output_size = rnn.output_size self.assertTrue(mocked_logging_warning.called) first_call_args = mocked_logging_warning.call_args[0] self.assertIn("final core %s does not have the " ".output_size field", first_call_args[0]) self.assertEqual(first_call_args[2], 42) def testNoSizeButAlreadyConnected(self): batch_size = 16 cores = [snt.LSTM(hidden_size=10), snt.Linear(output_size=42), tf.nn.relu] rnn = snt.DeepRNN(cores, skip_connections=False) unused_output = rnn(tf.zeros((batch_size, 128)), rnn.initial_state(batch_size=batch_size)) with mock.patch.object(tf.logging, "warning") as mocked_logging_warning: output_size = rnn.output_size # Correct size is automatically inferred. self.assertEqual(output_size, tf.TensorShape([42])) self.assertTrue(mocked_logging_warning.called) first_call_args = mocked_logging_warning.call_args[0] self.assertIn("DeepRNN has been connected into the graph, " "so inferred output size", first_call_args[0]) @contrib_eager.run_all_tests_in_graph_and_eager_modes class ModelRNNTest(tf.test.TestCase): def setUp(self): super(ModelRNNTest, self).setUp() self.batch_size = 3 self.hidden_size = 4 self.model = snt.Module(name="model", build=tf.identity) self.model.output_size = tf.TensorShape(self.hidden_size) def testShape(self): model_rnn = snt.ModelRNN(self.model) inputs = tf.ones([self.batch_size, 5]) prev_state = tf.ones( dtype=tf.float32, shape=[self.batch_size, self.hidden_size]) outputs, next_state = model_rnn(inputs, prev_state) batch_size_shape = tf.TensorShape(self.batch_size) expected_shape = batch_size_shape.concatenate(self.model.output_size) self.assertNotEqual(expected_shape, inputs.get_shape()) self.assertEqual(expected_shape, prev_state.get_shape()) self.assertEqual(expected_shape, next_state.get_shape()) self.assertEqual(expected_shape, outputs.get_shape()) def testComputation(self): model_rnn = snt.ModelRNN(self.model) inputs = tf.random_normal([self.batch_size, 5]) prev_state_data = np.random.randn(self.batch_size, self.hidden_size) prev_state = tf.convert_to_tensor(prev_state_data) outputs, next_state = model_rnn(inputs, prev_state) outputs_value = self.evaluate([outputs, next_state]) outputs_value, next_state_value = outputs_value self.assertAllClose(prev_state_data, outputs_value) self.assertAllClose(outputs_value, next_state_value) def testBadArguments(self): with self.assertRaises(AttributeError): snt.ModelRNN(tf.identity) with self.assertRaises(TypeError): snt.ModelRNN(np.array([42])) @contrib_eager.run_all_tests_in_graph_and_eager_modes class BidirectionalRNNTest(tf.test.TestCase): toy_out = collections.namedtuple("toy_out", ("out_one", "out_two")) class ToyRNN(snt.RNNCore): """Basic fully connected vanilla RNN core.""" def __init__(self, hidden_size, name="toy_rnn"): """Construct a Toy RNN core to generate Multimodal output/state.""" super(BidirectionalRNNTest.ToyRNN, self).__init__(name=name) with self._enter_variable_scope(): self._wrapped_lstm = snt.LSTM(hidden_size) def _build(self, input_, prev_state): """Connects the ToyRNN module into the graph.""" 
output, state = self._wrapped_lstm(input_, prev_state) return BidirectionalRNNTest.toy_out(output, output), state def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None): return self._wrapped_lstm.initial_state(batch_size) @property def state_size(self): return self._wrapped_lstm.state_size @property def output_size(self): return BidirectionalRNNTest.toy_out(self._wrapped_lstm.output_size, self._wrapped_lstm.output_size) def setUp(self): super(BidirectionalRNNTest, self).setUp() self.seq_len = 8 self.feature_size = 12 self.batch_size = 5 self.hidden_size_forward = 10 self.hidden_size_backward = 20 self.forward_core = BidirectionalRNNTest.ToyRNN( hidden_size=self.hidden_size_forward, name="forward_model") self.backward_core = snt.LSTM( hidden_size=self.hidden_size_backward, name="backward_model") def testShape(self): """Test forward backward models with multi-modal output/state.""" bidir_rnn = snt.BidirectionalRNN( self.forward_core, self.backward_core) seq = tf.zeros([self.seq_len, self.batch_size, self.feature_size]) state = bidir_rnn.initial_state(self.batch_size) output = bidir_rnn(seq, state) shape_forward = (self.seq_len, self.batch_size, self.hidden_size_forward) shape_backward = (self.seq_len, self.batch_size, self.hidden_size_backward) self.assertAllEqual(output["outputs"]["forward"].out_one.get_shape(), shape_forward) self.assertAllEqual(output["outputs"]["forward"].out_two.get_shape(), shape_forward) self.assertAllEqual(output["outputs"]["backward"].get_shape(), shape_backward) self.assertAllEqual(output["state"]["forward"].cell.get_shape(), shape_forward) self.assertAllEqual(output["state"]["forward"].hidden.get_shape(), shape_forward) self.assertAllEqual(output["state"]["backward"].cell.get_shape(), shape_backward) self.assertAllEqual(output["state"]["backward"].hidden.get_shape(), shape_backward) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/basic_rnn_test.py
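Hedged usage sketch of the cores tested above: two `snt.VanillaRNN` cores stacked with `snt.DeepRNN` and unrolled through `tf.nn.dynamic_rnn`, which is the pattern several `DeepRNNTest` cases check. All sizes are arbitrary, and TF1 graph mode is assumed.

import sonnet as snt
import tensorflow.compat.v1 as tf

seq_len, batch_size, in_size = 7, 3, 2
# Time-major input sequence: [seq_len, batch_size, in_size].
inputs = tf.random_normal([seq_len, batch_size, in_size])

core = snt.DeepRNN(
    [snt.VanillaRNN(hidden_size=4), snt.VanillaRNN(hidden_size=5)],
    skip_connections=False)
initial_state = core.initial_state(batch_size)
outputs, final_state = tf.nn.dynamic_rnn(
    core, inputs, initial_state=initial_state, time_major=True)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out_v = sess.run(outputs)  # [seq_len, batch_size, 5]: last core's output.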
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Modules for attending over memory.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections # Dependency imports import numpy as np from sonnet.python.modules import base from sonnet.python.modules import basic import tensorflow.compat.v1 as tf # Result of AttentiveRead._build(). See docstring therein for details. AttentionOutput = collections.namedtuple( "AttentionOutput", ["read", "weights", "weight_logits"]) class AttentiveRead(base.AbstractModule): """A module for reading with attention. This module reads a weighted sum of embeddings from memory, where each memory slot's weight is based on the logit returned by an attention embedding module. A mask may be given to ignore some memory slots (e.g. when attending over variable-length sequences). """ def __init__(self, attention_logit_mod, name="attention"): """Initialize AttentiveRead module. Args: attention_logit_mod: Module that produces logit corresponding to a memory slot's compatibility. Must map a [batch_size * memory_size, memory_word_size + query_word_size]-shaped Tensor to a [batch_size * memory_size, 1] shape Tensor. name: string. Name for module. """ super(AttentiveRead, self).__init__(name=name) self._attention_logit_mod = attention_logit_mod def _build(self, memory, query, memory_mask=None): """Perform a differentiable read. Args: memory: [batch_size, memory_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, a single embedding to attend over. query: [batch_size, query_word_size]-shaped Tensor of dtype float32. Represents, for each example, a single embedding representing a query. memory_mask: None or [batch_size, memory_size]-shaped Tensor of dtype bool. An entry of False indicates that a memory slot should not enter the resulting weighted sum. If None, all memory is used. Returns: An AttentionOutput instance containing: read: [batch_size, memory_word_size]-shaped Tensor of dtype float32. This represents, for each example, a weighted sum of the contents of the memory. weights: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the attention weights used to compute the read. weight_logits: [batch_size, memory_size]-shaped Tensor of dtype float32. This represents, for each example and memory slot, the logits of the attention weights, that is, `weights` is calculated by taking the softmax of the weight logits. Raises: UnderspecifiedError: if memory_word_size or query_word_size can not be inferred. IncompatibleShapeError: if memory, query, memory_mask, or output of attention_logit_mod do not match expected shapes. 
""" if len(memory.get_shape()) != 3: raise base.IncompatibleShapeError( "memory must have shape [batch_size, memory_size, memory_word_size].") if len(query.get_shape()) != 2: raise base.IncompatibleShapeError( "query must have shape [batch_size, query_word_size].") if memory_mask is not None and len(memory_mask.get_shape()) != 2: raise base.IncompatibleShapeError( "memory_mask must have shape [batch_size, memory_size].") # Ensure final dimensions are defined, else the attention logit module will # be unable to infer input size when constructing variables. inferred_memory_word_size = memory.get_shape()[2].value inferred_query_word_size = query.get_shape()[1].value if inferred_memory_word_size is None or inferred_query_word_size is None: raise base.UnderspecifiedError( "memory_word_size and query_word_size must be known at graph " "construction time.") memory_shape = tf.shape(memory) batch_size = memory_shape[0] memory_size = memory_shape[1] query_shape = tf.shape(query) query_batch_size = query_shape[0] # Transform query to have same number of words as memory. # # expanded_query: [batch_size, memory_size, query_word_size]. expanded_query = tf.tile(tf.expand_dims(query, dim=1), [1, memory_size, 1]) # Compute attention weights for each memory slot. # # attention_weight_logits: [batch_size, memory_size] with tf.control_dependencies( [tf.assert_equal(batch_size, query_batch_size)]): concatenated_embeddings = tf.concat( values=[memory, expanded_query], axis=2) batch_apply_attention_logit = basic.BatchApply( self._attention_logit_mod, n_dims=2, name="batch_apply_attention_logit") attention_weight_logits = batch_apply_attention_logit( concatenated_embeddings) # Note: basic.BatchApply() will automatically reshape the [batch_size * # memory_size, 1]-shaped result of self._attention_logit_mod(...) into a # [batch_size, memory_size, 1]-shaped Tensor. If # self._attention_logit_mod(...) returns something with more dimensions, # then attention_weight_logits will have extra dimensions, too. if len(attention_weight_logits.get_shape()) != 3: raise base.IncompatibleShapeError( "attention_weight_logits must be a rank-3 Tensor. Are you sure that " "attention_logit_mod() returned [batch_size * memory_size, 1]-shaped" " Tensor?") # Remove final length-1 dimension. attention_weight_logits = tf.squeeze(attention_weight_logits, [2]) # Mask out ignored memory slots by assigning them very small logits. Ensures # that every example has at least one valid memory slot, else we'd end up # averaging all memory slots equally. if memory_mask is not None: num_remaining_memory_slots = tf.reduce_sum( tf.cast(memory_mask, dtype=tf.int32), axis=[1]) with tf.control_dependencies( [tf.assert_positive(num_remaining_memory_slots)]): finfo = np.finfo(np.float32) kept_indices = tf.cast(memory_mask, dtype=tf.float32) ignored_indices = tf.cast(tf.logical_not(memory_mask), dtype=tf.float32) lower_bound = finfo.max * kept_indices + finfo.min * ignored_indices attention_weight_logits = tf.minimum(attention_weight_logits, lower_bound) # attended_memory: [batch_size, memory_word_size]. attention_weight = tf.reshape( tf.nn.softmax(attention_weight_logits), shape=[batch_size, memory_size, 1]) # The multiplication is elementwise and relies on broadcasting the weights # across memory_word_size. Then we sum across the memory slots. attended_memory = tf.reduce_sum(memory * attention_weight, axis=[1]) # Infer shape of result as much as possible. 
    inferred_batch_size, _, inferred_memory_word_size = (
        memory.get_shape().as_list())
    attended_memory.set_shape(
        [inferred_batch_size, inferred_memory_word_size])

    return AttentionOutput(
        read=attended_memory,
        weights=tf.squeeze(attention_weight, [2]),
        weight_logits=attention_weight_logits)
sonnet-1
sonnet/python/modules/attention.py
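A small usage sketch for the `AttentiveRead` module defined above. The attention logit module is a `snt.Linear(output_size=1)`, which satisfies the documented contract of mapping a [batch_size * memory_size, memory_word_size + query_word_size] Tensor to [batch_size * memory_size, 1]; the shapes and mask lengths below are made up for illustration.

import tensorflow.compat.v1 as tf
import sonnet as snt
from sonnet.python.modules.attention import AttentiveRead

batch_size, memory_size = 4, 6
memory_word_size, query_word_size = 8, 5
memory = tf.random_normal([batch_size, memory_size, memory_word_size])
query = tf.random_normal([batch_size, query_word_size])
# Boolean mask: example i may only attend to its first lengths[i] slots.
memory_mask = tf.sequence_mask([6, 5, 4, 3], maxlen=memory_size)

attend = AttentiveRead(attention_logit_mod=snt.Linear(output_size=1))
attention = attend(memory, query, memory_mask=memory_mask)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  read, weights, logits = sess.run(attention)
  # read: [4, 8]; weights and logits: [4, 6]; masked slots get ~0 weight.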
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.layer_norm.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import operator # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers from tensorflow.python.ops import variables def _get_layer_norm_stats(data, axis): """Returns mean and variances calculated over the given axes of the data.""" if axis is None: axis = list(range(1, data.ndim)) # convert a scalar axis into a singleton list so code below can assume a list if isinstance(axis, int): axis = [axis] # Transpose to put all the normalized dimensions at the end. Well done tharley # for the one-liner. For 5D data, and example axis [1, 3] produces transpose # arg of [0, 2, 4, 1, 3] which puts all the normalization axis at the end, # suitable for flattening down to calculate statistics. transposed_data = np.transpose( data, sorted(set(range(data.ndim)) - set(axis)) + axis) # Combine the sizes of all the (now trailing) normalized_dimensions normalized_dims_total_size = functools.reduce( operator.mul, (data.shape[ax] for ax in axis)) # Do the reshape - all the non-normalized dimensions are combined by "-1" reshaped = np.reshape(transposed_data, [-1, normalized_dims_total_size]) # Return stats - should be very close to standard normal. return { "mean": np.mean(reshaped, axis=1), "std": np.std(reshaped, axis=1), } class LayerNormTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ("Float32", tf.float32), ("Float64", tf.float64), ("Float16", tf.float16), ("BFloat16", tf.bfloat16), ) def testDataType(self, dtype): inputs = tf.placeholder(dtype, shape=[None, 64]) layer_norm = snt.LayerNorm() output = layer_norm(inputs) self.assertEqual(dtype, output.dtype) # Variables are stored in float32 for lower precision activations. 
expected_dtype = tf.float32 if dtype in [tf.float16, tf.bfloat16] else dtype self.assertEqual(expected_dtype, layer_norm.gamma.dtype.base_dtype) self.assertEqual(expected_dtype, layer_norm.beta.dtype.base_dtype) def testNormalization(self): """Check that inputs are approximately centered and scaled.""" inputs = tf.constant([[1., 2., 3.], [6., 4., 7.]], dtype=tf.float32) ln = snt.LayerNorm() outputs = ln(inputs) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) outputs_ = sess.run(outputs) self.assertAllClose(outputs_.mean(axis=1), [0., 0.], atol=1e-04) self.assertAllClose(outputs_.var(axis=1), [1., 1.], atol=1e-04) def testSharing(self): """Check that the correct number of variables are made when sharing.""" inputs1 = tf.placeholder(tf.float32, shape=[None, 64]) inputs2 = tf.placeholder(tf.float32, shape=[None, 64]) ln = snt.LayerNorm() ln(inputs1) ln(inputs2) self.assertLen(tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES), 2) def testInvalidInitializerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"): snt.LayerNorm( initializers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Initializer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.LayerNorm(initializers={"gamma": tf.zeros([1, 2, 3])}) def testInvalidPartitionerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"): snt.LayerNorm( partitioners={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Partitioner for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.LayerNorm(partitioners={"gamma": tf.zeros([1, 2, 3])}) def testInvalidRegularizationParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"): snt.LayerNorm( regularizers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Regularizer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.LayerNorm(regularizers={"gamma": tf.zeros([1, 2, 3])}) def testInitializers(self): initializers = { "gamma": tf.constant_initializer(2.0), "beta": tf.constant_initializer(3.0), } inputs = tf.placeholder(tf.float32, shape=[None, 10]) ln = snt.LayerNorm(initializers=initializers) self.assertEqual(ln.initializers, initializers) ln(inputs) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) ones_v = np.ones([inputs.get_shape()[-1]]) self.assertAllClose(ln.beta.eval(), ones_v * 3.0) self.assertAllClose(ln.gamma.eval(), ones_v * 2.0) def testRegularizersInRegularizationLosses(self): regularizers = { "gamma": contrib_layers.l1_regularizer(scale=0.5), "beta": contrib_layers.l2_regularizer(scale=0.5), } inputs = tf.placeholder(tf.float32, shape=[None, 10]) ln = snt.LayerNorm(regularizers=regularizers) self.assertEqual(ln.regularizers, regularizers) ln(inputs) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*") def testPartitioners(self): partitioners = { "gamma": tf.fixed_size_partitioner(num_shards=2), "beta": tf.fixed_size_partitioner(num_shards=2), } inputs = tf.placeholder(tf.float32, shape=[None, 10]) ln = snt.LayerNorm(partitioners=partitioners) self.assertEqual(ln.partitioners, partitioners) ln(inputs) self.assertEqual(type(ln.gamma), variables.PartitionedVariable) self.assertEqual(type(ln.beta), 
variables.PartitionedVariable) @parameterized.parameters( # Default, sums over all dimensions except batch: {"axis": None, "input_shape": [2, 3]}, {"axis": None, "input_shape": [4, 5, 6]}, {"axis": None, "input_shape": [12, 13, 14, 15]}, # Specify a single axis to sum over: {"axis": [1], "input_shape": [5, 6, 7]}, {"axis": 1, "input_shape": [5, 6, 7]}, # Sum over all except final dimension - i.e. Instance Norm. {"axis": [1, 2], "input_shape": [10, 11, 12, 14]}, # Sum over non-contiguous dimensions. {"axis": [1, 3], "input_shape": [3, 4, 5, 6, 7]}, ) def testAxesDefault(self, axis, input_shape): inputs = tf.constant(np.random.rand(*input_shape)) ln = snt.LayerNorm(axis=axis, offset=False, scale=False) output = ln(inputs) init = tf.global_variables_initializer() with self.test_session() as session: session.run(init) output_np = session.run(output) statistics = _get_layer_norm_stats(output_np, axis=axis) self.assertAllClose(statistics["mean"], np.zeros_like(statistics["mean"]), atol=2e-3) self.assertAllClose(statistics["std"], np.ones_like(statistics["std"]), atol=2e-3) @parameterized.parameters( {"axis": True}, {"axis": False}, {"axis": 4.0}, {"axis": [2, "invalid"]}) def testInvalidAxes(self, axis): msg = "axis should be an int or an iterable of ints" with self.assertRaisesRegexp(ValueError, msg): snt.LayerNorm(axis=axis) @parameterized.parameters( {"scale": True, "offset": True}, {"scale": True, "offset": False}, {"scale": False, "offset": True}, {"scale": False, "offset": False}) def testScaleAndOffset(self, scale, offset): inputs = tf.random_uniform([2, 4, 6]) module = snt.LayerNorm(scale=scale, offset=offset) _ = module(inputs) variables_dict = {v.name: v for v in module.get_variables()} if scale: self.assertEqual(variables_dict["layer_norm/gamma:0"].shape, (6,)) else: self.assertNotIn("layer_norm/gamma:0", variables_dict) if offset: self.assertEqual(variables_dict["layer_norm/beta:0"].shape, (6,)) else: self.assertNotIn("layer_norm/beta:0", variables_dict) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/layer_norm_test.py
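Usage sketch for the `snt.LayerNorm` configurations exercised above: the default normalizes each example over every non-batch axis, while `axis=[1, 2]` reproduces the instance-norm style case from `testAxesDefault`. The input shape is illustrative only.

import sonnet as snt
import tensorflow.compat.v1 as tf

images = tf.random_uniform([10, 11, 12, 14])  # [batch, height, width, channels]

# Default: per-example normalization over all non-batch axes, with learned
# gamma/beta over the final dimension.
layer_normed = snt.LayerNorm()(images)
# Spatial axes only, no learned scale or offset (instance-norm style).
instance_normed = snt.LayerNorm(axis=[1, 2], offset=False, scale=False)(images)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run([layer_normed, instance_normed])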
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================== """Testing the spectral_normalization module.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools # Dependency imports import numpy as np import sonnet as snt from sonnet.python.modules import base from sonnet.python.modules import basic from sonnet.python.modules import spectral_normalization import tensorflow.compat.v1 as tf _ACCEPTABLE_ERROR = 1e-3 class MinimalClass(base.AbstractModule): def _build(self, input_): sn_linear = spectral_normalization.wrap_with_spectral_norm( basic.Linear, {'eps': 1e-4}) linear1 = sn_linear(16) linear2 = sn_linear(16) return linear1(input_), linear2(input_) class SpectralNormalizationTest(tf.test.TestCase): def test_raw_spectral_norm(self): with tf.Graph().as_default(): ones_weight = 4 * tf.eye(8, 8) sigma = spectral_normalization.spectral_norm(ones_weight)['sigma'] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) _, sigma_v = sess.run([ones_weight, sigma]) self.assertLess(abs(4.0 - sigma_v), _ACCEPTABLE_ERROR) def test_raw_spectral_norm_bfloat16(self): with tf.Graph().as_default(): ones_weight = 4 * tf.eye(8, 8, dtype=tf.bfloat16) sigma = spectral_normalization.spectral_norm(ones_weight)['sigma'] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) _, sigma_v = sess.run([ones_weight, sigma]) self.assertEqual(tf.bfloat16, sigma_v.dtype) self.assertLess(abs(4.0 - float(sigma_v)), _ACCEPTABLE_ERROR) def test_spectral_norm_creates_variables(self): with tf.Graph().as_default(): ones_weight = 4 * tf.eye(8, 8) pre_spec_norm_vars = tf.global_variables() _ = spectral_normalization.spectral_norm(ones_weight) post_spec_norm_vars = tf.global_variables() self.assertEmpty(pre_spec_norm_vars) self.assertLen(post_spec_norm_vars, 1) self.assertEqual(post_spec_norm_vars[0].name.split('/')[-1], 'u0:0') def test_wrapper_creates_variables(self): with tf.Graph().as_default(): SNLinear = functools.partial( # pylint: disable=invalid-name spectral_normalization.SpectralNormWrapper, snt.Linear, {}, None) input_ = tf.zeros((8, 8), dtype=tf.float32) linear_layer_with_sn = SNLinear(16) _ = linear_layer_with_sn(input_) vars_ = tf.global_variables() self.assertLen(vars_, 3) def test_update_sn(self): with tf.Graph().as_default(): SNLinear = functools.partial( # pylint: disable=invalid-name spectral_normalization.SpectralNormWrapper, snt.Linear, {}, 'POWER_ITERATION_OPS') input_ = tf.zeros((8, 8), dtype=tf.float32) linear_layer_with_sn = SNLinear(16) output_update = linear_layer_with_sn(input_) output_no_update = linear_layer_with_sn( input_, enable_power_iteration=False) run_update_ops = tf.get_collection('POWER_ITERATION_OPS') singular_val_w = [v for v in tf.global_variables() if 'u0' in v.name][0] w_ph = tf.placeholder(singular_val_w.dtype, singular_val_w.shape) reset_sing_val = tf.assign(singular_val_w, w_ph) with tf.Session() 
as sess: sess.run(tf.global_variables_initializer()) original_sing_val_v = sess.run(singular_val_w) sing_val_v_implicit = sess.run(output_update) sess.run(reset_sing_val, {w_ph: original_sing_val_v}) sing_val_v_explicit, _ = sess.run([output_no_update, run_update_ops]) self.assertTrue( np.equal(sing_val_v_implicit, sing_val_v_explicit).all()) self.assertFalse( np.equal(original_sing_val_v, sing_val_v_explicit).all()) def test_update_sn_compatible_with_bfloat16(self): with tf.Graph().as_default(): SNLinear = functools.partial( # pylint: disable=invalid-name spectral_normalization.SpectralNormWrapper, snt.Linear, {}, 'POWER_ITERATION_OPS') input_ = tf.zeros((8, 8), dtype=tf.float32) linear_layer_with_sn = SNLinear(16) output_update = linear_layer_with_sn(input_) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) sess.run(output_update) def test_conflicting_names_no_scope(self): with tf.Graph().as_default(): sn_linear = spectral_normalization.wrap_with_spectral_norm( basic.Linear, {'eps': 1e-4}) linear1 = sn_linear(16) linear2 = sn_linear(16) input_ = tf.zeros((48, 12)) # Random [batch, dim] shape. linear1(input_) linear2(input_) def test_conflicting_names_with_enclosing_scope(self): with tf.Graph().as_default(): input_ = tf.zeros((48, 12)) # Random [batch, dim] shape. MinimalClass()(input_) if __name__ == '__main__': tf.test.main()
sonnet-1
sonnet/python/modules/spectral_normalization_test.py
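# --- Usage sketch (editor addition, not part of the repository files above). ---
# A minimal, hedged example of the spectral-norm wrapper exercised by the tests
# above. `wrap_with_spectral_norm` and the {'eps': 1e-4} kwargs mirror the test
# code; the input shape, layer size and session boilerplate are illustrative
# assumptions.
import tensorflow.compat.v1 as tf

from sonnet.python.modules import basic
from sonnet.python.modules import spectral_normalization

with tf.Graph().as_default():
  # Build a Linear factory whose weight matrix is spectrally normalized.
  sn_linear = spectral_normalization.wrap_with_spectral_norm(
      basic.Linear, {'eps': 1e-4})
  layer = sn_linear(16)

  inputs = tf.random_uniform([8, 32])
  # Spectral normalisation of the weights happens as part of the forward pass.
  outputs = layer(inputs)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(outputs).shape)  # (8, 16)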
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Cores for RNNs with varying number of unrolls. This file contains implementations for: * ACT (Adaptive Computation Time) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools from sonnet.python.modules import basic from sonnet.python.modules import rnn_core from sonnet.python.ops import nest import tensorflow.compat.v1 as tf def _nested_add(nested_a, nested_b): """Add two arbitrarily nested `Tensors`.""" return nest.map(lambda a, b: a + b, nested_a, nested_b) def _nested_unary_mul(nested_a, p): """Multiply `Tensors` in arbitrarily nested `Tensor` `nested_a` with `p`.""" def mul_with_broadcast(tensor): ndims = tensor.shape.ndims if ndims != 2: p_reshaped = tf.reshape(p, [-1] + [1] * (ndims - 1)) return p_reshaped * tensor else: return p * tensor return nest.map(mul_with_broadcast, nested_a) def _nested_zeros_like(nested_a): return nest.map(tf.zeros_like, nested_a) class ACTCore(rnn_core.RNNCore): """Adaptive computation time core. Implementation of the model described in "Adaptive Computation Time for Recurrent Neural Networks" paper, https://arxiv.org/abs/1603.08983. The `ACTCore` incorporates the pondering RNN of ACT, with different computation times for each element in the mini batch. Each pondering step is performed by the `core` passed to the constructor of `ACTCore`. The output of the `ACTCore` is made of `(act_out, (iteration, remainder)`, where * `iteration` counts the number of pondering step in each batch element; * `remainder` is the remainder as defined in the ACT paper; * `act_out` is the weighted average output of all pondering steps (see ACT paper for more info). """ def __init__(self, core, output_size, threshold, get_state_for_halting, max_steps=0, name="act_core"): """Constructor. Args: core: A `sonnet.RNNCore` object. This should only take a single `Tensor` in input, and output only a single flat `Tensor`. output_size: An integer. The size of each output in the sequence. threshold: A float between 0 and 1. Probability to reach for ACT to stop pondering. get_state_for_halting: A callable that can take the `core` state and return the input to the halting function. max_steps: Integer >= 0, that controls the maximum number of ponder steps. If equal to 0, then this disables control. name: A string. The name of this module. Raises: ValueError: if `threshold` is not between 0 and 1. ValueError: if `core` has either nested outputs or outputs that are not one dimensional. 
""" super(ACTCore, self).__init__(name=name) self._core = core self._output_size = output_size self._threshold = threshold self._get_state_for_halting = get_state_for_halting self._max_steps = max_steps if not isinstance(self._core.output_size, tf.TensorShape): raise ValueError("Output of core should be single Tensor.") if self._core.output_size.ndims != 1: raise ValueError("Output of core should be 1D.") if not 0 <= self._threshold <= 1: raise ValueError("Threshold should be between 0 and 1, but found {}". format(self._threshold)) def initial_state(self, *args, **kwargs): return self._core.initial_state(*args, **kwargs) @property def output_size(self): return tf.TensorShape([self._output_size]), (tf.TensorShape([1]), tf.TensorShape([1])) @property def state_size(self): return self._core.state_size @property def batch_size(self): self._ensure_is_connected() return self._batch_size @property def dtype(self): self._ensure_is_connected() return self._dtype def _cond(self, unused_x, unused_cumul_out, unused_prev_state, unused_cumul_state, cumul_halting, unused_iteration, unused_remainder): """The `cond` of the `tf.while_loop`.""" return tf.reduce_any(cumul_halting < 1) def _body(self, x, cumul_out, prev_state, cumul_state, cumul_halting, iteration, remainder, halting_linear, x_ones): """The `body` of `tf.while_loop`.""" # Increase iteration count only for those elements that are still running. all_ones = tf.constant(1, shape=(self._batch_size, 1), dtype=self._dtype) is_iteration_over = tf.equal(cumul_halting, all_ones) next_iteration = tf.where(is_iteration_over, iteration, iteration + 1) out, next_state = self._core(x, prev_state) # Get part of state used to compute halting values. halting_input = halting_linear(self._get_state_for_halting(next_state)) halting = tf.sigmoid(halting_input, name="halting") next_cumul_halting_raw = cumul_halting + halting over_threshold = next_cumul_halting_raw > self._threshold if self._max_steps > 0: at_max_steps = tf.greater_equal(next_iteration, self._max_steps) over_threshold = tf.logical_or(over_threshold, at_max_steps) next_cumul_halting = tf.where(over_threshold, all_ones, next_cumul_halting_raw) next_remainder = tf.where(over_threshold, remainder, 1 - next_cumul_halting_raw) p = next_cumul_halting - cumul_halting next_cumul_state = _nested_add(cumul_state, _nested_unary_mul(next_state, p)) next_cumul_out = cumul_out + p * out return (x_ones, next_cumul_out, next_state, next_cumul_state, next_cumul_halting, next_iteration, next_remainder) def _build(self, x, prev_state): """Connects the core to the graph. Args: x: Input `Tensor` of shape `(batch_size, input_size)`. prev_state: Previous state. This could be a `Tensor`, or a tuple of `Tensor`s. Returns: The tuple `(output, state)` for this core. Raises: ValueError: if the `Tensor` `x` does not have rank 2. 
""" x.get_shape().with_rank(2) self._batch_size = x.get_shape().as_list()[0] self._dtype = x.dtype x_zeros = tf.concat( [x, tf.zeros( shape=(self._batch_size, 1), dtype=self._dtype)], 1) x_ones = tf.concat( [x, tf.ones( shape=(self._batch_size, 1), dtype=self._dtype)], 1) # Weights for the halting signal halting_linear = basic.Linear(name="halting_linear", output_size=1) body = functools.partial( self._body, halting_linear=halting_linear, x_ones=x_ones) cumul_halting_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) iteration_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) core_output_size = [x.value for x in self._core.output_size] out_init = tf.zeros(shape=(self._batch_size,) + tuple(core_output_size), dtype=self._dtype) cumul_state_init = _nested_zeros_like(prev_state) remainder_init = tf.zeros(shape=(self._batch_size, 1), dtype=self._dtype) (unused_final_x, final_out, unused_final_state, final_cumul_state, unused_final_halting, final_iteration, final_remainder) = tf.while_loop( self._cond, body, [x_zeros, out_init, prev_state, cumul_state_init, cumul_halting_init, iteration_init, remainder_init]) act_output = basic.Linear( name="act_output_linear", output_size=self._output_size)(final_out) return (act_output, (final_iteration, final_remainder)), final_cumul_state
sonnet-1
sonnet/python/modules/pondering_rnn.py
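# --- Usage sketch (editor addition, not part of the repository files above). ---
# One pondering step of ACTCore wrapped around an LSTM, as a hedged example of
# the API described in the docstrings above. The batch/layer sizes, the use of
# snt.LSTM, and the assumption that its state is a (hidden, cell) pair are
# illustrative choices, not taken from this file.
import sonnet as snt
import tensorflow.compat.v1 as tf

from sonnet.python.modules.pondering_rnn import ACTCore

batch_size, input_size, hidden_size, output_size = 4, 8, 16, 10

lstm = snt.LSTM(hidden_size)
act_core = ACTCore(
    core=lstm,
    output_size=output_size,
    threshold=0.99,
    # Assumed state layout: element 0 is the hidden state fed to the halting unit.
    get_state_for_halting=lambda state: state[0],
    max_steps=10)

x = tf.random_uniform([batch_size, input_size])
prev_state = act_core.initial_state(batch_size, dtype=tf.float32)
(act_out, (iterations, remainder)), next_state = act_core(x, prev_state)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out_v, iters_v = sess.run([act_out, iterations])
  print(out_v.shape, iters_v.shape)  # (4, 10) (4, 1)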
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Modules for dealing with block matrices.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from six.moves import xrange # pylint: disable=redefined-builtin from sonnet.python.modules import base import tensorflow.compat.v1 as tf class BlockTriangularMatrix(base.AbstractModule): """Module for constructing a block triangular matrix from a vector. This module takes a vector and builds a block (upper or lower) triangular matrix from it. The blocks have equal shape, `block_shape`, and the number of rows (and, hence, the number of columns) needs to be specified in advance. The diagonal may be excluded by setting the argument `include_diagonal` to False. Example: suppose that we choose `block_shape = (2, 2)` and `block_rows = 3`. Then, the input vector `[1 2 3 ... 24]` is mapped to the matrix: ``` M = [ 1 2 0 0 0 0 3 4 0 0 0 0 5 6 7 8 0 0 9 10 11 12 0 0 13 14 15 16 17 18 19 20 21 22 23 24]. ``` """ def __init__(self, block_shape, block_rows, include_diagonal=True, include_off_diagonal=True, upper=False, name='block_triangular_matrix'): """Constructs a new `BlockTriangularMatrix` module. Args: block_shape: tuple, 2-dimensional tuple indicating the shape of each individual block. block_rows: int, the number of blocks in each row (and column) of the output matrix. include_diagonal: boolean, indicates whether or not blocks on the diagonal entries should be included. include_off_diagonal: boolean, indicates whether or not only the off-diagonal entries should be included. If set to False, the value of `upper` is ignored. upper: boolean, if True then the output matrix is block upper triangular; if False, it is block lower triangular. name: string, name of the module. Raises: ValueError: if `include_diagonal` and `include_off_diagonal` are both False. 
""" super(BlockTriangularMatrix, self).__init__(name=name) if not include_diagonal and not include_off_diagonal: raise ValueError('Arguments include_diagonal and include_off_diagonal ' 'cannot both be False.') self._block_shape = tuple(block_shape) self._block_rows = block_rows self._include_diagonal = include_diagonal self._include_off_diagonal = include_off_diagonal self._upper = upper self._num_blocks = sum( self._content_blocks(r) for r in xrange(self._block_rows)) @property def num_blocks(self): """The total number of blocks in the output matrix.""" return self._num_blocks @property def block_size(self): """The number of entries of each block.""" return self._block_shape[0] * self._block_shape[1] @property def block_shape(self): """The shape of each block.""" return self._block_shape @property def output_shape(self): """The shape of the output matrix.""" return (self._block_shape[0] * self._block_rows, self._block_shape[1] * self._block_rows) @property def input_size(self): """The expected length of the input vector.""" return self.block_size * self.num_blocks def _build(self, vector): vector.get_shape().assert_is_compatible_with((None, self.input_size)) n = tf.shape(vector)[0] # Get batch size. rows = [] start_index = 0 block_height, block_width = self._block_shape # Construct the individual block rows. for r in xrange(self._block_rows): # Construct an individual block row as a concatenation of a block of # zeros (left zeros), the actual content (coming from the input), and # another block of zeros (right zeros). Each of these blocks can be empty. left_zero_blocks = self._left_zero_blocks(r) right_zero_blocks = self._right_zero_blocks(r) content_blocks = self._content_blocks(r) assert (left_zero_blocks + content_blocks + right_zero_blocks == self._block_rows) assert left_zero_blocks >= 0 assert right_zero_blocks >= 0 assert content_blocks >= 0 # Take the next chunk of entries from the input vector # and increase the starting index into the input vector. end_index = start_index + content_blocks * self.block_size input_chunk = vector[:, start_index:end_index] start_index = end_index # Reshape the entries from the input vector. content = tf.reshape( input_chunk, shape=(n, block_height, content_blocks * block_width), name='content' + str(r)) paddings = [[0, 0], [0, 0], [left_zero_blocks * block_width, right_zero_blocks * block_width]] # Concatenate content and zeros to form the next block row. rows.append(tf.pad(content, paddings, name='block_row' + str(r))) # Concatenate all rows together to get the final block matrix. return tf.concat(rows, 1) def _left_zero_blocks(self, r): """Number of blocks with zeros from the left in block row `r`.""" if not self._include_off_diagonal: return r elif not self._upper: return 0 elif self._include_diagonal: return r else: return r + 1 def _right_zero_blocks(self, r): """Number of blocks with zeros from the right in block row `r`.""" if not self._include_off_diagonal: return self._block_rows - r - 1 elif self._upper: return 0 elif self._include_diagonal: return self._block_rows - r - 1 else: return self._block_rows - r def _content_blocks(self, r): """Number of content blocks in block row `r`.""" return (self._block_rows - self._left_zero_blocks(r) - self._right_zero_blocks(r)) class BlockDiagonalMatrix(BlockTriangularMatrix): """Module for constructing a block diagonal matrix from a vector. This module takes a vector and builds a block diagonal matrix from it. 
The blocks have equal shape, `block_shape`, and the number of rows (and, hence, the number of columns) needs to be specified in advance. Example: suppose that we choose `block_shape = (2, 2)` and `block_rows = 3`. Then, the input vector `[1 2 3 ... 12]` is mapped to the matrix: ``` M = [ 1 2 0 0 0 0 3 4 0 0 0 0 0 0 5 6 0 0 0 0 7 8 0 0 0 0 0 0 9 10 0 0 0 0 11 12]. ``` """ def __init__(self, block_shape, block_rows, name='block_diagonal_matrix'): """Constructs a new `BlockDiagonalMatrix` module. Args: block_shape: tuple, 2-dimensional tuple indicating the shape of each individual block. block_rows: int, the number of blocks in each row (and column) of the output matrix. name: string, name of the module. """ super(BlockDiagonalMatrix, self).__init__( block_shape=block_shape, block_rows=block_rows, include_diagonal=True, include_off_diagonal=False, name=name)
sonnet-1
sonnet/python/modules/block_matrix.py
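# --- Usage sketch (editor addition, not part of the repository files above). ---
# Reproduces the lower-triangular example from the BlockTriangularMatrix
# docstring (block_shape=(2, 2), block_rows=3, input vector 1..24). The batch
# size of 1 and the session boilerplate are illustrative assumptions.
import numpy as np
import tensorflow.compat.v1 as tf

from sonnet.python.modules.block_matrix import BlockTriangularMatrix

module = BlockTriangularMatrix(block_shape=(2, 2), block_rows=3)
vector = tf.constant(np.arange(1, 25, dtype=np.float32)[np.newaxis, :])  # [1, 24]
matrix = module(vector)  # shape [1, 6, 6]

with tf.Session() as sess:
  print(sess.run(matrix)[0])
  # Expected (per the docstring):
  # [[ 1.  2.  0.  0.  0.  0.]
  #  [ 3.  4.  0.  0.  0.  0.]
  #  [ 5.  6.  7.  8.  0.  0.]
  #  [ 9. 10. 11. 12.  0.  0.]
  #  [13. 14. 15. 16. 17. 18.]
  #  [19. 20. 21. 22. 23. 24.]]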
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Modules for embedding integer ids.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math # Dependency imports from sonnet.python.modules import base from sonnet.python.modules import util import tensorflow.compat.v1 as tf def _embedding_dim(vocab_size): """Calculate a reasonable embedding size for a vocabulary. Rule of thumb is 6 * 4th root of vocab_size. Args: vocab_size: Size of the input vocabulary. Returns: The embedding size to use. Raises: ValueError: if `vocab_size` is invalid. """ if not vocab_size or (vocab_size <= 0): raise ValueError("Invalid vocab_size %g." % vocab_size) return int(round(6.0 * math.sqrt(math.sqrt(vocab_size)))) class Embed(base.AbstractModule): """Module for embedding tokens in a low-dimensional space.""" EMBEDDINGS = "embeddings" POSSIBLE_INITIALIZER_KEYS = {EMBEDDINGS} def __init__(self, vocab_size=None, embed_dim=None, existing_vocab=None, densify_gradients=False, initializers=None, partitioners=None, regularizers=None, trainable=True, custom_getter=None, name="embed"): """Constructs an Embed module. Args: vocab_size: int. Number of unique tokens to embed. If not provided, an existing vocabulary matrix from which vocab_size can be inferred must be provided as existing_vocab. embed_dim: int or None. Number of dimensions to assign to each embedding. If not specified, a sensible default is chosen based on `vocab_size`. If an existing vocabulary matrix initializes the module, this should not be provided as it will be inferred. existing_vocab: a [vocab_size, embed_dim] vocabulary matrix. Will be converted to a tf.float32 tensor. If provided, neither or vocab_size or embed_dim should be provided as they are inferred. densify_gradients: if True, we convert the embedding gradient from an indexed-slices to a regular tensor before sending it back to the parameter server. This avoids excess computation on the parameter server. Use this option for moderately sized embeddings, e.g., a vocabulary size on the order of up to thousands. For embeddings larger than these, e.g. a vocabulary size on the order of tens or hundreds of thousands, set this to False. initializers: Optional dict containing initializers for embeddings (with key 'embeddings'). As a default, embeddings are initialized via a truncated normal distribution. partitioners: Optional dict containing partitioners for embeddings (with key 'embeddings'). As a default, no partitioners are used. regularizers: Optional dict containing regularizers for embeddings (with key 'embeddings'). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. trainable: if True, the embeddings will be updated during training. 
If False, they are fixed to their initial values. If `trainable=False` and a regularizer is given, the resulting loss stays constant. custom_getter: Callable or dictionary of callables to use as custom getters inside the module. If a dictionary, the keys correspond to regexes to match variable names. See the `tf.get_variable` documentation for information about the custom_getter API. name: string. Name for this module. Raises: ValueError: if neither one of vocab_size or existing_vocab is provided, or if existing_vocab is provided along with vocab_size, embedding_dim, initializers, partitioners or regularizers (as these should be inferred). """ if vocab_size is None and existing_vocab is None: raise ValueError("Must provide on of vocab_size or existing_vocab.") if existing_vocab is not None and not all( x is None for x in [vocab_size, embed_dim, initializers, partitioners]): raise ValueError("If existing_vocab is provided, none of vocab_size, " "embedding_dim, initializers, or partitioners is " "needed.") super(Embed, self).__init__(custom_getter=custom_getter, name=name) self._existing_vocab = None if existing_vocab is None: self._vocab_size = vocab_size self._embed_dim = embed_dim or _embedding_dim(self._vocab_size) else: self._existing_vocab = tf.convert_to_tensor( existing_vocab, dtype=tf.float32) existing_vocab_shape = self._existing_vocab.get_shape().with_rank(2) existing_vocab_shape.assert_is_fully_defined() self._vocab_size, self._embed_dim = existing_vocab_shape.as_list() self._initializers = util.check_initializers( initializers, self.POSSIBLE_INITIALIZER_KEYS) self._partitioners = util.check_partitioners( partitioners, self.POSSIBLE_INITIALIZER_KEYS) self._regularizers = util.check_regularizers( regularizers, self.POSSIBLE_INITIALIZER_KEYS) self._trainable = trainable self._densify_gradients = densify_gradients def _build(self, ids): """Lookup embeddings. Looks up an embedding vector for each value in `ids`. All ids must be within [0, vocab_size), else an `InvalidArgumentError` is raised at runtime. Args: ids: Tensor of dtype int64. Returns: Tensor of tf.shape(ids) + [embedding_dim] and dtype float32. """ # Construct embeddings. if self._existing_vocab is None: if self.EMBEDDINGS not in self._initializers: self._initializers[self.EMBEDDINGS] = tf.initializers.random_normal() self._embeddings = tf.get_variable( "embeddings", shape=[self._vocab_size, self._embed_dim], dtype=tf.float32, initializer=self._initializers[self.EMBEDDINGS], partitioner=self._partitioners.get(self.EMBEDDINGS, None), regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable) else: self._embeddings = tf.get_variable( "embeddings", dtype=tf.float32, initializer=self._existing_vocab, regularizer=self._regularizers.get(self.EMBEDDINGS, None), trainable=self._trainable) if self._densify_gradients: # On the backwards pass, we convert the gradient from indexed-slices to a # regular tensor before sending it back to the parameter server. # This avoids excess computation on the parameter server. # In eager mode we do not need the conversion. # Add a check whether we are in eager mode when it is supported. 
embeddings = util.convert_gradient_to_tensor(self._embeddings) else: embeddings = self._embeddings # Lookup embeddings return tf.nn.embedding_lookup(embeddings, ids, name="embedding_lookup") @property def vocab_size(self): """Size of input vocabulary.""" return self._vocab_size @property def embed_dim(self): """Size of embedding vectors.""" return self._embed_dim @property def embeddings(self): """Returns the Variable containing embeddings. Returns: A 2D Variable containing one embedding vector per row, constructed in the most recent __call__. Raises: base.NotConnectedError: If the module has not been connected to the graph yet, meaning the variables do not exist. """ self._ensure_is_connected() return self._embeddings
sonnet-1
sonnet/python/modules/embed.py
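# --- Usage sketch (editor addition, not part of the repository files above). ---
# Embedding a batch of integer ids with Embed, showing the default-dimension
# rule of thumb (6 * vocab_size ** 0.25) implemented in _embedding_dim. The
# vocabulary size and id values are illustrative assumptions.
import tensorflow.compat.v1 as tf

from sonnet.python.modules.embed import Embed

vocab_size = 10000
# embed_dim is not given, so it defaults to round(6 * 10000 ** 0.25) = 60.
embed = Embed(vocab_size=vocab_size)
ids = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
embeddings = embed(ids)  # shape [2, 3, 60]

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(embed.embed_dim, sess.run(embeddings).shape)  # 60 (2, 3, 60)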
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Recurrent cores in sonnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import mock import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib.eager.python import tfe as contrib_eager nest = contrib_framework.nest BATCH_SIZE = 5 MASK_TUPLE = (True, (False, True)) _state_size_tuple = (3, (4, 5)) _state_size_element = 6 # Use patch to instantiate RNNCore @mock.patch.multiple(snt.RNNCore, __abstractmethods__=set()) @contrib_eager.run_all_tests_in_graph_and_eager_modes class RNNCoreTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( (False, False, _state_size_tuple), (False, True, _state_size_tuple), (True, False, _state_size_tuple), (True, True, _state_size_tuple), (False, False, _state_size_element), (False, True, _state_size_element), (True, False, _state_size_element), (True, True, _state_size_element)) def testInitialStateTuple(self, trainable, use_custom_initial_value, state_size): batch_size = 6 # Set the attribute to the class since it we can't set properties of # abstract classes snt.RNNCore.state_size = state_size flat_state_size = nest.flatten(state_size) core = snt.RNNCore(name="dummy_core") if use_custom_initial_value: flat_initializer = [tf.constant_initializer(2)] * len(flat_state_size) trainable_initializers = nest.pack_sequence_as( structure=state_size, flat_sequence=flat_initializer) else: trainable_initializers = None initial_state = core.initial_state( batch_size, dtype=tf.float32, trainable=trainable, trainable_initializers=trainable_initializers) nest.assert_same_structure(initial_state, state_size) flat_initial_state = nest.flatten(initial_state) for state, size in zip(flat_initial_state, flat_state_size): self.assertEqual(state.get_shape(), [batch_size, size]) self.evaluate(tf.global_variables_initializer()) flat_initial_state_value = self.evaluate(flat_initial_state) for value, size in zip(flat_initial_state_value, flat_state_size): expected_initial_state = np.empty([batch_size, size]) if not trainable: expected_initial_state.fill(0) elif use_custom_initial_value: expected_initial_state.fill(2) else: value_row = value[0] expected_initial_state = np.tile(value_row, (batch_size, 1)) self.assertAllClose(value, expected_initial_state) @parameterized.parameters( (False, _state_size_tuple), (True, _state_size_tuple), (False, _state_size_element), (True, _state_size_element)) def testRegularizers(self, trainable, state_size): batch_size = 6 # Set the attribute to the class since it we can't set properties of # abstract classes snt.RNNCore.state_size = state_size flat_state_size = 
nest.flatten(state_size) core = snt.RNNCore(name="dummy_core") flat_regularizer = ([contrib_layers.l1_regularizer(scale=0.5)] * len(flat_state_size)) trainable_regularizers = nest.pack_sequence_as( structure=state_size, flat_sequence=flat_regularizer) core.initial_state(batch_size, dtype=tf.float32, trainable=trainable, trainable_regularizers=trainable_regularizers) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) if not trainable: self.assertFalse(graph_regularizers) else: self.assertEqual(len(graph_regularizers), len(flat_state_size)) if not tf.executing_eagerly(): for i in range(len(flat_state_size)): self.assertRegexpMatches( graph_regularizers[i].name, ".*l1_regularizer.*") @contrib_eager.run_all_tests_in_graph_and_eager_modes class TrainableInitialState(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters((True, MASK_TUPLE), (True, None), (False, False), (False, None)) def testInitialStateComputation(self, tuple_state, mask): if tuple_state: initial_state = (tf.fill([BATCH_SIZE, 6], 2), (tf.fill([BATCH_SIZE, 7], 3), tf.fill([BATCH_SIZE, 8], 4))) else: initial_state = tf.fill([BATCH_SIZE, 9], 10) trainable_state_module = snt.TrainableInitialState(initial_state, mask=mask) trainable_state = trainable_state_module() flat_trainable_state = nest.flatten(trainable_state) nest.assert_same_structure(initial_state, trainable_state) flat_initial_state = nest.flatten(initial_state) if mask is not None: flat_mask = nest.flatten(mask) else: flat_mask = (True,) * len(flat_initial_state) self.evaluate(tf.global_variables_initializer()) # Check all variables are initialized correctly and return a state that # has the same as it is provided. for trainable_state, initial_state in zip(flat_trainable_state, flat_initial_state): self.assertAllEqual( self.evaluate(trainable_state), self.evaluate(initial_state)) # Change the value of all the trainable variables to ones. for variable in tf.trainable_variables(): self.evaluate(tf.assign(variable, tf.ones_like(variable))) # In eager mode to re-evaluate the module we must re-connect it. trainable_state = trainable_state_module() flat_trainable_state = nest.flatten(trainable_state) # Check that the values of the initial_states have changed if and only if # they are trainable. for trainable_state, initial_state, mask in zip(flat_trainable_state, flat_initial_state, flat_mask): trainable_state_value = self.evaluate(trainable_state) initial_state_value = self.evaluate(initial_state) if mask: expected_value = np.ones_like(initial_state_value) else: expected_value = initial_state_value self.assertAllEqual(trainable_state_value, expected_value) def testBadArguments(self): initial_state = (tf.random_normal([BATCH_SIZE, 6]), (tf.random_normal([BATCH_SIZE, 7]), tf.random_normal([BATCH_SIZE, 8]))) with self.assertRaises(TypeError): snt.TrainableInitialState(initial_state, mask=(True, (False, "foo"))) with self.assertRaises(tf.errors.InvalidArgumentError): snt.TrainableInitialState(initial_state, mask=(True, (False, True)))() # Check that the class checks that the elements of initial_state have # identical rows. self.evaluate(tf.global_variables_initializer()) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/rnn_core_test.py
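# --- Usage sketch (editor addition, not part of the repository files above). ---
# A hedged example of snt.TrainableInitialState, mirroring what the tests above
# exercise: wrapping a core's (zero) initial state so that selected parts of it
# become trainable. The LSTM core, batch size, and the (hidden, cell) mask
# layout are illustrative assumptions.
import sonnet as snt
import tensorflow.compat.v1 as tf

batch_size = 5
lstm = snt.LSTM(hidden_size=16)

# Start from the core's default zero initial state, then make only the first
# state element (assumed to be the hidden state) trainable.
default_state = lstm.initial_state(batch_size, dtype=tf.float32)
trainable_state = snt.TrainableInitialState(default_state, mask=(True, False))()

inputs = tf.random_uniform([batch_size, 8])
output, next_state = lstm(inputs, trainable_state)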
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Wrappers to add residual and skip connections to Sonnet modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from sonnet.python.modules import base from sonnet.python.modules import rnn_core import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework nest = contrib_framework.nest class Residual(base.AbstractModule): """Adds a residual connection to a base module. This module wraps a module M, where if M with traditionally output M(X), Residual(M)(x) = M(x) + x. """ def __init__(self, base_module, name="residual"): super(Residual, self).__init__(name=name) self._base_module = base_module def _build(self, inputs, **kwargs): outputs = self._base_module(inputs, **kwargs) residual = nest.map_structure(lambda inp, out: inp + out, inputs, outputs) return residual class ResidualCore(rnn_core.RNNCore): """Adds a residual connection to a base RNN core. This module wraps a module M, where if M with traditionally output M(X), Residual(M)(x) = M(x) + x. """ def __init__(self, base_core, name="residual_core"): super(ResidualCore, self).__init__(name=name) self._base_core = base_core def _build(self, inputs, prev_state, **kwargs): outputs, new_state = self._base_core(inputs, prev_state, **kwargs) residual = nest.map_structure(lambda inp, out: inp + out, inputs, outputs) return residual, new_state @property def output_size(self): return self._base_core.output_size @property def state_size(self): return self._base_core.state_size def initial_state(self, *args, **kwargs): return self._base_core.initial_state(*args, **kwargs) def zero_state(self, *args, **kwargs): return self._base_core.zero_state(*args, **kwargs) class SkipConnectionCore(rnn_core.RNNCore): """Adds a skip connection to the base RNN core. The output of the wrapped core is the concatenation of the output of the base core with its input. The state of the wrapped core is the state of the base core. """ def __init__(self, base_core, input_shape=None, name="skip_connection_core"): """Construct a SkipConnectionCore. Args: base_core: Base RNNCore to wrap. input_shape: Shape of the input as tuple, excluding the batch size. name: Name of the module. """ super(SkipConnectionCore, self).__init__(name=name) self._base_core = base_core self._input_shape = input_shape def _build(self, inputs, prev_state, **kwargs): if not self._input_shape: self._input_shape = inputs.get_shape()[1:] outputs, new_state = self._base_core(inputs, prev_state, **kwargs) outputs = nest.map_structure(lambda inp, out: tf.concat((inp, out), -1), inputs, outputs) return outputs, new_state @property def output_size(self): if not self._input_shape: raise ValueError( "Output size unknown. You must provide the input_shape to the class' " "constructor or connect the module into the graph." 
) leading_dims = tuple(self._input_shape[:-1]) final_input_dim = self._input_shape[-1] return tf.TensorShape(leading_dims + (self._base_core.output_size[-1] + final_input_dim,)) @property def state_size(self): return self._base_core.state_size def initial_state(self, *args, **kwargs): return self._base_core.initial_state(*args, **kwargs) def zero_state(self, *args, **kwargs): return self._base_core.zero_state(*args, **kwargs)
sonnet-1
sonnet/python/modules/residual.py
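# --- Usage sketch (editor addition, not part of the repository files above). ---
# Wrapping modules with the residual and skip-connection helpers defined in
# residual.py. The layer sizes are illustrative; note that Residual needs the
# wrapped module to preserve the input shape so that M(x) + x is well defined.
import sonnet as snt
import tensorflow.compat.v1 as tf

from sonnet.python.modules.residual import Residual, SkipConnectionCore

inputs = tf.random_uniform([4, 32])

# y = linear(x) + x, so the Linear must map 32 -> 32.
residual_linear = Residual(snt.Linear(output_size=32))
outputs = residual_linear(inputs)  # shape [4, 32]

# Skip connection: the core output is concat([x, lstm(x)], axis=-1).
skip_core = SkipConnectionCore(snt.LSTM(16), input_shape=(32,))
state = skip_core.initial_state(4, dtype=tf.float32)
core_out, next_state = skip_core(inputs, state)  # shape [4, 32 + 16]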
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.batch_norm_v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers def _add_fused_and_unknown_batch_params(test_case_parameters): for params in test_case_parameters: yield dict(fused=False, batch_unknown=False, **params) yield dict(fused=True, batch_unknown=False, **params) yield dict(fused=False, batch_unknown=True, **params) yield dict(fused=True, batch_unknown=True, **params) class BatchNormV2Test(parameterized.TestCase, tf.test.TestCase): def testConstruct(self): inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) batch_norm1 = snt.BatchNormV2(offset=False, scale=False, fused=False) batch_norm1(inputs, is_training=True) err = "Batch normalization doesn't have an offset, so no beta" with self.assertRaisesRegexp(snt.Error, err): _ = batch_norm1.beta err = "Batch normalization doesn't have a scale, so no gamma" with self.assertRaisesRegexp(snt.Error, err): _ = batch_norm1.gamma batch_norm2 = snt.BatchNormV2(offset=True, scale=False) batch_norm2(inputs, is_training=True) _ = batch_norm2.beta batch_norm3 = snt.BatchNormV2(offset=False, scale=True) batch_norm3(inputs, is_training=True) _ = batch_norm3.gamma batch_norm4 = snt.BatchNormV2(offset=True, scale=True) batch_norm4(inputs, is_training=True) _ = batch_norm4.beta _ = batch_norm4.gamma batch_norm4(inputs, is_training=True, test_local_stats=True) batch_norm4(inputs, is_training=tf.constant(True), test_local_stats=tf.constant(True)) is_training_ph = tf.placeholder(tf.bool) test_local_stats_ph = tf.placeholder(tf.bool) batch_norm4(inputs, is_training=is_training_ph, test_local_stats=test_local_stats_ph) @parameterized.parameters( ["NC", "NWC", "NHWC", "NDHWC", "NCW", "NCHW", "NCDHW"]) def testDataFormats(self, data_format): """Check that differing data formats give the correct output shape.""" dim_sizes = { "N": None, "D": 10, "H": 64, "W": 32, "C": 3 } inputs = tf.placeholder_with_default( tf.zeros([dim_sizes[dim_name] or 5 for dim_name in data_format]), [dim_sizes[dim_name] for dim_name in data_format]) bn_data_formats = [data_format] if data_format.endswith("C"): bn_data_formats.append(None) for bn_data_format in bn_data_formats: bn = snt.BatchNormV2(data_format=bn_data_format, offset=False) bn(inputs, is_training=True) mean_shape = bn.moving_mean.get_shape() correct_mean_shape = [ dim_sizes["C"] if dim_name == "C" else 1 for dim_name in data_format ] self.assertEqual(mean_shape, correct_mean_shape) for use_gpu in [True, False]: with self.test_session(use_gpu=use_gpu) as sess: for bn_data_format in "NC NWC NHWC NDHWC NCW NCHW NCDHW".split(): if len(data_format) != 
len(bn_data_format): bn = snt.BatchNormV2(data_format=bn_data_format, offset=False) err = r"Incorrect data format {} for input shape .*".format( bn_data_format) with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): outputs = bn(inputs, is_training=True) sess.run(outputs) @parameterized.named_parameters( ("Float32", tf.float32), ) def testDataType(self, dtype): inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3]) batch_norm = snt.BatchNormV2(offset=True, scale=True) output = batch_norm(inputs, is_training=True) self.assertEqual(dtype, output.dtype) self.assertEqual(dtype, batch_norm.moving_mean.dtype) self.assertEqual(dtype, batch_norm.moving_variance.dtype) self.assertEqual(dtype, batch_norm.gamma.dtype) self.assertEqual(dtype, batch_norm.beta.dtype) @parameterized.named_parameters( ("Float16", tf.float16), ("BFloat16", tf.bfloat16), ) def test16Bit(self, dtype): inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3]) batch_norm = snt.BatchNormV2(offset=True, scale=True, fused=False) output = batch_norm(inputs, is_training=True) self.assertEqual(dtype, output.dtype) self.assertEqual(tf.float32, batch_norm.moving_mean.dtype) self.assertEqual(tf.float32, batch_norm.moving_variance.dtype) self.assertEqual(dtype, batch_norm.gamma.dtype) self.assertEqual(dtype, batch_norm.beta.dtype) def _get_inputs(self, dtype=tf.float32): v = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=dtype.as_numpy_dtype) input_v = np.array([v, v, v, v, v, v, v]) inputs = tf.constant(input_v) return v, input_v, inputs def testUpdateImproveStatistics(self): """Test that updating the moving_mean improves statistics.""" _, _, inputs = self._get_inputs() # Use small decay_rate to update faster. bn = snt.BatchNormV2( offset=False, scale=False, decay_rate=0.1, update_ops_collection=tf.GraphKeys.UPDATE_OPS) out1 = bn(inputs, is_training=False, test_local_stats=False) # Build the update ops. bn(inputs, is_training=True) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) out_v = sess.run(out1) # Before updating the moving_mean the results are off. self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5) sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))) # After updating the moving_mean the results are better. out_v = sess.run(out1) self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2) @parameterized.named_parameters( ("Float16", tf.float16), ("Float32", tf.float32), ) def testCheckStatsDouble(self, dtype): """The correct statistics are being computed for double connection. Connected in parallel, it's ill-defined what order the updates will happen in. A double update could happen, or two sequential updates. E.g. If decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the value could progress as 1.00 -> 0.90 -> 0.81, if the second update uses the fresh second value. Or as 1.00 -> 0.90 -> 0.80 if the second update uses the stale first value. We fix this here by running them in sequential run calls to ensure that this test is deterministic. The two situations are minimally different, especially if decay_rate is close to one (e.g. the default of 0.999). Args: dtype: TensorFlow datatype of input test batch. 
""" v, _, inputs = self._get_inputs(dtype) bn = snt.BatchNormV2( offset=False, scale=False, decay_rate=0.9, update_ops_collection=tf.GraphKeys.UPDATE_OPS) with tf.name_scope("net1"): bn(inputs, is_training=True) with tf.name_scope("net2"): bn(inputs, is_training=True) update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net1")) self.assertLen(update_ops_1, 2) update_ops_2 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net2")) self.assertLen(update_ops_2, 2) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) self.assertAllClose(np.zeros([1, 6]), mm) self.assertAllClose(np.ones([1, 6]), mv) sess.run(update_ops_1) sess.run(update_ops_2) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) correct_mm = (1.0 - bn._decay_rate) * v correct_mm = (1.0 - bn._decay_rate) * v + bn._decay_rate * correct_mm correct_mv = np.ones([1, 6]) * bn._decay_rate**2 atol = 1.e-2 if dtype == tf.float16 else 1.e-6 self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm, atol=atol) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv, atol=atol) def testCheckStatsPython(self): """The correct normalization is being used for different Python flags.""" v, input_v, inputs = self._get_inputs() bn = snt.BatchNormV2( offset=False, scale=False, decay_rate=0.5, update_ops_collection=tf.GraphKeys.UPDATE_OPS ) out1 = bn(inputs, is_training=True, test_local_stats=True) out2 = bn(inputs, is_training=False, test_local_stats=True) out3 = bn(inputs, is_training=False, test_local_stats=False) update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) self.assertLen(update_ops, 2) with tf.control_dependencies(update_ops): out1 = tf.identity(out1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) out_v = sess.run(out1) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) # Single moving average steps should have happened. correct_mm = (1.0 - bn._decay_rate) * v correct_mv = np.ones([1, 6]) * bn._decay_rate self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv) self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5) out2_, out3_ = sess.run([out2, out3]) # Out2: Tested using local batch stats. # Better numerical precision due to using shifted estimators. self.assertAllClose(np.zeros([7, 6]), out2_, rtol=1e-6, atol=1e-5) # Out3: Tested using moving average stats. self.assertAllClose( (input_v - mm) / np.sqrt(mv + bn._eps), out3_) @parameterized.named_parameters( ("UseUpdateCollection", tf.GraphKeys.UPDATE_OPS), ("UseDifferentUpdateCollection", "my_update_ops"), ("UseControlDependencies", None), ) def testCheckStatsInGraph(self, update_ops_collection): """The correct normalization is being used for different TF flags.""" v, input_v, inputs = self._get_inputs() bn = snt.BatchNormV2( offset=False, scale=False, decay_rate=0.5, update_ops_collection=update_ops_collection) is_training = tf.placeholder(tf.bool) test_local_stats = tf.placeholder(tf.bool) out = bn(inputs, is_training=is_training, test_local_stats=test_local_stats) if update_ops_collection is not None: update_ops = tuple(tf.get_collection(update_ops_collection)) self.assertLen(update_ops, 2) with tf.control_dependencies(update_ops): out = tf.identity(out) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) # Run with `is_training=True`, `test_local_stats=True`. 
out_v = sess.run(out, feed_dict={is_training: True, test_local_stats: True}) # Moving averages not updated until after calculation so shifted # stats are poor. self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5) ops = (bn.moving_mean, bn.moving_variance) mm1, mv1 = sess.run(ops) # Single moving average step should have happened. correct_mm = (1.0 - bn._decay_rate) * v correct_mv = np.ones([1, 6]) * bn._decay_rate self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm1) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv1) # Run with `is_training=False`, `test_local_stats=True`. # Should have used local batch stats. out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: True}) # Moving averages should not have changed. mm2, mv2 = sess.run(ops) self.assertAllClose(mm1, mm2) self.assertAllClose(mv1, mv2) self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5) # Run with `is_training=False`, `test_local_stats=False`. # Should have used moving average stats. out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: False}) # Moving averages should not have changed. mm3, mv3 = sess.run(ops) self.assertAllClose(mm1, mm3) self.assertAllClose(mv1, mv3) self.assertAllClose( (input_v - mm3) / np.sqrt(mv3 + bn._eps), out_v) def testSharing(self): """Check that the correct number of variables are made when sharing.""" inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) bn = snt.BatchNormV2( offset=True, scale=True, update_ops_collection=tf.GraphKeys.UPDATE_OPS) bn(inputs1, is_training=True) bn(inputs2, is_training=False) self.assertLen(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES), 4) # We should have one set of update ops update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.assertLen(update_ops, 2) def testUpdatesInsideCond(self): """Demonstrate that updates inside a cond fail.""" _, input_v, inputs = self._get_inputs() bn = snt.BatchNormV2( offset=False, scale=False, decay_rate=0.5, update_ops_collection=tf.GraphKeys.UPDATE_OPS) condition = tf.placeholder(tf.bool) cond = tf.cond(condition, lambda: bn(inputs, is_training=True), lambda: inputs) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) out_v = sess.run(cond, feed_dict={condition: False}) self.assertAllClose(input_v, out_v) out_v = sess.run(cond, feed_dict={condition: True}) self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-4, atol=1e-4) # Variables are accessible outside the tf.cond() mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) self.assertAllClose(np.zeros([1, 6]), mm) self.assertAllClose(np.ones([1, 6]), mv) # Tensors are not accessible outside the tf.cond() with self.assertRaisesRegexp(ValueError, "Operation"): sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))) def testVariableBatchSize(self): """Check the inputs batch_size can change.""" inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNormV2( offset=False, scale=False) # Outputs should be equal to inputs. 
out = bn(inputs, is_training=False, test_local_stats=False) init = tf.global_variables_initializer() update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) with self.test_session() as sess: sess.run(init) for batch_size in [1, 3, 10]: input_data = np.random.rand(batch_size, *inputs_shape) out_v = sess.run(out, feed_dict={inputs: input_data}) self.assertAllClose(input_data / np.sqrt(1.0 + bn._eps), out_v) sess.run(update_ops, feed_dict={inputs: input_data}) def testInvalidInitializerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"): snt.BatchNormV2( initializers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Initializer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNormV2(initializers={"gamma": tf.zeros([1, 2, 3])}) def testInvalidPartitionerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"): snt.BatchNormV2( partitioners={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Partitioner for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNormV2(partitioners={"gamma": tf.zeros([1, 2, 3])}) def testInvalidRegularizationParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"): snt.BatchNormV2( regularizers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Regularizer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNormV2(regularizers={"gamma": tf.zeros([1, 2, 3])}) @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testInitializers(self, offset, scale): initializers = { "moving_mean": tf.constant_initializer(2.0), "moving_variance": tf.constant_initializer(3.0), } if scale: initializers["gamma"] = tf.constant_initializer(4.0) if offset: initializers["beta"] = tf.constant_initializer(5.0) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNormV2( offset=offset, scale=scale, initializers=initializers) self.assertEqual(bn.initializers, initializers) bn(inputs, is_training=True) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) ones_v = np.ones([1, 1, inputs_shape[-1]]) self.assertAllClose(bn.moving_mean.eval(), ones_v * 2.0) self.assertAllClose(bn.moving_variance.eval(), ones_v * 3.0) if scale: self.assertAllClose(bn.gamma.eval(), ones_v * 4.0) if offset: self.assertAllClose(bn.beta.eval(), ones_v * 5.0) @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testRegularizersInRegularizationLosses(self, offset, scale): regularizers = {} if offset: regularizers["beta"] = contrib_layers.l1_regularizer(scale=0.5) if scale: regularizers["gamma"] = contrib_layers.l2_regularizer(scale=0.5) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNormV2( offset=offset, scale=scale, regularizers=regularizers) self.assertEqual(bn.regularizers, regularizers) bn(inputs, is_training=True) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) if not offset and not scale: self.assertFalse(graph_regularizers) if offset and not scale: self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if scale and not offset: 
self.assertRegexpMatches(graph_regularizers[0].name, ".*l2_regularizer.*") if scale and offset: self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*") @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testPartitioners(self, offset, scale): partitioners = {} if scale: partitioners["gamma"] = tf.fixed_size_partitioner(num_shards=2) if offset: partitioners["beta"] = tf.fixed_size_partitioner(num_shards=2) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNormV2( offset=offset, scale=scale, partitioners=partitioners) self.assertEqual(bn.partitioners, partitioners) bn(inputs, is_training=True) if scale: self.assertLen(tf.global_variables("batch_norm/gamma"), 2) if offset: self.assertLen(tf.global_variables("batch_norm/beta"), 2) @parameterized.named_parameters( ("IsTrainingBoolVal", True, False, False, True), ("IsTestingBoolVal", False, True, False, True), ("IsTestingBoolValMovingAverage", False, False, False, True), ("IsTrainingScaleBoolVal", True, False, True, True), ("IsTestingScaleBoolVal", False, True, True, True), ("IsTestingScaleBoolValMovingAverage", False, False, True, True), ("IsTrainingTensorVal", True, False, False, False), ("IsTestingTensorVal", False, True, False, False), ("IsTestingTensorValMovingAverage", False, False, False, False), ("IsTrainingScaleTensorVal", True, False, True, False), ("IsTestingScaleTensorVal", False, True, True, False), ("IsTestingScaleTensorValMovingAverage", False, False, True, False)) def testFusedBatchNormV2(self, is_training, test_local_stats, scale, is_training_python_bool): input_shape = (32, 9, 9, 8) iterations = 5 x = tf.placeholder(tf.float32, shape=input_shape) bn1 = snt.BatchNormV2(scale=scale) bn2 = snt.BatchNormV2(fused=False, scale=scale) xx = np.random.random(input_shape) feed_dict = {x: xx} if not is_training_python_bool: is_training_node = tf.placeholder(tf.bool, shape=()) feed_dict.update({is_training_node: is_training}) is_training = is_training_node test_local_stats_node = tf.placeholder(tf.bool, shape=()) feed_dict.update({test_local_stats_node: test_local_stats}) test_local_stats = test_local_stats_node o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats) o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) params = [ o1, o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean, bn2._moving_variance ] for _ in range(iterations): y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict) self.assertAllClose(y1, y2, atol=1e-4) self.assertAllClose(mean1, mean2, atol=1e-4) self.assertAllClose(var1, var2, atol=1e-4) @parameterized.named_parameters( ("IsTraining", True, False), ("IsTesting", False, True), ("IsTestingMovingAverage", False, False)) def testFusedBatchNormFloat16(self, is_training, test_local_stats): input_shape = (31, 7, 7, 5) iterations = 3 x = tf.placeholder(tf.float16, shape=input_shape) bn1 = snt.BatchNormV2(fused=False) bn2 = snt.BatchNormV2() feed_dict = {x: np.random.random(input_shape)} o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats) o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) params = [ o1, 
o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean, bn2._moving_variance ] for _ in range(iterations): y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict) self.assertAllClose(y1, y2, atol=1e-2) self.assertAllClose(mean1, mean2, atol=1e-2) self.assertAllClose(var1, var2, atol=1e-2) def testCheckpointCompatibility(self): save_path = os.path.join(self.get_temp_dir(), "basic_save_restore") input_shape_1 = (31, 7, 7, 5) input_shape_2 = (31, 5, 7, 7) x1 = tf.placeholder(tf.float32, shape=input_shape_1) bn1 = snt.BatchNormV2(data_format="NHWC") bn1(x1, is_training=True) saver1 = snt.get_saver(bn1) x2 = tf.placeholder(tf.float32, shape=input_shape_2) bn2 = snt.BatchNormV2(data_format="NCHW") bn2(x2, is_training=False) saver2 = snt.get_saver(bn2) x3 = tf.placeholder(tf.float32, shape=input_shape_1) bn3 = snt.BatchNormV2(data_format="NCHW") bn3(x3, is_training=False) saver3 = snt.get_saver(bn3) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) saver1.save(sess, save_path) saver2.restore(sess, save_path) with self.assertRaises(tf.errors.InvalidArgumentError): saver3.restore(sess, save_path) @parameterized.parameters(*_add_fused_and_unknown_batch_params([ {"shape": [2, 10], "data_format": "NC"}, {"shape": [2, None, 3], "data_format": "NWC"}, {"shape": [2, 64, None], "data_format": "NCW"}, {"shape": [8, None, None, 3], "data_format": "NHWC"}, {"shape": [8, 10, None, None], "data_format": "NCHW"}, {"shape": [4, None, None, None, 10], "data_format": "NDHWC"}, {"shape": [4, 42, None, None, None], "data_format": "NCDHW"}, # We also check that tensors which are fully defined work correctly, as # the new codepath for unknown spatial size has a likelihood of causing # bugs where the output shape is unknown, but it previously was known. {"shape": [2, 640, 3], "data_format": "NWC"}, {"shape": [2, 64, 480], "data_format": "NCW"}, {"shape": [2, 32, 32, 3], "data_format": "NHWC"}, {"shape": [2, 3, 72, 96], "data_format": "NCHW"}, {"shape": [4, 84, 83, 82, 10], "data_format": "NDHWC"}, {"shape": [4, 42, 10, 48, 64], "data_format": "NCDHW"}])) def testDynamicImageShape(self, shape, data_format, fused, batch_unknown): """Check that tensors with unknown spatial dimensions work.""" if batch_unknown: shape[0] = None input_ph = tf.placeholder(tf.float32, shape=shape) bn = snt.BatchNormV2(data_format=data_format, fused=fused) output_train = bn(input_ph, is_training=True) output_test = bn(input_ph, is_training=False) self.assertEqual(output_train.get_shape().as_list(), output_test.get_shape().as_list()) # Check that no information about the shape has been erased from the input. self.assertEqual(output_train.get_shape().as_list(), input_ph.get_shape().as_list()) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/batch_norm_v2_test.py
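# --- Usage sketch (editor addition, not part of the repository files above). ---
# The train/test pattern the BatchNormV2 tests above exercise: one connection
# with is_training=True, one with is_training=False, and the moving-average
# update ops gathered from tf.GraphKeys.UPDATE_OPS and run alongside the train
# step. The loss, optimizer and tensor shapes are illustrative assumptions.
import sonnet as snt
import tensorflow.compat.v1 as tf

inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
targets = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])

bn = snt.BatchNormV2(
    offset=True, scale=True,
    update_ops_collection=tf.GraphKeys.UPDATE_OPS)

train_out = bn(inputs, is_training=True)
test_out = bn(inputs, is_training=False, test_local_stats=False)

loss = tf.reduce_mean(tf.square(train_out - targets))
optimizer = tf.train.GradientDescentOptimizer(0.1)

# Ensure the moving mean/variance are updated whenever we take a train step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = optimizer.minimize(loss)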
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.batch_norm.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers from tensorflow.python.ops import variables class BatchNormTest(parameterized.TestCase, tf.test.TestCase): def testConstruct(self): inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) batch_norm1 = snt.BatchNorm(offset=False, scale=False) batch_norm1(inputs, is_training=True) err = "Batch normalization doesn't have an offset, so no beta" with self.assertRaisesRegexp(snt.Error, err): _ = batch_norm1.beta err = "Batch normalization doesn't have a scale, so no gamma" with self.assertRaisesRegexp(snt.Error, err): _ = batch_norm1.gamma batch_norm2 = snt.BatchNorm(offset=True, scale=False) batch_norm2(inputs, is_training=True) _ = batch_norm2.beta batch_norm3 = snt.BatchNorm(offset=False, scale=True) batch_norm3(inputs, is_training=True) _ = batch_norm3.gamma batch_norm4 = snt.BatchNorm(offset=True, scale=True) batch_norm4(inputs, is_training=True) _ = batch_norm4.beta _ = batch_norm4.gamma batch_norm4(inputs, is_training=True, test_local_stats=True) batch_norm4(inputs, is_training=tf.constant(True), test_local_stats=tf.constant(True)) is_training_ph = tf.placeholder(tf.bool) test_local_stats_ph = tf.placeholder(tf.bool) batch_norm4(inputs, is_training=is_training_ph, test_local_stats=test_local_stats_ph) def testReductionIndices(self): """Check that differing reduction indices give the correct output shape.""" inputs = tf.placeholder(tf.float32, shape=[None, 64, 32, 3]) bn1 = snt.BatchNorm(axis=[0], offset=False) bn1(inputs, is_training=True) self.assertEqual(bn1.moving_mean.get_shape(), (1, 64, 32, 3)) bn2 = snt.BatchNorm(axis=[0, 1], offset=False) bn2(inputs, is_training=True) self.assertEqual(bn2.moving_mean.get_shape(), (1, 1, 32, 3)) bn3 = snt.BatchNorm(axis=[0, 2], offset=False) bn3(inputs, is_training=True) self.assertEqual(bn3.moving_mean.get_shape(), (1, 64, 1, 3)) bn4 = snt.BatchNorm(offset=False) bn4(inputs, is_training=True) self.assertEqual(bn4.moving_mean.get_shape(), (1, 1, 1, 3)) err = (r"Too many indices specified in axis: " r"len\(\[0, 1, 2, 3, 0\]\) > len\(\(\?, 64, 32, 3\)\)") with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): bn5 = snt.BatchNorm(axis=[0, 1, 2, 3, 0]) bn5(inputs, is_training=True) err = r"One or more index in axis is too large for input shape: \[4\] >= 4" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): bn6 = snt.BatchNorm(axis=[4]) bn6(inputs, is_training=True) err = r"Indices in axis must be non-negative: \[-1\] < 0" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): bn7 = snt.BatchNorm(axis=[-1]) 
bn7(inputs, is_training=True) @parameterized.named_parameters( ("Float32", tf.float32), ("Float64", tf.float64), ) def testDataType(self, dtype): inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3]) batch_norm = snt.BatchNorm(offset=True, scale=True) output = batch_norm(inputs, is_training=True) self.assertEqual(dtype, output.dtype) self.assertEqual(dtype, batch_norm.moving_mean.dtype.base_dtype) self.assertEqual(dtype, batch_norm.moving_variance.dtype.base_dtype) self.assertEqual(dtype, batch_norm.gamma.dtype.base_dtype) self.assertEqual(dtype, batch_norm.beta.dtype.base_dtype) @parameterized.named_parameters( ("Float16", tf.float16), ("BFloat16", tf.bfloat16), ) def test16Bit(self, dtype): inputs = tf.placeholder(dtype, shape=[None, 64, 32, 3]) batch_norm = snt.BatchNorm(offset=True, scale=True) output = batch_norm(inputs, is_training=True) self.assertEqual(dtype, output.dtype) self.assertEqual(tf.float32, batch_norm.moving_mean.dtype.base_dtype) self.assertEqual(tf.float32, batch_norm.moving_variance.dtype.base_dtype) self.assertEqual(dtype, batch_norm.gamma.dtype.base_dtype) self.assertEqual(dtype, batch_norm.beta.dtype.base_dtype) def _get_inputs(self, dtype=tf.float32): v = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], dtype=dtype.as_numpy_dtype) input_v = np.array([v, v, v, v, v, v, v]) inputs = tf.constant(input_v) return v, input_v, inputs def testUpdateImproveStatistics(self): """Test that updating the moving_mean improves statistics.""" _, _, inputs = self._get_inputs() # Use small decay_rate to update faster. bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.1) out1 = bn(inputs, is_training=False, test_local_stats=False) # Build the update ops. bn(inputs, is_training=True) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) out_v = sess.run(out1) # Before updating the moving_mean the results are off. self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 2, 5) sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))) # After updating the moving_mean the results are better. out_v = sess.run(out1) self.assertBetween(np.max(np.abs(np.zeros([7, 6]) - out_v)), 1, 2) @parameterized.named_parameters( ("Float16", tf.float16), ("Float32", tf.float32), ("Float64", tf.float64), ) def testCheckStatsDouble(self, dtype): """The correct statistics are being computed for double connection. Connected in parallel, it's ill-defined what order the updates will happen in. A double update could happen, or two sequential updates. E.g. If decay_rate is 0.9, the start value is 1.0, and the target value is 0.0, the value could progress as 1.00 -> 0.90 -> 0.81, if the second update uses the fresh second value. Or as 1.00 -> 0.90 -> 0.80 if the second update uses the stale first value. We fix this here by running them in sequential run calls to ensure that this test is deterministic. The two situations are minimally different, especially if decay_rate is close to one (e.g. the default of 0.999). Args: dtype: TensorFlow datatype of input test batch. 
""" v, _, inputs = self._get_inputs(dtype) bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.9) with tf.name_scope("net1"): bn(inputs, is_training=True) with tf.name_scope("net2"): bn(inputs, is_training=True) update_ops_1 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net1")) self.assertEqual(len(update_ops_1), 2) update_ops_2 = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS, "net2")) self.assertEqual(len(update_ops_2), 2) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) self.assertAllClose(np.zeros([1, 6]), mm) self.assertAllClose(np.ones([1, 6]), mv) sess.run(update_ops_1) sess.run(update_ops_2) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) correct_mm = (1.0 - bn._decay_rate) * v correct_mm = (1.0 - bn._decay_rate) * v + bn._decay_rate * correct_mm correct_mv = np.ones([1, 6]) * bn._decay_rate**2 atol = 1.e-2 if dtype == tf.float16 else 1.e-6 self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm, atol=atol) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv, atol=atol) def testCheckStatsPython(self): """The correct normalization is being used for different Python flags.""" v, input_v, inputs = self._get_inputs() bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5) out1 = bn(inputs, is_training=True, test_local_stats=True) out2 = bn(inputs, is_training=False, test_local_stats=True) out3 = bn(inputs, is_training=False, test_local_stats=False) update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) self.assertEqual(len(update_ops), 2) with tf.control_dependencies(update_ops): out1 = tf.identity(out1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) out_v = sess.run(out1) mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) # Single moving average steps should have happened. correct_mm = (1.0 - bn._decay_rate) * v correct_mv = np.ones([1, 6]) * bn._decay_rate self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv) self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5) out2_, out3_ = sess.run([out2, out3]) # Out2: Tested using local batch stats. # Better numerical precision due to using shifted estimators. self.assertAllClose(np.zeros([7, 6]), out2_) # Out3: Tested using moving average stats. self.assertAllClose( (input_v - mm) / np.sqrt(mv + bn._eps), out3_) @parameterized.named_parameters( ("UseUpdateCollection", tf.GraphKeys.UPDATE_OPS), ("UseDifferentUpdateCollection", "my_update_ops"), ("UseControlDependencies", None), ) def testCheckStatsInGraph(self, update_ops_collection): """The correct normalization is being used for different TF flags.""" v, input_v, inputs = self._get_inputs() bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5, update_ops_collection=update_ops_collection) is_training = tf.placeholder(tf.bool) test_local_stats = tf.placeholder(tf.bool) out = bn(inputs, is_training=is_training, test_local_stats=test_local_stats) if update_ops_collection is not None: update_ops = tuple(tf.get_collection(update_ops_collection)) self.assertEqual(len(update_ops), 2) with tf.control_dependencies(update_ops): out = tf.identity(out) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) # Run with `is_training=True`, `test_local_stats=True`. out_v = sess.run(out, feed_dict={is_training: True, test_local_stats: True}) # Moving averages not updated until after calculation so shifted # stats are poor. 
self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-6, atol=1e-5) ops = (bn.moving_mean, bn.moving_variance) mm1, mv1 = sess.run(ops) # Single moving average step should have happened. correct_mm = (1.0 - bn._decay_rate) * v correct_mv = np.ones([1, 6]) * bn._decay_rate self.assertAllClose(np.reshape(correct_mm, [1, 6]), mm1) self.assertAllClose(np.reshape(correct_mv, [1, 6]), mv1) # Run with `is_training=False`, `test_local_stats=True`. # Should have used local batch stats. out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: True}) # Moving averages should not have changed. mm2, mv2 = sess.run(ops) self.assertAllClose(mm1, mm2) self.assertAllClose(mv1, mv2) self.assertAllClose(np.zeros([7, 6]), out_v) # Run with `is_training=False`, `test_local_stats=False`. # Should have used moving average stats. out_v = sess.run(out, feed_dict={is_training: False, test_local_stats: False}) # Moving averages should not have changed. mm3, mv3 = sess.run(ops) self.assertAllClose(mm1, mm3) self.assertAllClose(mv1, mv3) self.assertAllClose( (input_v - mm3) / np.sqrt(mv3 + bn._eps), out_v) def testSharing(self): """Check that the correct number of variables are made when sharing.""" inputs1 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) inputs2 = tf.placeholder(tf.float32, shape=[None, 64, 64, 3]) bn = snt.BatchNorm(offset=True, scale=True) bn(inputs1, is_training=True) bn(inputs2, is_training=False) self.assertLen(tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES), 4) # We should have one set of update ops update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) self.assertEqual(len(update_ops), 2) def testUpdatesInsideCond(self): """Demonstrate that updates inside a cond fail. """ _, input_v, inputs = self._get_inputs() bn = snt.BatchNorm(offset=False, scale=False, decay_rate=0.5) condition = tf.placeholder(tf.bool) cond = tf.cond(condition, lambda: bn(inputs, is_training=True), lambda: inputs) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) out_v = sess.run(cond, feed_dict={condition: False}) self.assertAllClose(input_v, out_v) out_v = sess.run(cond, feed_dict={condition: True}) self.assertAllClose(np.zeros([7, 6]), out_v, rtol=1e-4, atol=1e-4) # Variables are accessible outside the tf.cond() mm, mv = sess.run([bn.moving_mean, bn.moving_variance]) self.assertAllClose(np.zeros([1, 6]), mm) self.assertAllClose(np.ones([1, 6]), mv) # Tensors are not accessible outside the tf.cond() with self.assertRaisesRegexp(ValueError, "Operation"): sess.run(tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS))) def testVariableBatchSize(self): """Check the inputs batch_size can change.""" inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNorm(offset=False, scale=False) # Outputs should be equal to inputs. 
out = bn(inputs, is_training=False, test_local_stats=False) init = tf.global_variables_initializer() update_ops = tuple(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) with self.test_session() as sess: sess.run(init) for batch_size in [1, 3, 10]: input_data = np.random.rand(batch_size, *inputs_shape) out_v = sess.run(out, feed_dict={inputs: input_data}) self.assertAllClose(input_data / np.sqrt(1.0 + bn._eps), out_v) sess.run(update_ops, feed_dict={inputs: input_data}) def testInvalidInitializerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*"): snt.BatchNorm( initializers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Initializer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNorm(initializers={"gamma": tf.zeros([1, 2, 3])}) def testInvalidPartitionerParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*"): snt.BatchNorm( partitioners={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Partitioner for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNorm(partitioners={"gamma": tf.zeros([1, 2, 3])}) def testInvalidRegularizationParameters(self): with self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*"): snt.BatchNorm( regularizers={"not_gamma": contrib_layers.l1_regularizer(0.5)}) err = "Regularizer for 'gamma' is not a callable function" with self.assertRaisesRegexp(TypeError, err): snt.BatchNorm(regularizers={"gamma": tf.zeros([1, 2, 3])}) @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testInitializers(self, offset, scale): initializers = { "moving_mean": tf.constant_initializer(2.0), "moving_variance": tf.constant_initializer(3.0), } if scale: initializers["gamma"] = tf.constant_initializer(4.0) if offset: initializers["beta"] = tf.constant_initializer(5.0) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNorm(offset=offset, scale=scale, initializers=initializers) self.assertEqual(bn.initializers, initializers) bn(inputs, is_training=True) init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) ones_v = np.ones([1, 1, inputs_shape[-1]]) self.assertAllClose(bn.moving_mean.eval(), ones_v * 2.0) self.assertAllClose(bn.moving_variance.eval(), ones_v * 3.0) if scale: self.assertAllClose(bn.gamma.eval(), ones_v * 4.0) if offset: self.assertAllClose(bn.beta.eval(), ones_v * 5.0) @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testRegularizersInRegularizationLosses(self, offset, scale): regularizers = {} if offset: regularizers["beta"] = contrib_layers.l1_regularizer(scale=0.5) if scale: regularizers["gamma"] = contrib_layers.l2_regularizer(scale=0.5) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNorm(offset=offset, scale=scale, regularizers=regularizers) self.assertEqual(bn.regularizers, regularizers) bn(inputs, is_training=True) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) if not offset and not scale: self.assertFalse(graph_regularizers) if offset and not scale: self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if scale and not offset: 
self.assertRegexpMatches(graph_regularizers[0].name, ".*l2_regularizer.*") if scale and offset: self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") self.assertRegexpMatches(graph_regularizers[1].name, ".*l2_regularizer.*") @parameterized.named_parameters( ("BNNoOffsetScale", False, True), ("BNNoOffsetNoScale", False, False), ("BNOffsetScale", True, True), ("BNOffsetNoScale", True, False), ) def testPartitioners(self, offset, scale): partitioners = {} if scale: partitioners["gamma"] = tf.fixed_size_partitioner(num_shards=2) if offset: partitioners["beta"] = tf.fixed_size_partitioner(num_shards=2) inputs_shape = [10, 10] inputs = tf.placeholder(tf.float32, shape=[None] + inputs_shape) bn = snt.BatchNorm(offset=offset, scale=scale, partitioners=partitioners) self.assertEqual(bn.partitioners, partitioners) bn(inputs, is_training=True) if scale: self.assertEqual(type(bn.gamma), variables.PartitionedVariable) if offset: self.assertEqual(type(bn.beta), variables.PartitionedVariable) @parameterized.named_parameters( ("IsTrainingBoolVal", True, False, False, True), ("IsTestingBoolVal", False, True, False, True), ("IsTestingBoolValMovingAverage", False, False, False, True), ("IsTrainingScaleBoolVal", True, False, True, True), ("IsTestingScaleBoolVal", False, True, True, True), ("IsTestingScaleBoolValMovingAverage", False, False, True, True), ("IsTrainingTensorVal", True, False, False, False), ("IsTestingTensorVal", False, True, False, False), ("IsTestingTensorValMovingAverage", False, False, False, False), ("IsTrainingScaleTensorVal", True, False, True, False), ("IsTestingScaleTensorVal", False, True, True, False), ("IsTestingScaleTensorValMovingAverage", False, False, True, False)) def testFusedBatchNorm(self, is_training, test_local_stats, scale, is_training_python_bool): input_shape = (32, 9, 9, 8) iterations = 5 x = tf.placeholder(tf.float32, shape=input_shape) bn1 = snt.BatchNorm(scale=scale, update_ops_collection=None) with self.assertRaises(NotImplementedError): # Input does not have 4 dimensions but fused is True. 
xlinear = tf.placeholder(tf.float32, shape=(2, 3)) snt.BatchNorm(fused=True, scale=scale)(xlinear, is_training=True) with self.assertRaises(ValueError): # The axis is incorrect snt.BatchNorm(axis=(1, 2, 3), fused=True, scale=scale)( x, is_training=True) bn2 = snt.BatchNorm(scale=scale, fused=True, update_ops_collection=None) xx = np.random.random(input_shape) feed_dict = {x: xx} if not is_training_python_bool: is_training_node = tf.placeholder(tf.bool, shape=()) feed_dict.update({is_training_node: is_training}) is_training = is_training_node test_local_stats_node = tf.placeholder(tf.bool, shape=()) feed_dict.update({test_local_stats_node: test_local_stats}) test_local_stats = test_local_stats_node o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats) o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) params = [ o1, o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean, bn2._moving_variance ] for _ in range(iterations): y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict) self.assertAllClose(y1, y2, atol=1e-4) self.assertAllClose(mean1, mean2, atol=1e-4) self.assertAllClose(var1, var2, atol=1e-4) @parameterized.named_parameters( ("IsTraining", True, False), ("IsTesting", False, True), ("IsTestingMovingAverage", False, False)) def testFusedBatchNormFloat16(self, is_training, test_local_stats): input_shape = (31, 7, 7, 5) iterations = 3 x = tf.placeholder(tf.float16, shape=input_shape) bn1 = snt.BatchNorm(update_ops_collection=None) bn2 = snt.BatchNorm(fused=True, update_ops_collection=None) feed_dict = {x: np.random.random(input_shape)} o1 = bn1(x, is_training=is_training, test_local_stats=test_local_stats) o2 = bn2(x, is_training=is_training, test_local_stats=test_local_stats) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) params = [ o1, o2, bn1._moving_mean, bn1._moving_variance, bn2._moving_mean, bn2._moving_variance ] for _ in range(iterations): y1, y2, mean1, var1, mean2, var2 = sess.run(params, feed_dict=feed_dict) self.assertAllClose(y1, y2, atol=1e-2) self.assertAllClose(mean1, mean2, atol=1e-2) self.assertAllClose(var1, var2, atol=1e-2) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/batch_norm_test.py
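The test file above exercises snt.BatchNorm's `is_training` / `test_local_stats` flags and its reliance on the `tf.GraphKeys.UPDATE_OPS` collection for moving-statistics updates. The following is a minimal usage sketch, not part of the repository: it only shows the connection pattern the tests assume, and the shapes, batch data, and `decay_rate` value are arbitrary illustrative choices.

```python
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph-mode TF1 session, as the tests assume

inputs = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
bn = snt.BatchNorm(offset=True, scale=True, decay_rate=0.9)

# Training-time connection: normalizes with batch statistics and registers
# moving-average update ops in the UPDATE_OPS collection.
train_out = bn(inputs, is_training=True)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

# Test-time connection reuses the same variables and normalizes with the
# accumulated moving statistics instead of the local batch statistics.
test_out = bn(inputs, is_training=False, test_local_stats=False)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  batch = np.random.rand(8, 64, 64, 3).astype(np.float32)
  # Run the update ops explicitly, otherwise the moving statistics never move.
  sess.run([train_out] + list(update_ops), feed_dict={inputs: batch})
  sess.run(test_out, feed_dict={inputs: batch})
```

As the `testUpdatesInsideCond` case above illustrates, these update ops should be run (or wrapped in `tf.control_dependencies`) outside of `tf.cond` branches.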
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Base class for TensorFlow Sonnet recurrent cores. This file contains the Abstract Base Class for defining Recurrent Cores in TensorFlow. A Recurrent Core is an object that holds the properties of other `snt.Module`s and also satisfies the interface of any RNNCell in tensorflow. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import warnings # Dependency imports import six from six.moves import xrange # pylint: disable=redefined-builtin from sonnet.python.modules import base from sonnet.python.modules import basic import tensorflow.compat.v1 as tf import wrapt from tensorflow.contrib import framework as contrib_framework # pylint: disable=g-direct-tensorflow-import from tensorflow.python.ops import rnn_cell_impl # pylint: enable=g-direct-tensorflow-import nest = contrib_framework.nest def _single_learnable_state(state, state_id=0, learnable=True): """Returns an initial (maybe learnable) state. This function does not create any variable scopes, and it should be called from a Sonnet module. This function also makes sure that all the rows of its `state` argument have the same value. Args: state: initial value of the initial state. It should be a tensor of at least two dimensions, of which the first dimension corresponds to the batch_size dimension. All rows of such tensor should have the same value. state_id: integer that uniquely identifies this state. learnable: boolean that indicates whether the state is learnable. Returns: The initial learnable state `Tensor`. """ unpacked_state = tf.unstack(state) # Assert that all rows have the same values. assert_rows_equal = [tf.assert_equal(s, unpacked_state[0]) for s in unpacked_state] # We wish to have all the graph assertions in the graph's critical path, # so we include them even if the initial state is left unmodified (i.e. when # the state is not learnable). # Note: All these assertions will be run every time that data flows # through the graph. At that point, the control_dependencies context manager # makes sure that such assertions are run, and will raise an exception if any # fails. 
with tf.control_dependencies(assert_rows_equal): if not learnable: return state else: state_shape = state.get_shape() state_shape.assert_is_fully_defined() state_shape_list = state_shape.as_list() batch_size, trailing_shape = state_shape_list[0], state_shape_list[1:] initial_value = tf.reshape(unpacked_state[0], [1] + trailing_shape) initial_state_variable = tf.get_variable( "initial_state_%d" % state_id, dtype=initial_value.dtype, initializer=initial_value) trailing_size_repeat = [1] * len(trailing_shape) return tf.tile(initial_state_variable, tf.constant([batch_size] + trailing_size_repeat)) def trainable_initial_state(batch_size, state_size, dtype, initializers=None, regularizers=None, name=None): """Creates an initial state consisting of trainable variables. The trainable variables are created with the same shapes as the elements of `state_size` and are tiled to produce an initial state. Args: batch_size: An int, or scalar int32 Tensor representing the batch size. state_size: A `TensorShape` or nested tuple of `TensorShape`s to use for the shape of the trainable variables. dtype: The data type used to create the variables and thus initial state. initializers: An optional container of the same structure as `state_size` containing initializers for the variables. regularizers: An optional container of the same structure as `state_size` containing regularizers for the variables. name: optional string used to prefix the initial state variable names. Returns: A `Tensor` or nested tuple of `Tensor`s with the same size and structure as `state_size`, where each `Tensor` is a tiled trainable `Variable`. Raises: ValueError: if the user passes initializers that are not functions. ValueError: if the user passes regularizers that are not functions. """ flat_state_size = nest.flatten(state_size) if not initializers: flat_initializer = tuple(tf.zeros_initializer() for _ in flat_state_size) else: nest.assert_same_structure(initializers, state_size) flat_initializer = nest.flatten(initializers) if not all([callable(init) for init in flat_initializer]): raise ValueError("Not all the passed initializers are callable objects.") if not regularizers: flat_regularizer = tuple({} for _ in flat_state_size) else: nest.assert_same_structure(regularizers, state_size) flat_regularizer = nest.flatten(regularizers) if not all([callable(regularizer) for regularizer in flat_regularizer]): raise ValueError("Not all the passed regularizers are callable objects.") # Produce names for the variables. In the case of a tuple or nested tuple, # this is just a sequence of numbers, but for a flat `namedtuple`, we use # the field names. NOTE: this could be extended to nested `namedtuple`s, # but for now that's extra complexity that's not used anywhere. 
name_prefix = name or "initial_state" try: name_suffixes = [ state_size._fields[i] for i in xrange(len(flat_state_size))] except (AttributeError, IndexError): name_suffixes = range(len(flat_state_size)) flat_initial_state = [] for name_suffix, size, init, regularizer in zip( name_suffixes, flat_state_size, flat_initializer, flat_regularizer): shape_with_batch_dim = [1] + tf.TensorShape(size).as_list() variable_name = "{}_{}".format(name_prefix, name_suffix) initial_state_module = basic.TrainableVariable( shape_with_batch_dim, dtype=dtype, initializers={"w": init}, regularizers={"w": regularizer}, name=variable_name) initial_state_variable = initial_state_module() tiled_name = "state_{}_tiled".format(name_suffix) initial_state_variable_dims = initial_state_variable.get_shape().ndims tile_dims = [batch_size] + [1] * (initial_state_variable_dims - 1) flat_initial_state.append( tf.tile(initial_state_variable, tile_dims, name=tiled_name)) return nest.pack_sequence_as(structure=state_size, flat_sequence=flat_initial_state) @six.add_metaclass(abc.ABCMeta) class RNNCore(base.AbstractModule): """Superclass for Recurrent Neural Network Cores. This class defines the basic functionality that every core should implement, mainly the `initial_state` method which will return an example of their initial state. It also inherits from the interface `snt.AbstractModule`. As with any other `snt.Module` any subclass must implement a `_build` method that constructs the graph that corresponds to a core. Such a `_build` method should always have the same interface, which is the following: output, next_state = self._build(input, prev_state) where output, next_state, input, and prev_state are arbitrarily nested tensors. Such structures can be defined according to the following grammar: element = tuple(element*) | list(element*) | tf.Tensor This class is to be used with tensorflow containers such as `rnn` in tensorflow.python.ops.rnn. These containers only accept inputs which are compatible with the `tf.contrib.rnn.RNNCell` API, so that all the RNNCores should expose `state_size` and `output_size` properties. """ __metaclass__ = abc.ABCMeta def _initial_state_scope(self, name): """Defines the name scope of the initial_state ops.""" return name if name else "%s_initial_state" % self.scope_name def initial_state(self, batch_size, dtype=tf.float32, trainable=False, trainable_initializers=None, trainable_regularizers=None, name=None, **unused_kwargs): """Builds the default start state for an RNNCore. Args: batch_size: An int, or scalar int32 Tensor representing the batch size. dtype: The data type to use for the state. trainable: Boolean that indicates whether to learn the initial state. Note that intializers and regularizers will be ignored if `trainable=False`. trainable_initializers: An initializer function or nested structure of functions with same structure as the `state_size` property of the core, to be used as initializers of the initial state variable. trainable_regularizers: Optional regularizer function or nested structure of functions with the same structure as the `state_size` property of the core, to be used as regularizers of the initial state variable. As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. name: Optional string used to prefix the initial state variable names, in the case of a trainable initial state. 
If not provided, defaults to the name of the module. Returns: A tensor or nested tuple of tensors with same structure and shape as the `state_size` property of the core. Raises: ValueError: if the user passes initializers that are not functions. ValueError: if the user passes regularizers that are not functions. """ with tf.name_scope(self._initial_state_scope(name)): if not trainable: return self.zero_state(batch_size, dtype) else: return trainable_initial_state( batch_size, self.state_size, dtype, initializers=trainable_initializers, regularizers=trainable_regularizers, name=self._initial_state_scope(name)) @property def state_size(self): """size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes. """ raise NotImplementedError("Abstract method") @property def output_size(self): """Integer or TensorShape: size of outputs produced by this cell.""" raise NotImplementedError("Abstract method") def zero_state(self, batch_size, dtype): """Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If `state_size` is an int or TensorShape, then the return value is a `N-D` tensor of shape `[batch_size x state_size]` filled with zeros. If `state_size` is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of `2-D` tensors with the shapes `[batch_size x s]` for each s in `state_size`. """ # Keep scope for backwards compatibility. with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): return rnn_cell_impl._zero_state_tensors( # pylint: disable=protected-access self.state_size, batch_size, dtype) class TrainableInitialState(base.AbstractModule): """Helper Module that creates a learnable initial state for an RNNCore. This class receives an example (possibly nested) initial state of an RNNCore, and returns a state that has the same shape, structure, and values, but is trainable. Additionally, the user may specify a boolean mask that indicates which parts of the initial state should be trainable. This allows users to train an unrolled RNNCore with a learnable initial state in the following way: core = ... # Any RNNCore module object. initial_state = core.initial_state(batch_size, dtype) trainable_initial_state = snt.TrainableInitialState(initial_state)() output, final_state = tf.nn.dynamic_rnn( core, input_sequence, initial_state=trainable_initial_state) """ def __init__(self, initial_state, mask=None, name="trainable_initial_state"): """Constructs the Module that introduces a trainable state in the graph. It receives an initial state that will be used as the initial values for the trainable variables that the module contains, and optionally a mask that indicates the parts of the initial state that should be learnable. Args: initial_state: tensor or arbitrarily nested iterables of tensors. mask: optional boolean mask. It should have the same nested structure as the given initial_state. name: module name. Raises: TypeError: if mask is not a list of booleans or None. """ super(TrainableInitialState, self).__init__(name=name) # Since python 2.7, DeprecationWarning is ignored by default. 
# Turn on the warning: warnings.simplefilter("always", DeprecationWarning) warnings.warn("Use the trainable flag in initial_state instead.", DeprecationWarning, stacklevel=2) if mask is not None: flat_mask = nest.flatten(mask) if not all([isinstance(m, bool) for m in flat_mask]): raise TypeError("Mask should be None or a list of boolean values.") nest.assert_same_structure(initial_state, mask) self._mask = mask self._initial_state = initial_state def _build(self): """Connects the module to the graph. Returns: The learnable state, which has the same type, structure and shape as the `initial_state` passed to the constructor. """ flat_initial_state = nest.flatten(self._initial_state) if self._mask is not None: flat_mask = nest.flatten(self._mask) flat_learnable_state = [ _single_learnable_state(state, state_id=i, learnable=mask) for i, (state, mask) in enumerate(zip(flat_initial_state, flat_mask))] else: flat_learnable_state = [_single_learnable_state(state, state_id=i) for i, state in enumerate(flat_initial_state)] return nest.pack_sequence_as(structure=self._initial_state, flat_sequence=flat_learnable_state) class RNNCellWrapper(RNNCore): """RNN core that delegates to a `tf.contrib.rnn.RNNCell`.""" def __init__(self, cell_ctor, *args, **kwargs): """Constructs the cell, within this module's variable scope. Args: cell_ctor: Callable that instantiates a `tf.contrib.rnn.RNNCell`. *args: Arguments to pass to `cell_ctor`. **kwargs: Keyword arguments to pass to `cell_ctor`. If `name` is provided, it is passed to `RNNCore.__init__` as well. If `custom_getter` is provided, it is passed to `RNNCore.__init__` but not to `cell_ctor`. """ super(RNNCellWrapper, self).__init__( name=kwargs.get("name"), custom_getter=kwargs.pop("custom_getter", None)) with self._enter_variable_scope(): self._cell = cell_ctor(*args, **kwargs) def _build(self, inputs, prev_state): return self._cell(inputs, prev_state) @property def output_size(self): return self._cell.output_size @property def state_size(self): return self._cell.state_size def with_doc(fn_with_doc_to_copy): """Returns a decorator to copy documentation from the given function. Docstring is copied, including *args and **kwargs documentation. Args: fn_with_doc_to_copy: Function whose docstring, including *args and **kwargs documentation, is to be copied. Returns: Decorated version of `wrapper_init` with documentation copied from `fn_with_doc_to_copy`. """ def decorator(wrapper_init): # Wrap the target class's constructor (to assume its docstring), # but invoke the wrapper class's constructor. @wrapt.decorator def wrapping_fn(unused_wrapped, instance, args, kwargs): wrapper_init(instance, *args, **kwargs) return wrapping_fn(fn_with_doc_to_copy) # pylint: disable=no-value-for-parameter return decorator
sonnet-1
sonnet/python/modules/rnn_core.py
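rnn_core.py above defines `RNNCore.initial_state`, `trainable_initial_state`, and `TrainableInitialState` for learning an RNN's starting state. A minimal sketch follows; it is not part of the repository, it assumes `snt.LSTM` as the core, and the sizes are arbitrary. Passing `trainable=True` routes through `trainable_initial_state`, which creates one variable per state component and tiles it across the batch.

```python
import sonnet as snt
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # graph-mode TF1 session

batch_size, steps, input_size, hidden_size = 4, 7, 3, 16
inputs = tf.placeholder(tf.float32, shape=[batch_size, steps, input_size])

core = snt.LSTM(hidden_size)  # any RNNCore would do; snt.LSTM is an assumption

# Learnable initial state: one trainable variable per state component,
# tiled to the batch dimension.
init_state = core.initial_state(batch_size, dtype=tf.float32, trainable=True)

# RNNCore satisfies the RNNCell interface, so the standard TF1 unrollers work.
output, final_state = tf.nn.dynamic_rnn(core, inputs, initial_state=init_state)

# The initial-state variables are ordinary trainable variables, so they are
# picked up by optimizers via tf.trainable_variables().
```

The same effect can be had by wrapping an existing state with `snt.TrainableInitialState`, though the module itself warns (via a `DeprecationWarning`) that the `trainable` flag on `initial_state` is the preferred route.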
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Utility functions for dealing with Sonnet Modules.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import contextlib import functools import importlib import inspect import re import weakref # Dependency imports from absl import logging import six import tensorflow.compat.v1 as tf import wrapt from tensorflow.python.ops import variable_scope as variable_scope_ops # pylint: disable=g-direct-tensorflow-import from tensorflow.python.util import tf_inspect # pylint: disable=g-direct-tensorflow-import def get_variable_scope_name(value): """Returns the name of the variable scope indicated by the given value. Args: value: String, variable scope, or object with `variable_scope` attribute (e.g., Sonnet module). Returns: The name (a string) of the corresponding variable scope. Raises: ValueError: If `value` does not identify a variable scope. """ # If the object has a "variable_scope" property, use it. value = getattr(value, "variable_scope", value) if isinstance(value, tf.VariableScope): return value.name elif isinstance(value, six.string_types): return value else: raise ValueError("Not a variable scope: {}".format(value)) def get_variables_in_scope( scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES): """Returns a tuple `tf.Variable`s in a scope for a given collection. Args: scope: `tf.VariableScope` or string to retrieve variables from. collection: Collection to restrict query to. By default this is `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable variables such as moving averages. Returns: A tuple of `tf.Variable` objects. """ scope_name = get_variable_scope_name(scope) if scope_name: # Escape the name in case it contains any "." characters. Add a closing # slash so we will not search any scopes that have this scope name as a # prefix. scope_name = re.escape(scope_name) + "/" return tuple(tf.get_collection(collection, scope_name)) def get_variables_in_module( module, collection=tf.GraphKeys.TRAINABLE_VARIABLES): """Returns tuple of `tf.Variable`s declared inside an `snt.Module`. Note that this operates by searching the variable scope a module contains, and so does not know about any modules which were constructed elsewhere but used inside this module. Args: module: `snt.Module` instance to query the scope of. collection: Collection to restrict query to. By default this is `tf.GraphKeys.TRAINABLE_VARIABLES`, which doesn't include non-trainable variables such as moving averages. Returns: A tuple of `tf.Variable` objects. Raises: NotConnectedError: If the module is not connected to the Graph. """ return module.get_variables(collection=collection) def _check_nested_callables(dictionary, object_name): """Checks if all items in the dictionary and in subdictionaries are callables. 
Args: dictionary: Dictionary of callables or other dictionaries with callables. object_name: The name of the object that is expected in the dictionary. E.g. 'Initializer', 'Partitioner' or 'Regularizer'. The first letter should be capitalised as this will be the first word in the error message. Raises: TypeError: If the dictionary contains something that is not either a dictionary or a callable. """ for key, entry in six.iteritems(dictionary): if hasattr(entry, "items"): _check_nested_callables(entry, object_name) elif not callable(entry): raise TypeError( "{} for '{}' is not a callable function or dictionary" .format(object_name, key)) def _assert_is_dictlike(maybe_dictlike, valid_keys): """Raises a TypeError iff `maybe_dictlike` is not a dictlike object.""" # This covers a common mistake when people use incorrect dictionary nesting # for initializers / partitioners etc. The previous error message was quite # opaque, this should be much clearer. if not hasattr(maybe_dictlike, "__getitem__"): raise TypeError( "Expected a dict-like object with possible keys %s, received %s" % (str(valid_keys), str(maybe_dictlike))) def check_initializers(initializers, keys): """Checks the given initializers. This checks that `initializers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `initializers` are functions or further dictionaries (the latter used, for example, in passing initializers to modules inside modules) that must satisfy the same constraints. Args: initializers: Dictionary of initializers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `initializers`. Returns: Copy of checked dictionary of initializers. If `initializers=None`, an empty dictionary will be returned. Raises: KeyError: If an initializer is provided for a key not in `keys`. TypeError: If a provided initializer is not a callable function, or `initializers` is not a Mapping. """ if initializers is None: return {} _assert_is_dictlike(initializers, valid_keys=keys) keys = set(keys) if not set(initializers) <= keys: extra_keys = set(initializers) - keys raise KeyError( "Invalid initializer keys {}, initializers can only " "be provided for {}".format( ", ".join("'{}'".format(key) for key in extra_keys), ", ".join("'{}'".format(key) for key in keys))) _check_nested_callables(initializers, "Initializer") return dict(initializers) def check_partitioners(partitioners, keys): """Checks the given partitioners. This checks that `partitioners` is a dictionary that only contains keys in `keys`, and furthermore the entries in `partitioners` are functions or further dictionaries (the latter used, for example, in passing partitioners to modules inside modules) that must satisfy the same constraints. Args: partitioners: Dictionary of partitioners (allowing nested dictionaries) or None. keys: Iterable of valid keys for `partitioners`. Returns: Checked dictionary of partitioners. If `partitioners=None`, an empty dictionary will be returned. Raises: KeyError: If an partitioner is provided for a key not in `keys`. TypeError: If a provided partitioner is not a callable function, or `partitioners` is not a Mapping. 
""" if partitioners is None: return {} _assert_is_dictlike(partitioners, valid_keys=keys) keys = set(keys) if not set(partitioners) <= keys: extra_keys = set(partitioners) - keys raise KeyError( "Invalid partitioner keys {}, partitioners can only " "be provided for {}".format( ", ".join("'{}'".format(key) for key in extra_keys), ", ".join("'{}'".format(key) for key in keys))) _check_nested_callables(partitioners, "Partitioner") return partitioners def check_regularizers(regularizers, keys): """Checks the given regularizers. This checks that `regularizers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `regularizers` are functions or further dictionaries (the latter used, for example, in passing regularizers to modules inside modules) that must satisfy the same constraints. Args: regularizers: Dictionary of regularizers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `regularizers`. Returns: Copy of checked dictionary of regularizers. If `regularizers=None`, an empty dictionary will be returned. Raises: KeyError: If an regularizers is provided for a key not in `keys`. TypeError: If a provided regularizer is not a callable function, or `regularizers` is not a Mapping. """ if regularizers is None: return {} _assert_is_dictlike(regularizers, valid_keys=keys) keys = set(keys) if not set(regularizers) <= keys: extra_keys = set(regularizers) - keys raise KeyError( "Invalid regularizer keys {}, regularizers can only " "be provided for {}".format( ", ".join("'{}'".format(key) for key in extra_keys), ", ".join("'{}'".format(key) for key in keys))) _check_nested_callables(regularizers, "Regularizer") return dict(regularizers) def _is_scope_prefix(scope_name, prefix_name): """Checks that `prefix_name` is a proper scope prefix of `scope_name`.""" if not prefix_name: return True if not scope_name.endswith("/"): scope_name += "/" if not prefix_name.endswith("/"): prefix_name += "/" return scope_name.startswith(prefix_name) # pylint: disable=protected-access def _get_sliced_variables(var_list): """Separates the sliced (partitioned) and unsliced variables in var_list. Args: var_list: a list of variables. Returns: A list of unsliced variables in var_list, and a dict mapping names to parts for the sliced variables in var_list. """ unsliced_variables = [] sliced_variables = collections.defaultdict(lambda: []) for var in var_list: if var._save_slice_info: sliced_variables[var._save_slice_info.full_name].append(var) else: unsliced_variables.append(var) return unsliced_variables, sliced_variables # pylint: enable=protected-access def custom_getter_router(custom_getter_map, name_fn): """Creates a custom getter than matches requests to dict of custom getters. Custom getters are callables which implement the [custom getter API] (https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/get_variable). The returned custom getter dispatches calls based on pattern matching the name of the requested variable to the keys of custom_getter_map. For example, { ".*/w": snt.custom_getters.stop_gradient, } will match all variables named with the suffix "/w". The `name_fn` is provided to allow processing of the name, such as stripping off a scope prefix before matching. Args: custom_getter_map: Mapping of regular expressions to custom getter functions. name_fn: Callable to map variable name through before matching to regular expressions. This might, for example, strip off a scope prefix. Returns: A custom getter. 
Raises: TypeError: If an entry in `custom_getter_map` is not a callable function. """ for custom_getter in custom_getter_map.values(): if not callable(custom_getter): raise TypeError("Given custom_getter is not callable.") def _custom_getter(getter, name, *args, **kwargs): """A custom getter that routes based on pattern matching the variable name. Args: getter: The true getter to call. name: The fully qualified variable name, i.e. including all scopes. *args: Arguments, in the same format as tf.get_variable. **kwargs: Keyword arguments, in the same format as tf.get_variable. Returns: The return value of the appropriate custom getter. If there are no matches, it returns the return value of `getter`. Raises: KeyError: If more than one pattern matches the variable name. """ bare_name = name_fn(name) matches = [ (custom_getter, pattern) for pattern, custom_getter in custom_getter_map.items() if re.match(pattern, bare_name) is not None] num_matches = len(matches) if num_matches == 0: return getter(name, *args, **kwargs) elif num_matches == 1: custom_getter, pattern = matches[0] return custom_getter(getter, name, *args, **kwargs) else: raise KeyError("More than one custom_getter matched {} ({}): {}".format( name, bare_name, [pattern for _, pattern in matches])) return _custom_getter def get_normalized_variable_map( scope_or_module, collection=tf.GraphKeys.GLOBAL_VARIABLES, context=None, group_sliced_variables=True): """Builds map of `tf.Variable`s in scope or module with normalized names. The names of the variables are normalized to remove the scope prefix. Args: scope_or_module: Scope or module to build map from. collection: Collection to restrict query to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES`, which includes non-trainable variables such as moving averages. context: Scope or module, identical to or parent of `scope`. If given, this will be used as the stripped prefix. By default `None`, which means `context=scope`. group_sliced_variables: Boolean, if set to True, sliced variables are grouped together in the returned map; if set to False, each partition of a sliced variable is a separate (key, value) pair. Returns: Dictionary mapping normalized variable name to `tf.Variable`, or a list of `tf.Variables` if the variable is a sliced (partitioned) variable. Raises: ValueError: If `context` is given but is not a proper prefix of `scope`. """ scope_name = get_variable_scope_name(scope_or_module) if context is None: context = scope_or_module prefix = get_variable_scope_name(context) prefix_length = len(prefix) + 1 if prefix else 0 if not _is_scope_prefix(scope_name, prefix): raise ValueError("Scope '{}' is not prefixed by '{}'.".format( scope_name, prefix)) variables = get_variables_in_scope(scope_name, collection) if not group_sliced_variables: single_vars = variables grouped_vars = dict() else: single_vars, grouped_vars = _get_sliced_variables(variables) var_map = {var.op.name[prefix_length:]: var for var in single_vars} for full_name, var_group in grouped_vars.items(): name = full_name[prefix_length:] if name in var_map: raise ValueError("Mixing slices and non-slices with the same name: " + str(name)) var_map[name] = var_group return var_map def get_saver(scope, collections=(tf.GraphKeys.GLOBAL_VARIABLES,), # pylint: disable=redefined-outer-name context=None, **kwargs): """Builds a `tf.train.Saver` for the scope or module, with normalized names. The names of the variables are normalized to remove the scope prefix. 
This allows the same variables to be restored into another similar scope or module using a complementary `tf.train.Saver` object. Args: scope: Scope or module. Variables within will be saved or restored. collections: Sequence of collections of variables to restrict `tf.train.Saver` to. By default this is `tf.GraphKeys.GLOBAL_VARIABLES` which includes moving averages variables as well as trainable variables. context: Scope or module, identical to or parent of `scope`. If given, this will be used as the stripped prefix. **kwargs: Extra keyword arguments to pass to tf.train.Saver. Returns: A `tf.train.Saver` object for Variables in the scope or module. """ variable_map = {} for collection in collections: variable_map.update(get_normalized_variable_map(scope, collection, context)) return tf.train.Saver(var_list=variable_map, **kwargs) def has_variable_scope(obj): """Determines whether the given object has a variable scope.""" return "variable_scope" in dir(obj) def _format_table(rows, join_lines=True): format_str = "" for col in range(len(rows[0])): column_width = max(len(row[col]) for row in rows) format_str += "{:<" + str(column_width) + "} " output_rows = (format_str.format(*row).strip() for row in rows) return "\n".join(output_rows) if join_lines else output_rows def variable_map_items(variable_map): """Yields an iterator over (string, variable) pairs in the variable map. In general, variable maps map variable names to either a `tf.Variable`, or list of `tf.Variable`s (in case of sliced variables). Args: variable_map: dict, variable map over which to iterate. Yields: (string, tf.Variable) pairs. """ for key, var_or_vars in six.iteritems(variable_map): if isinstance(var_or_vars, (list, tuple)): for variable in var_or_vars: yield key, variable else: yield key, var_or_vars def _get_vars_to_collections(variables): """Returns a dict mapping variables to the collections they appear in.""" var_to_collections = collections.defaultdict(lambda: []) if isinstance(variables, dict): variables = list(v for _, v in variable_map_items(variables)) for graph in set(v.graph for v in variables): for collection_name in list(graph.collections): entries = set(entry for entry in graph.get_collection(collection_name) if isinstance(entry, tf.Variable)) # For legacy reasons, # tf.GraphKeys.GLOBAL_VARIABLES == "variables". # Correcting for this here, to avoid confusion. if collection_name == tf.GraphKeys.GLOBAL_VARIABLES: collection_name = "global_variables" for var in entries.intersection(variables): var_to_collections[var].append(collection_name) return var_to_collections def _format_device(var): """Returns the device with an annotation specifying `ResourceVariable`. "legacy" means a normal tf.Variable while "resource" means a ResourceVariable. For example: `(legacy)` `(resource)` `/job:learner/task:0/device:CPU:* (legacy)` `/job:learner/task:0/device:CPU:* (resource)` Args: var: The Tensorflow Variable to print. 
""" if var.dtype.name.endswith("_ref"): resource_var_annotation = "(legacy)" else: resource_var_annotation = "(resource)" if var.device: return "{} {}".format(var.device, resource_var_annotation) else: return resource_var_annotation def format_variables(variables, join_lines=True): """Takes a collection of variables and formats it as a table.""" rows = [("Variable", "Shape", "Type", "Collections", "Device")] var_to_collections = _get_vars_to_collections(variables) for var in sorted(variables, key=lambda var: var.op.name): if var.get_shape().is_fully_defined(): shape = "x".join(str(dim) for dim in var.get_shape().as_list()) else: shape = "undefined" dtype = repr(var.dtype.base_dtype).replace("tf.", "") coll = ", ".join(sorted(var_to_collections[var])) rows.append((var.op.name, shape, dtype, coll, _format_device(var))) return _format_table(rows, join_lines) def format_variable_map(variable_map, join_lines=True): """Takes a key-to-variable map and formats it as a table.""" rows = [("Key", "Variable", "Shape", "Type", "Collections", "Device")] var_to_collections = _get_vars_to_collections(variable_map) sort_key = lambda item: (item[0], item[1].name) for key, var in sorted(variable_map_items(variable_map), key=sort_key): shape = "x".join(str(dim) for dim in var.get_shape().as_list()) dtype = repr(var.dtype.base_dtype).replace("tf.", "") coll = ", ".join(sorted(var_to_collections[var])) rows.append((key, var.op.name, shape, dtype, coll, _format_device(var))) return _format_table(rows, join_lines) def log_variables(variables=None): """Logs variable information. This function logs the name, shape, type, collections, and device for either all variables or a given iterable of variables. In the "Device" columns, the nature of the variable (legacy or resource (for ResourceVariables)) is also specified in parenthesis. Args: variables: iterable of variables; if not provided, then all variables (in the default graph) are logged. """ if variables is None: variables = tf.global_variables() + tf.local_variables() for row in format_variables(variables, join_lines=False): logging.info(row) def _num_bytes_to_human_readable(num_bytes): """Returns human readable string of how much memory `num_bytes` fills.""" if num_bytes < (2 ** 10): return "%d B" % num_bytes elif num_bytes < (2 ** 20): return "%.3f KB" % (float(num_bytes) / (2 ** 10)) elif num_bytes < (2 ** 30): return "%.3f MB" % (float(num_bytes) / (2 ** 20)) else: return "%.3f GB" % (float(num_bytes) / (2 ** 30)) def summarize_variables(variables=None): """Logs a summary of variable information. This function groups Variables by dtype and prints out the number of Variables and the total number of scalar values for each datatype, as well as the total memory consumed. For Variables of type tf.string, the memory usage cannot be accurately calculated from the Graph as the memory requirements change based on what strings are actually stored, which can only be determined inside a session. In this case, the amount of memory used to stored the pointers to the strings is logged, along with a warning. Args: variables: iterable of variables; if not provided, then all variables (in the default graph) are summarized. """ variable_counts = count_variables_by_type(variables=variables) total_num_scalars = 0 total_num_bytes = 0 # Sort by string representation of type name, so output is deterministic. 
for dtype in sorted(variable_counts, key=lambda dtype: "%r" % dtype): var_info_for_type = variable_counts[dtype] num_bytes = var_info_for_type["num_scalars"] * dtype.size total_num_scalars += var_info_for_type["num_scalars"] total_num_bytes += num_bytes logging.info("%r: %d variables comprising %d scalars, %s", dtype, var_info_for_type["num_variables"], var_info_for_type["num_scalars"], _num_bytes_to_human_readable(num_bytes)) def count_variables_by_type(variables=None): """Returns a dict mapping dtypes to number of variables and scalars. Args: variables: iterable of `tf.Variable`s, or None. If None is passed, then all global and local variables in the current graph are used. Returns: A dict mapping tf.dtype keys to a dict containing the keys 'num_scalars' and 'num_variables'. """ if variables is None: variables = tf.global_variables() + tf.local_variables() unique_types = set(v.dtype.base_dtype for v in variables) results_dict = {} for dtype in unique_types: if dtype == tf.string: logging.warning( "NB: string Variables present. The memory usage for these Variables " "will not be accurately computed as it depends on the exact strings " "stored in a particular session.") vars_of_type = [v for v in variables if v.dtype.base_dtype == dtype] num_scalars = sum(v.shape.num_elements() for v in vars_of_type) results_dict[dtype] = { "num_variables": len(vars_of_type), "num_scalars": num_scalars } return results_dict def reuse_variables(method): """Wraps an arbitrary method so it does variable sharing. This decorator creates variables the first time it calls `method`, and reuses them for subsequent calls. The object that calls `method` provides a `tf.VariableScope`, either as a `variable_scope` attribute or as the return value of an `_enter_variable_scope()` method. The first time the wrapped method is invoked, it enters the caller's `tf.VariableScope` with `reuse=False`. On all subsequent calls it enters the same variable scope with `reuse=True`. Variables are created in the context of the `tf.VariableScope` provided by the caller object. Ops are created with an additional `tf.name_scope()`, which adds a scope for the wrapped method name. For example: ```python class MyClass(object): def __init__(self, name): with tf.variable_scope(None, default_name=name) as variable_scope: self.variable_scope = variable_scope @snt.reuse_variables def add_x(self, tensor): x = tf.get_variable("x", shape=tensor.get_shape()) return tensor + x module = MyClass("my_module_name") input_tensor = tf.zeros(shape=(5,)) # This creates the variable "my_module_name/x" # and op "my_module_name/add_x/add" output = module.add_x(input_tensor) ``` For performance when executing eagerly it may be desirable to additionally annotate these methods using `defun`, such that they are encapsulated as graph functions. This is not recommended if your method returns a variable since the output of `defun` would be an op that returned the variable's value when evaluated (rather than the variable instance). ```python class FooModule(snt.AbstractModule): def _build(self, inputs): return complex_math(inputs) @tfe.defun @snt.reuse_variables def more_complex_stuff(self, inputs): return more_complex_math(inputs) ``` Args: method: The method to wrap. Returns: The wrapped method. """ initialized_variable_scopes_eager = set() initialized_variable_scopes_graph = weakref.WeakKeyDictionary() # Ensure that the argument passed in is really a method by checking that the # first positional argument to it is "self". 
arg_spec = tf_inspect.getargspec(method) is_method = arg_spec.args and arg_spec.args[0] == "self" if not is_method: raise TypeError("reuse_variables can only be used with methods.") @wrapt.decorator def eager_test(method, obj, args, kwargs): """Validates runtime state in eager mode.""" # If @reuse_variables is combined with @property, obj is passed in args # and method is still unbound at this stage. if obj is None: obj = args[0] if tf.executing_eagerly() and not hasattr(obj, "_template"): raise ValueError( "reuse_variables is not supported in eager mode except in Sonnet " "modules.") return method(*args, **kwargs) @wrapt.decorator def call_method(method, obj, args, kwargs): """Calls `method` with a variable scope whose reuse flag is set correctly. The first time the wrapper is called it creates a `(tf.Graph, tf.VariableScope)` key and checks it for membership in `initialized_variable_scopes`. The check is `False` if and only if this is the first time the wrapper has been called with the key, otherwise the check is `True`. The result of this check is used as the `reuse` flag for entering the provided variable scope before calling `method`. Here are two examples of how to use the reuse_variables decorator. 1. Decorate an arbitrary instance method with a `variable_scope` attribute: ```python class Reusable(object): def __init__(self, name): with tf.variable_scope(None, default_name=name) as vs: self.variable_scope = vs @snt.reuse_variables def add_a(self, input_tensor): a = tf.get_variable("a", shape=input_tensor.get_shape()) return a + input_tensor obj = Reusable("reusable") x = tf.constant(5.0) out1 = obj.add_a(x) out2 = obj.add_a(x) # out1 == out2 ``` 2. Decorating a snt.AbstractModule instance method: ```python class ReusableModule(snt.AbstractModule): @snt.reuse_variables def add_a(self, input_tensor): a = tf.get_variable("a", shape=input_tensor.get_shape()) return a + input_tensor # We don't need @snt.reuse_variables here because build is wrapped by # `tf.make_template` inside `snt.AbstractModule`. def _build(self, input_tensor): b = tf.get_variable("b", shape=input_tensor.get_shape()) return b + self.add_a(input_tensor) obj = Reusable("reusable") x = tf.constant(5.0) out1 = obj(x) out2 = obj(x) # out1 == out2 ``` Args: method: The method to wrap. obj: The object instance passed to the wrapped method. args: The positional arguments (Tensors) passed to the wrapped method. kwargs: The keyword arguments passed to the wrapped method. Returns: Output of the wrapped method. Raises: ValueError: If no variable scope is provided or if `method` is a method and a variable_scope keyword argument is also provided. """ # If @reuse_variables is combined with @property, obj is passed in args # and method is still unbound at this stage. if obj is None: obj = args[0] def default_context_manager(reuse=None): variable_scope = obj.variable_scope return tf.variable_scope(variable_scope, reuse=reuse) variable_scope_context_manager = getattr(obj, "_enter_variable_scope", default_context_manager) with tf.init_scope(): # We need `init_scope` incase we're running inside a defun. In that case # what we want is information about where the function will be called not # where the function is being built. 
graph = tf.get_default_graph() will_call_in_eager_context = tf.executing_eagerly() if will_call_in_eager_context: initialized_variable_scopes = initialized_variable_scopes_eager else: if graph not in initialized_variable_scopes_graph: initialized_variable_scopes_graph[graph] = set() initialized_variable_scopes = initialized_variable_scopes_graph[graph] # Temporarily enter the variable scope to capture it with variable_scope_context_manager() as tmp_variable_scope: variable_scope = tmp_variable_scope reuse = variable_scope.name in initialized_variable_scopes # Enter the pure variable scope with reuse correctly set with variable_scope_ops._pure_variable_scope( # pylint:disable=protected-access variable_scope, reuse=reuse) as pure_variable_scope: current_name_scope = tf.get_default_graph().get_name_scope() # Force tf.name_scope to treat current_name_scope as an "absolute" scope # so we can re-enter it. if current_name_scope and current_name_scope[-1] != "/": current_name_scope += "/" with tf.name_scope(current_name_scope): module_name = pure_variable_scope.name method_name = to_snake_case(method.__name__) method_name_scope = "{}/{}".format(module_name, method_name) with tf.name_scope(method_name_scope) as scope: if hasattr(obj, "_capture_variables"): with obj._capture_variables(): # pylint: disable=protected-access out_ops = method(*args, **kwargs) else: out_ops = method(*args, **kwargs) initialized_variable_scopes.add(pure_variable_scope.name) try: # If `obj` is a Sonnet module, let it know it's been connected # to the TF graph. obj._is_connected = True # pylint: disable=protected-access if not tf.executing_eagerly(): obj._add_connected_subgraph( # pylint: disable=protected-access method, out_ops, scope, args, kwargs) except AttributeError: pass return out_ops return eager_test(call_method(method)) # pylint: disable=no-value-for-parameter def name_for_callable(func): """Returns a module name for a callable or `None` if no name can be found.""" if isinstance(func, functools.partial): return name_for_callable(func.func) try: name = func.__name__ except AttributeError: return None if name == "<lambda>": return None else: return to_snake_case(name) def to_snake_case(camel_case): """Returns a CamelCase string as a snake_case string.""" if not re.match(r"^[A-Za-z_]\w*$", camel_case): raise ValueError( "Input string %s is not a valid Python identifier." % camel_case) # Add underscore at word start and ends. underscored = re.sub(r"([A-Z][a-z])", r"_\1", camel_case) underscored = re.sub(r"([a-z])([A-Z])", r"\1_\2", underscored) # Add underscore before alphanumeric chunks. underscored = re.sub(r"([a-z])([0-9][^_]*)", r"\1_\2", underscored) # Remove any underscores at start or end of name and convert to lowercase. return underscored.strip("_").lower() @tf.custom_gradient def convert_gradient_to_tensor(x): """Identity operation whose gradient is converted to a `Tensor`. Currently, the gradient to `tf.concat` is particularly expensive to compute if dy is an `IndexedSlices` (a lack of GPU implementation forces the gradient operation onto CPU). This situation occurs when the output of the `tf.concat` is eventually passed to `tf.gather`. It is sometimes faster to convert the gradient to a `Tensor`, so as to get the cheaper gradient for `tf.concat`. To do this, replace `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`. Args: x: A `Tensor`. Returns: The input `Tensor`. 
""" return x, tf.convert_to_tensor def sort_by_name(variables): """Returns a tuple of `variables` sorted ascending by name.""" return tuple(sorted(variables, key=lambda v: v.name)) @contextlib.contextmanager def notify_about_new_variables(callback): """Calls `callback(var)` for all newly created variables. Callback should not modify the variable passed in. Use cases that require variables to be modified should use `variable_creator_scope` directly and sit within the variable creator stack. >>> variables = [] >>> with notify_about_new_variables(variables.append): ... v = tf.Variable(1.0, name='v') ... w = tf.get_variable('w', []) >>> assert variables == [v, w] Args: callback: a callable taking a single argument which is a tf.Variable. Yields: `None` - used for contextmanager API. """ def _tracking_creator(getter, **kwargs): v = getter(**kwargs) callback(v) return v with tf.variable_creator_scope(_tracking_creator): yield def deprecation_warning(deprecation_message): """Log a warning message the user is using deprecated functionality.""" logging.log_first_n(logging.WARN, deprecation_message, 1) def _recursive_getattr(module, path): """Recursively gets attributes inside `module` as specified by `path`.""" if "." not in path: return getattr(module, path) else: first, rest = path.split(".", 1) return _recursive_getattr(getattr(module, first), rest) def parse_string_to_constructor(ctor_string): """Returns a callable which corresponds to the constructor string. Various modules (eg, ConvNet2D) take constructor arguments which are callables, indicating a submodule to build. These can be passed as actual constructors, eg `snt.LayerNorm`, however that makes the config for that module not trivially serializable. This function tries to map a string representation to the underlying callable, allowing configs to remain serializable where necessary. Args: ctor_string: string representing some module in Sonnet. If the string is provided with no dots, we assume it is a member of Sonnet available at top level, i.e. "LayerNorm" maps to `snt.LayerNorm`. Raises: ValueError: if no matching constructor can be found. Returns: Callable constructor which corresponds to `ctor_string`. """ orig_ctor_string = ctor_string if "." not in ctor_string: # No module specified - assume part of Sonnet ctor_string = "sonnet." + ctor_string if ctor_string.startswith("snt."): # Replace common short name with full name ctor_string = "sonnet." + ctor_string[len("snt."):] # Cannot just use importlib directly because of the way we alias subpackages, # i.e. 'sonnet.nets.ConvNet2D' does not work because 'sonnet.nets' is actually # stored as 'sonnet.python.modules.nets'. To support these aliases we use # importlib only for the top level package, and then recursive getattr. package_name, rest = ctor_string.split(".", 1) package = importlib.import_module(package_name) try: return _recursive_getattr(package, rest) except AttributeError: raise ValueError("could not find `{}`, after normalizing to `{}`".format( orig_ctor_string, ctor_string)) # Pseudo-enum for the return valeus from supports_kwargs. # The kwargs in quesion are definitely accepted by the module / function. SUPPORTED = "supported" # The kwargs in question are definitely not suppored by the module / function. NOT_SUPPORTED = "not_supported" # The kwargs in question may be supported by the module / function, but we # cannot say for sure because the function takes **kwargs. 
MAYBE_SUPPORTED = "maybe_supported" def supports_kwargs(module_or_fn, kwargs_list): """Determines whether the provided callable supports all the kwargs. This is useful when you have a module that might or might not support a kwarg such as `is_training`. Rather than calling the module and catching the error, risking the potential modification of underlying state, this function introspects the module to see what kwargs are actually supported, using the python `inspect` module. Note that many TF functions do not export a valid argspec object, rather they have a generic *args, **kwargs signature due to various layers of wrapping (deprecation decorators, etc). In those circumstances we return MAYBE_SUPPORTED, and users will have to use another method to tell whether the kwargs are supported (e.g. by just calling the function). Args: module_or_fn: some callable, generally an object or a method of some object. If an object is provided, we check wither `module_or_fn.__call__` supports the provided kwargs, which for a Sonnet module will automatically check the signature of _build. If `module_or_fn` is a function/method, then we check its signature directly, so non-Sonnet functions can be used. kwargs_list: string or iterable of strings of keyword arg names to test for. If an empty iterable is provided this function will always return True. Raises: ValueError: if a non-string is provided in `kwargs_list`. Returns: a string, one of 'supported', 'not_supported' or 'maybe_supported'. """ if isinstance(kwargs_list, six.string_types): kwargs_list = [kwargs_list] # If it's not a function or method, then assume it's a module, so introspect # the __call__ method. wrapt ensures that for Sonnet modules the _build # signature is available here. if not (inspect.isfunction(module_or_fn) or inspect.ismethod(module_or_fn)): module_or_fn = module_or_fn.__call__ arg_spec = tf_inspect.getargspec(module_or_fn) # If there is a keywords element, then an arbitrary kwargs will work, as far # as we can tell from here. takes_arbitrary_kwargs = (arg_spec.keywords is not None) for kwarg in kwargs_list: if not isinstance(kwarg, six.string_types): raise ValueError("kwargs should be strings, instead got {}".format( kwarg)) if kwarg not in arg_spec.args: if not takes_arbitrary_kwargs: # The function doesn't take **kwargs, and this name is not in the # regular args, so it would definitely cause an error to call this. return NOT_SUPPORTED else: # The function may accept the kwarg, but we can't say for sure. Even # though this is only one kwarg, we can't be certain about the whole # lot, so the combined answer is now "maybe". return MAYBE_SUPPORTED # All the kwargs must actually be present in the specific args list return SUPPORTED def remove_unsupported_kwargs(module_or_fn, all_kwargs_dict): """Removes any kwargs not supported by `module_or_fn` from `all_kwargs_dict`. A new dict is return with shallow copies of keys & values from `all_kwargs_dict`, as long as the key is accepted by module_or_fn. The returned dict can then be used to connect `module_or_fn` (along with some other inputs, ie non-keyword arguments, in general). `snt.supports_kwargs` is used to tell whether a given kwarg is supported. Note that this method may give false negatives, which would lead to extraneous removals in the result of this function. Please read the docstring for `snt.supports_kwargs` for details, and manually inspect the results from this function if in doubt. Args: module_or_fn: some callable which can be interrogated by `snt.supports_kwargs`. 
Generally a Sonnet module or a method (wrapped in `@reuse_variables`) of a Sonnet module. all_kwargs_dict: a dict containing strings as keys, or None. Raises: ValueError: if `all_kwargs_dict` is not a dict. Returns: A dict containing some subset of the keys and values in `all_kwargs_dict`. This subset may be empty. If `all_kwargs_dict` is None, this will be an empty dict. """ if all_kwargs_dict is None: all_kwargs_dict = {} if not isinstance(all_kwargs_dict, dict): raise ValueError("all_kwargs_dict must be a dict with string keys.") return { kwarg: value for kwarg, value in all_kwargs_dict.items() if supports_kwargs(module_or_fn, kwarg) != NOT_SUPPORTED }
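

# Hedged usage sketch (not part of the original module): it shows how
# `supports_kwargs` and `remove_unsupported_kwargs` defined above are
# typically combined when connecting a module whose signature may or may not
# accept `is_training`. The module, input and kwarg names are illustrative
# assumptions, not part of the Sonnet API.
def _kwargs_filtering_sketch(module, inputs, is_training):
  """Connects `module`, forwarding `is_training` only when it is accepted."""
  candidate_kwargs = {"is_training": is_training}
  if supports_kwargs(module, "is_training") == NOT_SUPPORTED:
    # Definitely unsupported: drop it rather than risk a TypeError.
    candidate_kwargs = {}
  # Equivalent shortcut using the helper defined above; note it may also drop
  # kwargs whose support cannot be determined (false negatives).
  filtered_kwargs = remove_unsupported_kwargs(module, candidate_kwargs)
  return module(inputs, **filtered_kwargs)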
sonnet-1
sonnet/python/modules/util.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.spatial_transformer.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf no_constraints = snt.AffineWarpConstraints.no_constraints scale_2d = snt.AffineWarpConstraints.scale_2d scale_3d = snt.AffineWarpConstraints.scale_3d translation_2d = snt.AffineWarpConstraints.translation_2d translation_3d = snt.AffineWarpConstraints.translation_3d translation_2d = snt.AffineWarpConstraints.translation_2d translation_3d = snt.AffineWarpConstraints.translation_3d shear_2d = snt.AffineWarpConstraints.shear_2d no_shear_2d = snt.AffineWarpConstraints.no_shear_2d no_shear_3d = snt.AffineWarpConstraints.no_shear_3d class AffineGridWarperTest(parameterized.TestCase, tf.test.TestCase): def testShapeInferenceAndChecks(self): output_shape2d = (2, 3) source_shape2d = (6, 9) constraints = scale_2d(y=1) & translation_2d(x=-2, y=7) agw2d = snt.AffineGridWarper(source_shape=source_shape2d, output_shape=output_shape2d, constraints=constraints) input_params2d = tf.placeholder(tf.float32, [None, constraints.num_free_params]) warped_grid2d = agw2d(input_params2d) self.assertEqual(warped_grid2d.get_shape().as_list()[1:], [2, 3, 2]) output_shape2d = (2, 3) source_shape3d = (100, 200, 50) agw3d = snt.AffineGridWarper(source_shape=source_shape3d, output_shape=output_shape2d, constraints=[[None, 0, None, None], [0, 1, 0, None], [0, None, 0, None]]) input_params3d = tf.placeholder(tf.float32, [None, agw3d.constraints.num_free_params]) warped_grid3d = agw3d(input_params3d) self.assertEqual(warped_grid3d.get_shape().as_list()[1:], [2, 3, 3]) output_shape3d = (2, 3, 4) source_shape3d = (100, 200, 50) agw3d = snt.AffineGridWarper(source_shape=source_shape3d, output_shape=output_shape3d, constraints=[[None, 0, None, None], [0, 1, 0, None], [0, None, 0, None]]) input_params3d = tf.placeholder(tf.float32, [None, agw3d.constraints.num_free_params]) warped_grid3d = agw3d(input_params3d) self.assertEqual(warped_grid3d.get_shape().as_list()[1:], [2, 3, 4, 3]) with self.assertRaisesRegexp(snt.Error, "Incompatible set of constraints provided.*"): snt.AffineGridWarper(source_shape=source_shape3d, output_shape=output_shape3d, constraints=no_constraints(2)) with self.assertRaisesRegexp(snt.Error, "Output domain dimensionality.*"): snt.AffineGridWarper(source_shape=source_shape2d, output_shape=output_shape3d, constraints=no_constraints(2)) @parameterized.named_parameters( ("2d_a", [13, 17], [7, 11], no_constraints(2)), ("2d_b", [11, 5], [2, 8], scale_2d(x=.7)), ("2d_c", [9, 23], [3, 11], scale_2d(y=1.2)), ("2d_d", [2, 23], [9, 13], snt.AffineWarpConstraints([[1]*3, [None]*3])), ("3d_a", [13, 17, 3], [7, 11, 3], no_constraints(3)), 
("3d_b", [11, 5, 6], [2, 8, 9], scale_3d(x=.7, z=2)), ("3d_c", [9, 23, 8], [3, 11, 2], scale_3d(y=1.2)), ("3d_d", [2, 23, 2], [9, 13, 33], snt.AffineWarpConstraints([[1]*4, [None]*4, [None, 1, None, 1]])), ("2d_3d_a", [13, 17], [7, 11, 3], no_constraints(3)), ("2d_3d_b", [11, 5], [2, 8, 9], scale_3d(y=.7, z=2)), ("2d_3d_c", [9, 23], [3, 11, 2], scale_3d(x=1.2)), ("2d_3d_d", [2, 23], [9, 13, 33], snt.AffineWarpConstraints([[None] * 4, [1] * 4, [1, None, None, 1]]))) def testSameAsNumPyReference(self, output_shape, source_shape, constraints): def chain(x): return itertools.chain(*x) def predict(output_shape, source_shape, inputs): ranges = [np.linspace(-1, 1, x, dtype=np.float32) for x in reversed(output_shape)] n = len(source_shape) grid = np.meshgrid(*ranges, indexing="xy") for _ in range(len(output_shape), len(source_shape)): grid.append(np.zeros_like(grid[0])) grid.append(np.ones_like(grid[0])) grid = np.array([x.reshape(1, -1) for x in grid]).squeeze() predicted_output = [] for i in range(0, batch_size): x = np.dot(inputs[i, :].reshape(n, n+1), grid) for k, s in enumerate(reversed(source_shape)): s = (s - 1) * 0.5 x[k, :] = x[k, :] * s + s x = np.concatenate([v.reshape(v.shape + (1,)) for v in x], -1) predicted_output.append(x.reshape(tuple(output_shape) + (n,))) return predicted_output batch_size = 20 agw = snt.AffineGridWarper(source_shape=source_shape, output_shape=output_shape, constraints=constraints) inputs = tf.placeholder(tf.float32, [None, constraints.num_free_params]) warped_grid = agw(inputs) full_size = constraints.num_dim * (constraints.num_dim + 1) full_input_np = np.random.rand(batch_size, full_size) con_i = [i for i, x in enumerate(chain(constraints.mask)) if not x] con_val = [x for x in chain(constraints.constraints) if x is not None] for i, v in zip(con_i, con_val): full_input_np[:, i] = v uncon_i = [i for i, x in enumerate(chain(constraints.mask)) if x] with self.test_session() as sess: output = sess.run(warped_grid, feed_dict={inputs: full_input_np[:, uncon_i]}) self.assertAllClose(output, predict(output_shape, source_shape, full_input_np), rtol=1e-05, atol=1e-05) def testIdentity(self): constraints = snt.AffineWarpConstraints.no_constraints() warper = snt.AffineGridWarper([3, 3], [3, 3], constraints=constraints) p = tf.placeholder(tf.float64, (None, constraints.num_free_params)) grid = warper(p) with self.test_session() as sess: warp_p = np.array([1, 0, 0, 0, 1, 0]).reshape([1, constraints.num_free_params]) output = sess.run(grid, feed_dict={p: warp_p}) # Check that output matches expected result for a known transformation. 
self.assertAllClose(output, np.array([[[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], [[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]], [[0.0, 2.0], [1.0, 2.0], [2.0, 2.0]]]])) @parameterized.named_parameters( ("2d_a", [13, 17], [7, 11], no_constraints(2)), ("2d_b", [11, 5], [2, 8], scale_2d(x=.7)), ("2d_c", [9, 23], [3, 11], scale_2d(y=1.2)), ("2d_d", [2, 23], [9, 13], snt.AffineWarpConstraints([[1]*3, [None]*3]))) def testInvSameAsNumPyRef(self, output_shape, source_shape, constraints): def chain(x): return itertools.chain(*x) def predict(output_shape, source_shape, inputs): ranges = [np.linspace(-1, 1, x, dtype=np.float32) for x in reversed(source_shape)] n = len(output_shape) grid = np.meshgrid(*ranges, indexing="xy") for _ in range(len(source_shape), len(output_shape)): grid.append(np.zeros_like(grid[0])) grid.append(np.ones_like(grid[0])) grid = np.array([x.reshape(1, -1) for x in grid]).squeeze() predicted_output = [] for i in range(0, batch_size): affine_matrix = inputs[i, :].reshape(n, n+1) inv_matrix = np.linalg.inv(affine_matrix[:2, :2]) inv_transform = np.concatenate( [inv_matrix, -np.dot(inv_matrix, affine_matrix[:, 2].reshape(2, 1))], 1) x = np.dot(inv_transform, grid) for k, s in enumerate(reversed(output_shape)): s = (s - 1) * 0.5 x[k, :] = x[k, :] * s + s x = np.concatenate([v.reshape(v.shape + (1,)) for v in x], -1) predicted_output.append(x.reshape(tuple(source_shape) + (n,))) return predicted_output batch_size = 20 agw = snt.AffineGridWarper(source_shape=source_shape, output_shape=output_shape, constraints=constraints).inverse() inputs = tf.placeholder(tf.float32, [None, constraints.num_free_params]) warped_grid = agw(inputs) full_size = constraints.num_dim * (constraints.num_dim + 1) # Adding a bit of mass to the matrix to avoid singular matrices full_input_np = np.random.rand(batch_size, full_size) + 0.1 con_i = [i for i, x in enumerate(chain(constraints.mask)) if not x] con_val = [x for x in chain(constraints.constraints) if x is not None] for i, v in zip(con_i, con_val): full_input_np[:, i] = v uncon_i = [i for i, x in enumerate(chain(constraints.mask)) if x] with self.test_session() as sess: output = sess.run(warped_grid, feed_dict={inputs: full_input_np[:, uncon_i]}) self.assertAllClose(output, predict(output_shape, source_shape, full_input_np), rtol=1e-05, atol=1e-05) class AffineWarpConstraintsTest(tf.test.TestCase): def assertConstraintsEqual(self, warp_constraints, expected): self.assertEqual(warp_constraints.constraints, expected) def testCreateMasks(self): self.assertConstraintsEqual(no_constraints(1), ((None,) * 2,) * 1) self.assertConstraintsEqual(no_constraints(2), ((None,) * 3,) * 2) self.assertConstraintsEqual(no_constraints(3), ((None,) * 4,) * 3) self.assertConstraintsEqual(translation_2d(x=11, y=12), ((None, None, 11), (None, None, 12))) self.assertConstraintsEqual(translation_2d(x=11), ((None, None, 11), (None, None, None))) self.assertConstraintsEqual(translation_2d(y=12), ((None, None, None), (None, None, 12))) self.assertConstraintsEqual(translation_3d(x=11, y=12, z=13), ((None, None, None, 11), (None, None, None, 12), (None, None, None, 13))) self.assertConstraintsEqual(translation_3d(x=11), ((None, None, None, 11), (None, None, None, None), (None, None, None, None))) self.assertConstraintsEqual(translation_3d(y=12), ((None, None, None, None), (None, None, None, 12), (None, None, None, None))) self.assertConstraintsEqual(translation_3d(z=13), ((None, None, None, None), (None, None, None, None), (None, None, None, 13))) self.assertConstraintsEqual(scale_2d(x=11, 
y=12), ((11, None, None), (None, 12, None))) self.assertConstraintsEqual(scale_2d(x=11), ((11, None, None), (None, None, None))) self.assertConstraintsEqual(scale_2d(y=12), ((None, None, None), (None, 12, None))) self.assertConstraintsEqual(scale_3d(x=11, y=12, z=13), ((11, None, None, None), (None, 12, None, None), (None, None, 13, None))) self.assertConstraintsEqual(scale_3d(x=11), ((11, None, None, None), (None, None, None, None), (None, None, None, None))) self.assertConstraintsEqual(scale_3d(y=12), ((None, None, None, None), (None, 12, None, None), (None, None, None, None))) self.assertConstraintsEqual(scale_3d(z=13), ((None, None, None, None), (None, None, None, None), (None, None, 13, None))) self.assertConstraintsEqual(shear_2d(x=11, y=12), ((None, 11, None), (12, None, None))) self.assertConstraintsEqual(shear_2d(x=11), ((None, 11, None), (None, None, None))) self.assertConstraintsEqual(shear_2d(y=12), ((None, None, None), (12, None, None))) self.assertConstraintsEqual(no_shear_2d(), ((None, 0, None), (0, None, None))) self.assertConstraintsEqual(no_shear_3d(), ((None, 0, 0, None), (0, None, 0, None), (0, 0, None, None))) def testConstraintsOperations(self): self.assertEqual(no_constraints(2).num_free_params, 6) self.assertEqual(scale_2d(2, 4).num_free_params, 4) self.assertConstraintsEqual(scale_2d(2, 4) & translation_2d(x=2), ((2, None, 2), (None, 4, None))) self.assertEqual(scale_2d(2, 4).mask, ((False, True, True), (True, False, True))) with self.assertRaisesRegexp(ValueError, "Incompatible set of constraints provided."): _ = scale_2d(2) & scale_2d(3) if __name__ == "__main__": tf.test.main()
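

# Hedged usage sketch (not part of the original tests): a minimal example of
# composing warp constraints with `&` and building an AffineGridWarper, as
# exercised by the tests above. The shapes and constraint values are
# illustrative assumptions taken from testShapeInferenceAndChecks.
def _affine_grid_warper_sketch():
  constraints = scale_2d(y=1) & translation_2d(x=-2)
  warper = snt.AffineGridWarper(source_shape=(6, 9),
                                output_shape=(2, 3),
                                constraints=constraints)
  # Only the unconstrained affine parameters are fed at call time.
  params = tf.placeholder(tf.float32, [None, constraints.num_free_params])
  return warper(params)  # warped grid of shape [batch, 2, 3, 2]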
sonnet-1
sonnet/python/modules/spatial_transformer_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for snt.scale_gradient. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools # Dependency imports from absl.testing import parameterized import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib.eager.python import tfe as contrib_eager tfe = contrib_eager class ScaleGradientTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters( *itertools.product(range(6), [0.0, 0.1, 0.9, 1.0]) ) def testOpScale(self, x_, scale): x = tf.placeholder(tf.float32, [1]) y = x * x y = snt.scale_gradient(y, scale) dydx = tf.gradients([y], [x])[0] if scale == 0.0: self.assertEqual(y.op.type, "StopGradient") self.assertIs(dydx, None) else: if scale == 1.0: self.assertEqual(y.op.type, "Identity") else: self.assertEqual(y.op.type, "IdentityN") with self.test_session() as sess: dydx_, y_ = sess.run([dydx, y], feed_dict={x: [x_]}) self.assertAlmostEqual(dydx_[0], 2 * scale * x_, places=6) self.assertAlmostEqual(y_[0], x_ ** 2, places=6) def testTwoOps(self): """Tests that the op can be instantiated twice with appropriate results. Implementations with inappropriate global registration of gradients will fail this test. """ x = tf.placeholder(tf.float32, [1]) y = x * x y = snt.scale_gradient(y, 0.1) y = snt.scale_gradient(y, 0.1) dydx = tf.gradients([y], [x])[0] with self.test_session() as sess: dydx_, y_ = sess.run([dydx, y], feed_dict={x: [3.0]}) self.assertAlmostEqual(dydx_[0], 2 * 0.1**2 * 3.0, places=6) self.assertAlmostEqual(y_[0], 3.0 ** 2, places=6) def testShape(self): x = tf.placeholder(tf.float32, [None, 10, 13]) y = snt.scale_gradient(x, 0.1) shape = tuple(y.get_shape().as_list()) self.assertEqual(shape, (None, 10, 13)) def testOpScaleDifferentDtypes(self): x_1 = tf.placeholder(tf.float16, shape=()) snt.scale_gradient(x_1, 0.1) # clip_gradient throws here if the Defun func_name does not use the dtype. x_2 = tf.placeholder(tf.float32, shape=()) snt.scale_gradient(x_2, 0.1) if __name__ == "__main__": tf.test.main()
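

# Hedged usage sketch (not part of the original tests): `snt.scale_gradient`
# leaves the forward value unchanged while multiplying the gradient flowing
# back into its input, as the tests above verify. The 0.5 scale factor is an
# illustrative assumption.
def _scale_gradient_sketch():
  x = tf.placeholder(tf.float32, [1])
  y = snt.scale_gradient(x * x, 0.5)
  # Forward pass: y == x ** 2. Backward pass: dy/dx == 2 * 0.5 * x.
  dydx = tf.gradients([y], [x])[0]
  return y, dydx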
sonnet-1
sonnet/python/modules/scale_gradient_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================== """Module that calculates a differentiable decaying moving average.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from sonnet.python.modules import base import tensorflow.compat.v1 as tf from tensorflow.python.framework import function # pylint: disable=g-direct-tensorflow-import from tensorflow.python.training import moving_averages # pylint: disable=g-direct-tensorflow-import class MovingAverage(base.AbstractModule): """Calculates a differentiable decaying moving average. The moving average is kept in a variable that can either be local or global. The initial moving average value is set to the first value that is received by the module. The module lets gradients flow through the last element added to the moving average. """ def __init__(self, decay=0.99, local=False, name="moving_average"): """Constructor. Args: decay: float in range [0, 1], decay of the moving average. local: bool, specifies whether the variables are local or not. name: string, name of the Sonnet module. Default is 'moving_average'. Raises: ValueError: if decay is not in the valid range [0, 1]. """ super(MovingAverage, self).__init__(name=name) if decay < 0.0 or decay > 1.0: raise ValueError("Decay must be a float in the [0, 1] range, " "but is {}.".format(decay)) self._decay = decay if local: self._collection = tf.GraphKeys.LOCAL_VARIABLES else: self._collection = tf.GraphKeys.GLOBAL_VARIABLES def reset(self): return tf.group( self._initialized.initializer, self._moving_average.initializer ) def _build(self, inputs): """Returns the moving average of the values that went through `inputs`. Args: inputs: tensor. Returns: A moving average calculated as `(1 - decay) * inputs + decay * average`. """ # This trivial op helps correct execution of control flow when inputs is # not a resource variable. See, for example, # MovingAverageTest.testAverage(use_resource_vars=False) in # moving_averate_test.py. # Note that inputs = tf.identity(inputs) does NOT have the same effect. 
inputs = 1 * inputs self._initialized = tf.get_variable( "initialized", shape=(), dtype=tf.bool, initializer=tf.constant_initializer(False), trainable=False, use_resource=True, collections=[self._collection]) self._moving_average = tf.get_variable( "moving_average", shape=inputs.get_shape(), initializer=tf.zeros_initializer(), trainable=False, use_resource=True, collections=[self._collection]) update_op = moving_averages.assign_moving_average( variable=self._moving_average, value=inputs, decay=self._decay, zero_debias=False, name="update_moving_average") def update(): return tf.identity(update_op) def initialize(): with tf.control_dependencies([update_op]): value = tf.assign(self._moving_average, inputs) with tf.control_dependencies([value]): update_initialized = tf.assign(self._initialized, True) with tf.control_dependencies([update_initialized]): value = tf.identity(value) return value moving_avg = tf.cond(self._initialized, update, initialize) return _pass_through_gradients(inputs, moving_avg) def _pass_through_gradients(x, moving_avg, name="pass_through_gradients"): """Defines a custom backward pass, only differentiating through x. Returns an op returning the current value of the moving average in the forward pass, whilst allowing gradients to flow through the last entry to the moving average, operating in a similar fashion to ``` x + tf.stop_gradient(moving_avg - x) ``` but avoiding the related numerical issues. Args: x: the last entry to the moving average. moving_avg: the current value of the moving average. name: name for name scope of the pass through gradient operation. Returns: An op returning the current value of the moving average for the forward pass, allowing gradients to flow through the last op added to the moving average. """ with tf.name_scope(name, "pass_through_gradients", values=[x, moving_avg]): x_dtype = x.dtype.base_dtype # Convert ref dtypes to regular dtypes. moving_avg_dtype = moving_avg.dtype.base_dtype if x_dtype != moving_avg_dtype: raise TypeError( "Inputs to _differentiate_last_step are expected to be of the same " "type, but were {} and {}.".format(x_dtype, moving_avg_dtype)) differentiate_last_step_op = _get_pass_through_gradients_op(x_dtype) output = differentiate_last_step_op(x, moving_avg) output.set_shape(x.get_shape()) return output # Implement simple memoization mechanism using a global dict. _op_ctors = dict() def _get_pass_through_gradients_op(dtype): """Creates an op switching between two ops for the forward and backward pass. This method produces a new op the first time it is called with a given `dtype` argument, and then uses the cached op each time it is called after that with the same `dtype`. Args: dtype: the dtype of the inputs. Returns: The switching op. """ def _instantiate_op(dtype): """Instantiate pass through gradients op constructor for given dtype.""" def _forward(x, moving_avg): del x return tf.identity(moving_avg) def _backward(op, grad): """Forwards the gradients to the op's inputs.""" del op return grad, None func_name = "PassThroughGradients_{}".format(dtype.name) return function.Defun( dtype, dtype, python_grad_func=_backward, func_name=func_name)(_forward) if dtype.name not in _op_ctors: _op_ctors[dtype.name] = _instantiate_op(dtype) return _op_ctors[dtype.name]
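

# Hedged usage sketch (not part of the original module): connecting the
# MovingAverage module defined above. The decay value and input shape are
# illustrative assumptions; note that `reset()` is only meaningful after the
# module has been connected, since its variables are created in `_build`.
def _moving_average_sketch():
  inputs = tf.placeholder(tf.float32, shape=[4])
  moving_avg = MovingAverage(decay=0.99)
  # Forward value is (1 - decay) * inputs + decay * average, with gradients
  # flowing only through the last entry added to the average.
  averaged = moving_avg(inputs)
  reset_op = moving_avg.reset()
  return averaged, reset_op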
sonnet-1
sonnet/python/modules/moving_average.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for block_matrix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np from sonnet.python.modules import block_matrix import tensorflow.compat.v1 as tf def create_input(size, batch_size=1): x = tf.range(size * batch_size) return tf.reshape(tf.to_float(x), shape=(batch_size, -1)) class BlockTriangularMatrixTest(tf.test.TestCase): def _check_output_size(self, btm, result, batch_size=1): self.assertEqual(result.shape, (batch_size,) + btm.output_shape) def test_lower(self): """Tests block lower-triangular matrix.""" btm = block_matrix.BlockTriangularMatrix( block_shape=(2, 3), block_rows=3, upper=False) self.assertEqual(btm.num_blocks, 6) self.assertEqual(btm.block_size, 6) self.assertEqual(btm.input_size, 36) output = btm(create_input(btm.input_size)) with self.test_session() as sess: result = sess.run(output) self._check_output_size(btm, result) expected = np.array([[[0, 1, 2, 0, 0, 0, 0, 0, 0], [3, 4, 5, 0, 0, 0, 0, 0, 0], [6, 7, 8, 9, 10, 11, 0, 0, 0], [12, 13, 14, 15, 16, 17, 0, 0, 0], [18, 19, 20, 21, 22, 23, 24, 25, 26], [27, 28, 29, 30, 31, 32, 33, 34, 35]]]) self.assertAllEqual(result, expected) def test_lower_no_diagonal(self): """Tests block lower-triangular matrix without diagonal.""" btm = block_matrix.BlockTriangularMatrix( block_shape=(2, 3), block_rows=3, include_diagonal=False) self.assertEqual(btm.num_blocks, 3) self.assertEqual(btm.block_size, 6) self.assertEqual(btm.input_size, 18) output = btm(create_input(btm.input_size)) with self.test_session() as sess: result = sess.run(output) self._check_output_size(btm, result) expected = np.array([[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 0, 0, 0, 0, 0], [3, 4, 5, 0, 0, 0, 0, 0, 0], [6, 7, 8, 9, 10, 11, 0, 0, 0], [12, 13, 14, 15, 16, 17, 0, 0, 0]]]) self.assertAllEqual(result, expected) def test_upper(self): """Tests block upper-triangular matrix.""" btm = block_matrix.BlockTriangularMatrix( block_shape=(2, 3), block_rows=3, upper=True) self.assertEqual(btm.num_blocks, 6) self.assertEqual(btm.block_size, 6) self.assertEqual(btm.input_size, 36) output = btm(create_input(btm.input_size)) with self.test_session() as sess: result = sess.run(output) self._check_output_size(btm, result) expected = np.array([[[0, 1, 2, 3, 4, 5, 6, 7, 8], [9, 10, 11, 12, 13, 14, 15, 16, 17], [0, 0, 0, 18, 19, 20, 21, 22, 23], [0, 0, 0, 24, 25, 26, 27, 28, 29], [0, 0, 0, 0, 0, 0, 30, 31, 32], [0, 0, 0, 0, 0, 0, 33, 34, 35]]]) self.assertAllEqual(result, expected) def test_upper_no_diagonal(self): """Tests block upper-triangular matrix without diagonal.""" btm = block_matrix.BlockTriangularMatrix( block_shape=(2, 3), block_rows=3, upper=True, include_diagonal=False) self.assertEqual(btm.num_blocks, 3) self.assertEqual(btm.block_size, 6) 
self.assertEqual(btm.input_size, 18) output = btm(create_input(btm.input_size)) with self.test_session() as sess: result = sess.run(output) self._check_output_size(btm, result) expected = np.array([[[0, 0, 0, 0, 1, 2, 3, 4, 5], [0, 0, 0, 6, 7, 8, 9, 10, 11], [0, 0, 0, 0, 0, 0, 12, 13, 14], [0, 0, 0, 0, 0, 0, 15, 16, 17], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]]]) self.assertAllEqual(result, expected) def test_batch(self): """Tests batching.""" btm = block_matrix.BlockTriangularMatrix( block_shape=(2, 2), block_rows=2, upper=False) output = btm(create_input(12, batch_size=2)) with self.test_session() as sess: result = sess.run(output) self._check_output_size(btm, result, batch_size=2) expected = np.array([ [[0, 1, 0, 0], [2, 3, 0, 0], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 0, 0], [14, 15, 0, 0], [16, 17, 18, 19], [20, 21, 22, 23]]]) self.assertAllEqual(result, expected) class BlockDiagonalMatrixTest(tf.test.TestCase): def test_default(self): """Tests BlockDiagonalMatrix.""" bdm = block_matrix.BlockDiagonalMatrix(block_shape=(2, 3), block_rows=3) self.assertEqual(bdm.num_blocks, 3) self.assertEqual(bdm.block_size, 6) self.assertEqual(bdm.input_size, 18) output = bdm(create_input(bdm.input_size)) with self.test_session() as sess: result = sess.run(output) expected = np.array([[[0, 1, 2, 0, 0, 0, 0, 0, 0], [3, 4, 5, 0, 0, 0, 0, 0, 0], [0, 0, 0, 6, 7, 8, 0, 0, 0], [0, 0, 0, 9, 10, 11, 0, 0, 0], [0, 0, 0, 0, 0, 0, 12, 13, 14], [0, 0, 0, 0, 0, 0, 15, 16, 17]]]) self.assertAllEqual(result, expected) def test_properties(self): """Tests properties of BlockDiagonalMatrix.""" bdm = block_matrix.BlockDiagonalMatrix(block_shape=(3, 5), block_rows=7) self.assertEqual(bdm.num_blocks, 7) self.assertEqual(bdm.block_size, 15) self.assertEqual(bdm.input_size, 105) self.assertEqual(bdm.output_shape, (21, 35)) self.assertEqual(bdm.block_shape, (3, 5)) if __name__ == "__main__": tf.test.main()
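

# Hedged usage sketch (not part of the original tests): building a block
# diagonal matrix from a flat parameter vector, mirroring test_default above.
# The block configuration and batch size of 1 are illustrative assumptions.
def _block_diagonal_sketch():
  bdm = block_matrix.BlockDiagonalMatrix(block_shape=(2, 3), block_rows=3)
  flat_params = create_input(bdm.input_size)  # shape [1, 18]
  # Result has shape [1, 6, 9], with the 2x3 blocks laid out on the diagonal
  # and zeros elsewhere.
  return bdm(flat_params)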
sonnet-1
sonnet/python/modules/block_matrix_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.residual.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf class HeterogeneousStateCore(snt.RNNCore): """Dummy core with heterogeneous state.""" def __init__(self, hidden_size, name=None, custom_getter=None): super(HeterogeneousStateCore, self).__init__( name=name, custom_getter=custom_getter) self._hidden_size = hidden_size def _build(self, inputs, prev_state): return (inputs, prev_state) @property def output_size(self): return self._hidden_size @property def state_size(self): return (tf.TensorShape([self._hidden_size]), tf.TensorShape([self._hidden_size])) def initial_state(self, batch_size): return (tf.zeros([batch_size, self._hidden_size], dtype=tf.float32), tf.zeros([batch_size, self._hidden_size], dtype=tf.int32)) class ResidualTest(tf.test.TestCase): def setUp(self): super(ResidualTest, self).setUp() self.batch_size = 3 self.in_size = 4 def testShape(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) linear = snt.Linear(self.in_size) residual_wrapper = snt.Residual(linear, name="residual") output = residual_wrapper(inputs) shape = np.ndarray((self.batch_size, self.in_size)) self.assertShapeEqual(shape, output) def testComputation(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) linear = snt.Linear(name="rnn", output_size=self.in_size) residual = snt.Residual(linear, name="residual") output = residual(inputs) w = linear.w b = linear.b with self.test_session() as sess: # With random data, check the TF calculation matches the Numpy version. 
input_data = np.random.randn(self.batch_size, self.in_size) tf.global_variables_initializer().run() fetches = [output, w, b] output = sess.run(fetches, {inputs: input_data}) output_v, w_v, b_v = output output = np.dot(input_data, w_v) + b_v residual_output = output + input_data self.assertAllClose(residual_output, output_v) class ResidualCoreTest(tf.test.TestCase): def setUp(self): super(ResidualCoreTest, self).setUp() self.batch_size = 3 self.in_size = 4 def testShape(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.placeholder( tf.float32, shape=[self.batch_size, self.in_size]) vanilla_rnn = snt.VanillaRNN(self.in_size) residual_wrapper = snt.ResidualCore(vanilla_rnn, name="residual") output, next_state = residual_wrapper(inputs, prev_state) shape = np.ndarray((self.batch_size, self.in_size)) self.assertEqual(self.in_size, residual_wrapper.output_size) self.assertShapeEqual(shape, output) self.assertShapeEqual(shape, next_state) def testComputation(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.in_size) residual = snt.ResidualCore(vanilla_rnn, name="residual") output, new_state = residual(inputs, prev_state) in_to_hid = vanilla_rnn.in_to_hidden_variables hid_to_hid = vanilla_rnn.hidden_to_hidden_variables with self.test_session() as sess: # With random data, check the TF calculation matches the Numpy version. input_data = np.random.randn(self.batch_size, self.in_size) prev_state_data = np.random.randn(self.batch_size, self.in_size) tf.global_variables_initializer().run() fetches = [output, new_state, in_to_hid[0], in_to_hid[1], hid_to_hid[0], hid_to_hid[1]] output = sess.run(fetches, {inputs: input_data, prev_state: prev_state_data}) output_v, new_state_v, in_to_hid_w, in_to_hid_b = output[:4] hid_to_hid_w, hid_to_hid_b = output[4:] real_in_to_hid = np.dot(input_data, in_to_hid_w) + in_to_hid_b real_hid_to_hid = np.dot(prev_state_data, hid_to_hid_w) + hid_to_hid_b vanilla_output = np.tanh(real_in_to_hid + real_hid_to_hid) residual_output = vanilla_output + input_data self.assertAllClose(residual_output, output_v) self.assertAllClose(vanilla_output, new_state_v) def testHeterogeneousState(self): """Checks that the shape and type of the initial state are preserved.""" core = HeterogeneousStateCore(name="rnn", hidden_size=self.in_size) residual = snt.ResidualCore(core, name="residual") core_state = core.initial_state(self.batch_size) residual_state = residual.initial_state(self.batch_size) self.assertEqual(core_state[0].shape.as_list(), residual_state[0].shape.as_list()) self.assertEqual(core_state[1].shape.as_list(), residual_state[1].shape.as_list()) self.assertEqual(core_state[0].dtype, residual_state[0].dtype) self.assertEqual(core_state[1].dtype, residual_state[1].dtype) class SkipConnectionCoreTest(tf.test.TestCase): def setUp(self): super(SkipConnectionCoreTest, self).setUp() self.batch_size = 3 self.in_size = 4 self.hidden_size = 18 def testOutputSize(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.placeholder( tf.float32, shape=[self.batch_size, self.hidden_size]) vanilla_rnn = snt.VanillaRNN(self.hidden_size) skip_wrapper = snt.SkipConnectionCore(vanilla_rnn, name="skip") with self.assertRaises(ValueError): _ = skip_wrapper.output_size skip_wrapper(inputs, prev_state) self.assertAllEqual([self.in_size + 
self.hidden_size], skip_wrapper.output_size.as_list()) skip_wrapper = snt.SkipConnectionCore( vanilla_rnn, input_shape=(self.in_size,), name="skip") self.assertAllEqual([self.in_size + self.hidden_size], skip_wrapper.output_size.as_list()) def testShape(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.placeholder( tf.float32, shape=[self.batch_size, self.hidden_size]) vanilla_rnn = snt.VanillaRNN(self.hidden_size) skip_wrapper = snt.SkipConnectionCore(vanilla_rnn, name="skip") output, next_state = skip_wrapper(inputs, prev_state) output_shape = np.ndarray((self.batch_size, self.in_size + self.hidden_size)) state_shape = np.ndarray((self.batch_size, self.hidden_size)) self.assertShapeEqual(output_shape, output) self.assertShapeEqual(state_shape, next_state) def testComputation(self): inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) prev_state = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size]) vanilla_rnn = snt.VanillaRNN(name="rnn", hidden_size=self.in_size) residual = snt.SkipConnectionCore(vanilla_rnn, name="skip") output, new_state = residual(inputs, prev_state) in_to_hid = vanilla_rnn.in_to_hidden_variables hid_to_hid = vanilla_rnn.hidden_to_hidden_variables with self.test_session() as sess: # With random data, check the TF calculation matches the Numpy version. input_data = np.random.randn(self.batch_size, self.in_size) prev_state_data = np.random.randn(self.batch_size, self.in_size) tf.global_variables_initializer().run() fetches = [output, new_state, in_to_hid[0], in_to_hid[1], hid_to_hid[0], hid_to_hid[1]] output = sess.run(fetches, {inputs: input_data, prev_state: prev_state_data}) output_v, new_state_v, in_to_hid_w, in_to_hid_b = output[:4] hid_to_hid_w, hid_to_hid_b = output[4:] real_in_to_hid = np.dot(input_data, in_to_hid_w) + in_to_hid_b real_hid_to_hid = np.dot(prev_state_data, hid_to_hid_w) + hid_to_hid_b vanilla_output = np.tanh(real_in_to_hid + real_hid_to_hid) skip_output = np.concatenate((input_data, vanilla_output), -1) self.assertAllClose(skip_output, output_v) self.assertAllClose(vanilla_output, new_state_v) def testHeterogeneousState(self): """Checks that the shape and type of the initial state are preserved.""" core = HeterogeneousStateCore(name="rnn", hidden_size=self.hidden_size) skip_wrapper = snt.SkipConnectionCore(core, name="skip") core_state = core.initial_state(self.batch_size) skip_state = skip_wrapper.initial_state(self.batch_size) self.assertEqual(core_state[0].shape.as_list(), skip_state[0].shape.as_list()) self.assertEqual(core_state[1].shape.as_list(), skip_state[1].shape.as_list()) self.assertEqual(core_state[0].dtype, skip_state[0].dtype) self.assertEqual(core_state[1].dtype, skip_state[1].dtype) if __name__ == "__main__": tf.test.main()
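

# Hedged usage sketch (not part of the original tests): wrapping a module and
# an RNN core with the residual wrappers exercised above. Sizes are
# illustrative assumptions; the wrapped module must preserve the input size so
# that the skip addition is well defined.
def _residual_sketch():
  inputs = tf.placeholder(tf.float32, shape=[3, 4])
  prev_state = tf.placeholder(tf.float32, shape=[3, 4])
  residual_linear = snt.Residual(snt.Linear(output_size=4))
  output = residual_linear(inputs)  # linear(inputs) + inputs
  residual_core = snt.ResidualCore(snt.VanillaRNN(hidden_size=4))
  core_output, next_state = residual_core(inputs, prev_state)
  return output, core_output, next_state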
sonnet-1
sonnet/python/modules/residual_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for `sonnet.python.modules.conv`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import random # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt from sonnet.python.modules import conv import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import def create_constant_initializers(w, b, use_bias): if use_bias: return {"w": tf.constant_initializer(w), "b": tf.constant_initializer(b)} else: return {"w": tf.constant_initializer(w)} def create_separable_constant_initializers(w_dw, w_pw, b, use_bias): if use_bias: return {"w_dw": tf.constant_initializer(w_dw), "w_pw": tf.constant_initializer(w_pw), "b": tf.constant_initializer(b)} else: return {"w_dw": tf.constant_initializer(w_dw), "w_pw": tf.constant_initializer(w_pw)} def create_regularizers(use_bias, regularizer): if use_bias: return {"w": regularizer, "b": regularizer} else: return {"w": regularizer} def create_separable_regularizers(use_bias, regularizer): if use_bias: return {"w_dw": regularizer, "w_pw": regularizer, "b": regularizer} else: return {"w_dw": regularizer, "w_pw": regularizer} class FillListTest(tf.test.TestCase): def test(self): """Tests the _fill_list private function in snt.conv.""" x = random.randint(1, 10) self.assertEqual(conv._fill_shape(x, 1), (x,)) self.assertEqual(conv._fill_shape(x, 2), (x, x)) self.assertEqual(conv._fill_shape(x, 3), (x, x, x)) self.assertEqual(conv._fill_shape(x, 4), (x, x, x, x)) self.assertEqual(conv._fill_shape([x, x + 1, x + 2], 3), (x, x + 1, x + 2)) err = "n must be a positive integer" with self.assertRaisesRegexp(TypeError, err): conv._fill_shape(x, 0) err = ("must be either a positive integer or an iterable of positive " "integers of size 4") with self.assertRaisesRegexp(TypeError, err): conv._fill_shape([], 4) with self.assertRaisesRegexp(TypeError, err): conv._fill_shape([x], 4) with self.assertRaisesRegexp(TypeError, err): conv._fill_shape([x, x], 4) with self.assertRaisesRegexp(TypeError, err): conv._fill_shape(["b"], 4) class DefaultTransposeSizeTest(parameterized.TestCase, tf.test.TestCase): # Constants for use in parameterized test. 
input_shape = [[20], [23, 11, 13], [1, 3]] stride = [[3], [7, 1, 2], [6, 2]] kernel_shape = [[4], [1, 3, 2], [34, 2]] padding = [snt.SAME, snt.VALID, snt.VALID] output_shape = [] for i, pad in enumerate(padding): if pad == snt.SAME: output_shape.append([x * y for x, y in zip(input_shape[i], stride[i])]) if pad == snt.VALID: output_shape.append([x * y + z - 1 for x, y, z in zip(input_shape[i], stride[i], kernel_shape[i])]) @parameterized.parameters( *zip(input_shape, stride, kernel_shape, padding, output_shape)) def testFunction(self, input_shape, stride, kernel_shape, padding, output_shape): """Test output shapes are correct.""" self.assertEqual(conv._default_transpose_size(input_shape, stride, kernel_shape=kernel_shape, padding=padding), tuple(output_shape)) @parameterized.parameters( *zip(input_shape, stride, kernel_shape, padding, output_shape)) def testModules(self, input_shape, stride, kernel_shape, padding, output_shape): """Test ConvTranspose modules return expected default output shapes.""" if len(input_shape) == 1: module = snt.Conv1DTranspose elif len(input_shape) == 2: module = snt.Conv2DTranspose elif len(input_shape) == 3: module = snt.Conv3DTranspose batch_size = [1] channels = [1] inputs = tf.zeros(shape=batch_size + input_shape + channels, dtype=tf.float32) outputs = module(output_channels=1, kernel_shape=kernel_shape, stride=stride, padding=padding)(inputs) self.assertEqual(output_shape, outputs.get_shape().as_list()[1:-1]) @parameterized.parameters( *zip(input_shape, stride, kernel_shape, padding, output_shape)) def testConnectTwice(self, input_shape, stride, kernel_shape, padding, output_shape): """Test ConvTranspose modules with multiple connections.""" if len(input_shape) == 1: module = snt.Conv1DTranspose elif len(input_shape) == 2: module = snt.Conv2DTranspose elif len(input_shape) == 3: module = snt.Conv3DTranspose batch_size = [1] channels = [1] inputs = tf.zeros(shape=batch_size + input_shape + channels, dtype=tf.float32) inputs_2 = tf.zeros(shape=batch_size + input_shape + channels, dtype=tf.float32) conv1 = module(output_channels=1, kernel_shape=kernel_shape, stride=stride, padding=padding) outputs = conv1(inputs) # Connecting for the second time with the same shape should be OK. outputs_2 = conv1(inputs_2) # So should connecting with a different shape. 
new_input_shape = [25] * len(input_shape) new_inputs = tf.zeros(shape=batch_size + new_input_shape + channels, dtype=tf.float32) new_outputs = conv1(new_inputs) with self.test_session() as sess: tf.global_variables_initializer().run() outputs_array, outputs_array_2 = sess.run([outputs, outputs_2]) self.assertEqual(outputs_array.shape, outputs_array_2.shape) sess.run(new_outputs) class SharedConvTest(parameterized.TestCase, tf.test.TestCase): CONV_1D_KWARGS = { "output_channels": 1, "kernel_shape": 3, } CONV_1D_MASKED_KWARGS = { "output_channels": 1, "kernel_shape": 3, "mask": np.zeros((3, 10, 1), dtype=np.float64), } CONV_1D_CAUSAL_KWARGS = { "output_channels": 1, "kernel_shape": 3, "padding": snt.CAUSAL, } SEPARABLE_CONV_1D_KWARGS = { "output_channels": 10, "channel_multiplier": 1, "kernel_shape": 3, "rate": (2,), } CONV_2D_KWARGS = CONV_1D_KWARGS CONV_3D_KWARGS = CONV_1D_KWARGS CONV_3D_MIXED_PADDING_KWARGS = { "output_channels": 1, "kernel_shape": 3, "padding": [snt.CAUSAL, snt.SAME, snt.FULL] } DEPTHWISE_CONV_2D_KWARGS = { "channel_multiplier": 1, "kernel_shape": 3, } SEPARABLE_CONV_2D_KWARGS = { "output_channels": 10, "channel_multiplier": 1, "kernel_shape": 3, "rate": (2, 1), } IN_PLANE_CONV_2D_KWARGS = { "kernel_shape": 3, } CONV_1D_TRANSPOSE_KWARGS = { "output_channels": 1, "output_shape": [10], "kernel_shape": 3, } CONV_2D_TRANSPOSE_KWARGS = { "output_channels": 1, "output_shape": [10, 10], "kernel_shape": 3, } CONV_3D_TRANSPOSE_KWARGS = { "output_channels": 1, "output_shape": [10, 10, 10], "kernel_shape": 3, } modules = [ (snt.Conv1D, 1, CONV_1D_KWARGS), (snt.Conv1D, 1, CONV_1D_MASKED_KWARGS), (snt.Conv1D, 1, CONV_1D_CAUSAL_KWARGS), (snt.Conv2D, 2, CONV_2D_KWARGS), (snt.Conv3D, 3, CONV_3D_KWARGS), (snt.Conv3D, 3, CONV_3D_MIXED_PADDING_KWARGS), (snt.Conv1DTranspose, 1, CONV_1D_TRANSPOSE_KWARGS), (snt.Conv2DTranspose, 2, CONV_2D_TRANSPOSE_KWARGS), (snt.Conv3DTranspose, 3, CONV_3D_TRANSPOSE_KWARGS), (snt.DepthwiseConv2D, 2, DEPTHWISE_CONV_2D_KWARGS), (snt.InPlaneConv2D, 2, IN_PLANE_CONV_2D_KWARGS), (snt.SeparableConv1D, 1, SEPARABLE_CONV_1D_KWARGS), (snt.SeparableConv2D, 2, SEPARABLE_CONV_2D_KWARGS), ] @parameterized.parameters(*modules) def testPartitioners(self, module, num_input_dims, module_kwargs): inputs = tf.zeros((10,) * (num_input_dims + 2)) keys = module.get_possible_initializer_keys(use_bias=True) partitioners = { key: tf.variable_axis_size_partitioner(10) for key in keys } if "mask" in module_kwargs: np_dtype = inputs.dtype.as_numpy_dtype module_kwargs["mask"] = module_kwargs["mask"].astype(np_dtype) convolution = module(partitioners=partitioners, **module_kwargs) convolution(inputs) for key in keys: self.assertEqual(type(getattr(convolution, key)), variables.PartitionedVariable) try: convolution_t = convolution.transpose() except (AttributeError, snt.NotSupportedError): return self.assertEqual(convolution_t.partitioners, convolution.partitioners) @parameterized.parameters( *itertools.product(modules, (True, False), (tf.float16, tf.float32, tf.float64))) def testVariables(self, module_info, use_bias, dtype): """The correct number of variables are created.""" module, num_input_dims, module_kwargs = module_info mod_name = "module" input_shape = (10,) * (num_input_dims + 2) inputs = tf.placeholder(dtype, input_shape) if "mask" in module_kwargs: np_dtype = dtype.as_numpy_dtype module_kwargs["mask"] = module_kwargs["mask"].astype(np_dtype) with tf.variable_scope("scope"): conv_mod = module(name=mod_name, use_bias=use_bias, **module_kwargs) self.assertEqual(conv_mod.scope_name, 
"scope/" + mod_name) self.assertEqual(conv_mod.module_name, mod_name) with self.assertRaisesRegexp(snt.NotConnectedError, "not instantiated yet"): conv_mod.get_variables() output = conv_mod(inputs) self.assertEqual(dtype, output.dtype) # Check that the graph and module has the correct number of variables: one # two, or three, depending on module and configuration. supposed_variables = conv_mod.get_possible_initializer_keys( use_bias=use_bias) self.assertIn(len(supposed_variables), [1, 2, 3]) graph_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) self.assertEqual(len(graph_variables), len(supposed_variables)) conv_variables = conv_mod.get_variables() self.assertEqual(len(conv_variables), len(supposed_variables)) variable_names = {v.name for v in conv_variables} for var_name in supposed_variables: self.assertIn("scope/{}/{}:0".format(mod_name, var_name), variable_names) with self.test_session() as sess: tf.global_variables_initializer().run() inputs_data = np.random.rand(*input_shape) sess.run(output, feed_dict={inputs: inputs_data}) @parameterized.parameters(*itertools.product(modules, (True, False))) def testMissingChannelsError(self, module_info, use_bias): """Error is thrown if the input is missing a channel dimension.""" module, num_input_dims, module_kwargs = module_info conv_mod = module(use_bias=use_bias, **module_kwargs) inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1)) err = "Input Tensor must have" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): conv_mod(inputs) @parameterized.parameters(*itertools.product(modules, (True, False))) def testNonDefinedChannelsDimension(self, module_info, use_bias): """Error is thrown if the input's channel dimension isn't defined.""" module, num_input_dims, module_kwargs = module_info conv_mod = module(use_bias=use_bias, **module_kwargs) inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1) + (None,)) err = "Number of input channels" with self.assertRaisesRegexp(snt.UnderspecifiedError, err): conv_mod(inputs) @parameterized.parameters(*itertools.product(modules, (True, False))) def testFlattenedError(self, module_info, use_bias): """Error is thrown if the input has been incorrectly flattened.""" module, num_input_dims, module_kwargs = module_info conv_mod = module(use_bias=use_bias, **module_kwargs) inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 1)) inputs = snt.BatchFlatten()(inputs) err = "Input Tensor must have" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): conv_mod(inputs) @parameterized.parameters(*modules) def testCustomGetter(self, module, num_input_dims, module_kwargs): """Check that custom_getter option works.""" def stop_gradient(getter, *args, **kwargs): return tf.stop_gradient(getter(*args, **kwargs)) inputs = tf.placeholder(tf.float32, (10,) * (num_input_dims + 2)) if "mask" in module_kwargs: np_dtype = inputs.dtype.as_numpy_dtype module_kwargs["mask"] = module_kwargs["mask"].astype(np_dtype) conv_mod1 = module(**module_kwargs) out1 = conv_mod1(inputs) conv_mod2 = module(custom_getter=stop_gradient, **module_kwargs) out2 = conv_mod2(inputs) num_variables = len(conv_mod1.get_variables()) grads1 = tf.gradients(out1, list(conv_mod1.get_variables())) grads2 = tf.gradients(out2, list(conv_mod2.get_variables())) self.assertEqual([tf.Tensor] * num_variables, [type(g) for g in grads1]) self.assertEqual([None] * num_variables, grads2) # Check that the transpose, if present, also adopts the custom getter. 
try: conv_mod2_transpose = conv_mod2.transpose() except (AttributeError, snt.NotSupportedError): return inputs_transpose = tf.placeholder(tf.float32, out2.get_shape()) out3 = conv_mod2_transpose(inputs_transpose) grads3 = tf.gradients(out3, list(conv_mod2_transpose.get_variables())) self.assertEqual([None] * num_variables, grads3) # These functions compute the expected output shape of a convolution of each # padding type, for a given input shape and kernel size. _PADDINGS_EXPECTED_SHAPE_TRANSFORMS = { snt.SAME: lambda in_length, kernel_size: in_length, snt.VALID: lambda in_length, kernel_size: in_length - kernel_size + 1, snt.FULL: lambda in_length, kernel_size: in_length + kernel_size - 1, snt.CAUSAL: lambda in_length, kernel_size: in_length, snt.REVERSE_CAUSAL: lambda in_length, kernel_size: in_length, } class Conv2DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters(*itertools.product( [True, False], # use_bias conv.SUPPORTED_2D_DATA_FORMATS, # data_format conv.ALLOWED_PADDINGS, # padding_height conv.ALLOWED_PADDINGS, # padding_width conv.ALLOWED_PADDING_VALUES, # padding_value )) def testShapes(self, use_bias, data_format, padding_height, padding_width, padding_value): """The generated shapes are correct with different paddings.""" batch_size = random.randint(1, 100) in_height = random.randint(10, 288) in_width = random.randint(10, 288) in_channels = random.randint(1, 10) out_channels = random.randint(1, 32) kernel_shape_h = random.randint(1, 11) kernel_shape_w = random.randint(1, 11) if data_format == conv.DATA_FORMAT_NHWC: inputs = tf.placeholder( tf.float32, shape=[batch_size, in_height, in_width, in_channels]) else: # NCHW inputs = tf.placeholder( tf.float32, shape=[batch_size, in_channels, in_height, in_width]) conv1 = snt.Conv2D( name="conv1", output_channels=out_channels, kernel_shape=[kernel_shape_h, kernel_shape_w], padding=[padding_height, padding_width], padding_value=padding_value, data_format=data_format, stride=1, use_bias=use_bias) output = conv1(inputs) expected_out_height = _PADDINGS_EXPECTED_SHAPE_TRANSFORMS[padding_height]( in_height, kernel_shape_h) expected_out_width = _PADDINGS_EXPECTED_SHAPE_TRANSFORMS[padding_width]( in_width, kernel_shape_w) if data_format == conv.DATA_FORMAT_NHWC: self.assertTrue(output.get_shape().is_compatible_with( [batch_size, expected_out_height, expected_out_width, out_channels])) else: # NCHW self.assertTrue(output.get_shape().is_compatible_with( [batch_size, out_channels, expected_out_height, expected_out_width])) self.assertTrue( conv1.w.get_shape().is_compatible_with( [kernel_shape_h, kernel_shape_w, in_channels, out_channels])) if use_bias: self.assertTrue( conv1.b.get_shape().is_compatible_with( [out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnown(self, use_bias): """The generated shapes are correct when input shape not known.""" batch_size = 5 in_height = in_width = 32 in_channels = out_channels = 5 kernel_shape_h = kernel_shape_w = 3 inputs = tf.placeholder( tf.float32, shape=[None, None, None, in_channels], name="inputs") conv1 = snt.Conv2D( name="conv1", output_channels=out_channels, kernel_shape=[kernel_shape_h, kernel_shape_w], padding=snt.SAME, stride=1, use_bias=use_bias) output = conv1(inputs) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() output_eval = output.eval({ inputs: np.zeros([batch_size, in_height, in_width, in_channels])}) self.assertEqual( output_eval.shape, (batch_size, 
in_height, in_width, out_channels)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnownAtrous(self, use_bias): """No error is thrown if image shape isn't known for atrous convolution.""" inputs = tf.placeholder( tf.float32, shape=[None, None, None, 5], name="inputs") conv1 = snt.Conv2D( name="conv1", output_channels=5, kernel_shape=[3, 3], padding=snt.SAME, stride=1, rate=2, use_bias=use_bias) conv1(inputs) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testKernelShape(self, use_bias): """Errors are thrown for invalid kernel shapes.""" snt.Conv2D(output_channels=10, kernel_shape=[3, 4], name="conv1", use_bias=use_bias) snt.Conv2D(output_channels=10, kernel_shape=3, name="conv1", use_bias=use_bias) err = "Invalid kernel shape" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv2D(output_channels=10, kernel_shape=[3, 3, 3], name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testStrideError(self, use_bias): """Errors are thrown for invalid strides.""" snt.Conv2D( output_channels=10, kernel_shape=3, stride=1, name="conv1", use_bias=use_bias) snt.Conv2D( output_channels=10, kernel_shape=3, stride=[1, 1], name="conv1", use_bias=use_bias) snt.Conv2D( output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1], name="conv1", use_bias=use_bias) with self.assertRaisesRegexp(snt.IncompatibleShapeError, "Invalid stride"): snt.Conv2D(output_channels=10, kernel_shape=3, stride=[1, 1, 1], name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateError(self, use_bias): """Errors are thrown for invalid dilation rates.""" snt.Conv2D( output_channels=10, kernel_shape=3, rate=1, name="conv1", use_bias=use_bias) snt.Conv2D( output_channels=10, kernel_shape=3, rate=2, name="conv1", use_bias=use_bias) for rate in [0, 0.5, -1]: with self.assertRaisesRegexp(snt.IncompatibleShapeError, "Invalid rate shape*"): snt.Conv2D(output_channels=10, kernel_shape=3, rate=rate, name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateAndStrideError(self, use_bias): """Errors are thrown for stride > 1 when using atrous convolution.""" err = "Cannot have stride > 1 with rate > 1" with self.assertRaisesRegexp(snt.NotSupportedError, err): snt.Conv2D(output_channels=10, kernel_shape=3, stride=2, rate=2, name="conv1", use_bias=use_bias) with self.assertRaisesRegexp(snt.NotSupportedError, err): snt.Conv2D(output_channels=10, kernel_shape=3, stride=[2, 1], rate=2, name="conv1", use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInputTypeError(self, use_bias): """Errors are thrown for invalid input types.""" conv1 = snt.Conv2D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, name="conv1", use_bias=use_bias, initializers=create_constant_initializers( 1.0, 1.0, use_bias)) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test initializers work as expected.""" w = random.random() b = random.random() conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(w, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 
10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( conv1.w.eval(), np.full([3, 3, 2, 1], w, dtype=np.float32)) if use_bias: self.assertAllClose( conv1.b.eval(), [b]) err = "Initializer for 'w' is not a callable function or dictionary" with self.assertRaisesRegexp(TypeError, err): snt.Conv2D(output_channels=10, kernel_shape=3, stride=1, name="conv1", initializers={"w": tf.ones([])}) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, name="conv1", initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_regularizers(use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, regularizers=regularizers, use_bias=use_bias, name="conv1") conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if use_bias: self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") @parameterized.parameters(*itertools.product( [True, False], # use_bias [snt.CONSTANT_PADDING], # padding_value # (padding_h, padding_w, expected_out): [(snt.VALID, snt.VALID, [[9, 9, 9], [9, 9, 9], [9, 9, 9]]), (snt.SAME, snt.SAME, [[4, 6, 6, 6, 4], [6, 9, 9, 9, 6], [6, 9, 9, 9, 6], [6, 9, 9, 9, 6], [4, 6, 6, 6, 4]]), (snt.CAUSAL, snt.CAUSAL, [[1, 2, 3, 3, 3], [2, 4, 6, 6, 6], [3, 6, 9, 9, 9], [3, 6, 9, 9, 9], [3, 6, 9, 9, 9]]), (snt.REVERSE_CAUSAL, snt.REVERSE_CAUSAL, [[9, 9, 9, 6, 3], [9, 9, 9, 6, 3], [9, 9, 9, 6, 3], [6, 6, 6, 4, 2], [3, 3, 3, 2, 1]]), (snt.FULL, snt.FULL, [[1, 2, 3, 3, 3, 2, 1], [2, 4, 6, 6, 6, 4, 2], [3, 6, 9, 9, 9, 6, 3], [3, 6, 9, 9, 9, 6, 3], [3, 6, 9, 9, 9, 6, 3], [2, 4, 6, 6, 6, 4, 2], [1, 2, 3, 3, 3, 2, 1]]), (snt.CAUSAL, snt.FULL, [[1, 2, 3, 3, 3, 2, 1], [2, 4, 6, 6, 6, 4, 2], [3, 6, 9, 9, 9, 6, 3], [3, 6, 9, 9, 9, 6, 3], [3, 6, 9, 9, 9, 6, 3]]), (snt.SAME, snt.REVERSE_CAUSAL, [[6, 6, 6, 4, 2], [9, 9, 9, 6, 3], [9, 9, 9, 6, 3], [9, 9, 9, 6, 3], [6, 6, 6, 4, 2]])], )) def testComputation(self, use_bias, padding_value, padding_and_expected_out): """Run through for something with a known answer using different args.""" padding_h, padding_w, expected_out = padding_and_expected_out conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, padding=(padding_h, padding_w), padding_value=padding_value, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) out = tf.squeeze(out, axis=(0, 3)) expected_out = np.asarray(expected_out, dtype=np.float32) if use_bias: expected_out += 1 with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose(out.eval(), expected_out) @parameterized.parameters(*itertools.product( # (padding_h, padding_w, padding_value, expected_out): # pylint:disable=bad-whitespace [(snt.VALID, snt.VALID, snt.CONSTANT_PADDING, [[26, 28, 23], [29, 31, 26], [25, 27, 29]]), (snt.VALID, snt.VALID, snt.REFLECT_PADDING, [[26, 28, 
23], [29, 31, 26], [25, 27, 29]]), (snt.VALID, snt.VALID, snt.SYMMETRIC_PADDING, [[26, 28, 23], [29, 31, 26], [25, 27, 29]]), (snt.SAME, snt.SAME, snt.CONSTANT_PADDING, [[12, 14, 13, 12, 10], [19, 26, 28, 23, 16], [21, 29, 31, 26, 18], [16, 25, 27, 29, 20], [ 9, 13, 12, 18, 14]]), (snt.SAME, snt.SAME, snt.REFLECT_PADDING, [[36, 25, 20, 15, 18], [30, 26, 28, 23, 26], [33, 29, 31, 26, 29], [22, 25, 27, 29, 32], [16, 19, 21, 30, 33]]), (snt.SAME, snt.SAME, snt.SYMMETRIC_PADDING, [[18, 17, 19, 21, 27], [27, 26, 28, 23, 22], [30, 29, 31, 26, 25], [26, 25, 27, 29, 28], [28, 20, 15, 24, 30]]), (snt.REVERSE_CAUSAL, snt.REVERSE_CAUSAL, snt.CONSTANT_PADDING, [[26, 28, 23, 16, 6], [29, 31, 26, 18, 7], [25, 27, 29, 20, 8], [13, 12, 18, 14, 8], [ 7, 3, 6, 5, 3]]), (snt.REVERSE_CAUSAL, snt.REVERSE_CAUSAL, snt.REFLECT_PADDING, [[26, 28, 23, 26, 23], [29, 31, 26, 29, 26], [25, 27, 29, 32, 29], [19, 21, 30, 33, 30], [25, 27, 29, 32, 29]]), (snt.REVERSE_CAUSAL, snt.REVERSE_CAUSAL, snt.SYMMETRIC_PADDING, [[26, 28, 23, 22, 22], [29, 31, 26, 25, 25], [25, 27, 29, 28, 28], [20, 15, 24, 30, 30], [20, 15, 24, 30, 30]]), (snt.FULL, snt.FULL, snt.CONSTANT_PADDING, [[ 0, 1, 3, 6, 9, 7, 4], [ 5, 12, 14, 13, 12, 10, 6], [ 8, 19, 26, 28, 23, 16, 6], [ 9, 21, 29, 31, 26, 18, 7], [10, 16, 25, 27, 29, 20, 8], [ 7, 9, 13, 12, 18, 14, 8], [ 6, 6, 7, 3, 6, 5, 3]]), (snt.FULL, snt.FULL, snt.REFLECT_PADDING, [[26, 30, 26, 28, 23, 26, 23], [25, 36, 25, 20, 15, 18, 15], [26, 30, 26, 28, 23, 26, 23], [29, 33, 29, 31, 26, 29, 26], [25, 22, 25, 27, 29, 32, 29], [19, 16, 19, 21, 30, 33, 30], [25, 22, 25, 27, 29, 32, 29]]), (snt.FULL, snt.FULL, snt.SYMMETRIC_PADDING, [[18, 18, 17, 19, 21, 27, 27], [18, 18, 17, 19, 21, 27, 27], [27, 27, 26, 28, 23, 22, 22], [30, 30, 29, 31, 26, 25, 25], [26, 26, 25, 27, 29, 28, 28], [28, 28, 20, 15, 24, 30, 30], [28, 28, 20, 15, 24, 30, 30]]), (snt.CAUSAL, snt.CAUSAL, snt.CONSTANT_PADDING, [[ 0, 1, 3, 6, 9], [ 5, 12, 14, 13, 12], [ 8, 19, 26, 28, 23], [ 9, 21, 29, 31, 26], [10, 16, 25, 27, 29]]), (snt.CAUSAL, snt.CAUSAL, snt.REFLECT_PADDING, [[26, 30, 26, 28, 23], [25, 36, 25, 20, 15], [26, 30, 26, 28, 23], [29, 33, 29, 31, 26], [25, 22, 25, 27, 29]]), (snt.CAUSAL, snt.CAUSAL, snt.SYMMETRIC_PADDING, [[18, 18, 17, 19, 21], [18, 18, 17, 19, 21], [27, 27, 26, 28, 23], [30, 30, 29, 31, 26], [26, 26, 25, 27, 29]]), ], )) def testPaddingValues(self, padding_and_expected_out): """Run through for something with a known answer using different args.""" padding_h, padding_w, padding_value, expected_out = padding_and_expected_out conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, padding=(padding_h, padding_w), padding_value=padding_value, name="conv1", use_bias=False, initializers=create_constant_initializers(1.0, 1.0, False)) # Make a less trivial input test tensor. in_tensor = np.arange(5 * 5).reshape([1, 5, 5, 1]) # Do modulo 7 to not have large output values. 
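    # (The values then lie in [0, 6], which keeps the expected outputs above
    # small and easy to verify by hand.)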
in_tensor = in_tensor % 7 out = conv1(tf.constant(in_tensor, dtype=np.float32)) out = tf.squeeze(out, axis=(0, 3)) expected_out = np.asarray(expected_out, dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name="conv1") x = np.random.randn(1, 5, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( out1.eval(), out2.eval()) # Now change the weights w = np.random.randn(3, 3, 1, 1) conv1.w.assign(w).eval() self.assertAllClose( out1.eval(), out2.eval()) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testAtrousConvValid(self, use_bias): """The atrous conv is constructed and applied correctly with snt.VALID.""" conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, rate=2, padding=snt.VALID, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(1.0, 0.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose(np.reshape(out.eval(), [1, 1]), [[9]]) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testAtrousConvSame(self, use_bias): """The atrous conv 2D is constructed and applied correctly with SAME.""" conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, stride=1, rate=2, padding=snt.SAME, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) expected_out = np.array([[5, 5, 7, 5, 5], [5, 5, 7, 5, 5], [7, 7, 10, 7, 7], [5, 5, 7, 5, 5], [5, 5, 7, 5, 5]]) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out) def testClone(self): net = snt.Conv2D(name="conv2d", output_channels=4, kernel_shape=3, stride=5) clone1 = net.clone() clone2 = net.clone(name="clone2") input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) net_out = net(input_to_net) clone1_out = clone1(input_to_net) clone2_out = clone2(input_to_net) all_vars = tf.trainable_variables() net_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=net.variable_scope.name + "/") clone1_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone1.variable_scope.name + "/") clone2_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone2.variable_scope.name + "/") self.assertEqual(net.output_channels, clone1.output_channels) self.assertEqual(net.module_name + "_clone", clone1.module_name) self.assertEqual("clone2", clone2.module_name) self.assertLen(all_vars, 3*len(net_vars)) self.assertLen(net_vars, len(clone1_vars)) self.assertLen(net_vars, len(clone2_vars)) self.assertEqual(net_out.get_shape().as_list(), clone1_out.get_shape().as_list()) self.assertEqual(net_out.get_shape().as_list(), clone2_out.get_shape().as_list()) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def 
testTransposition(self, use_bias): """Tests if the correct output shapes are setup in transposed module.""" net = snt.Conv2D(name="conv2d", output_channels=4, kernel_shape=3, stride=1, use_bias=use_bias) net_transpose = net.transpose() input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 100, 3]) err = "Variables in {} not instantiated yet, __call__ the module first." with self.assertRaisesRegexp(snt.NotConnectedError, err.format(net.scope_name)): net_transpose(input_to_net) net_transpose = net.transpose(name="another_net_transpose") net_out = net(input_to_net) net_transposed_output = net_transpose(net_out) self.assertAllEqual(net_transposed_output.get_shape().as_list(), input_to_net.get_shape().as_list()) def testMask1D(self): """1D Masks are applied properly.""" mask = np.ones((3,), dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 5, 2)) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 18 * np.ones((1, 3, 3, 1)) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask2D(self): """2D Masks are applied properly.""" # This mask, applied on an image filled with 1, should result in an image # filled with 8 (since we sum 4 elements per channel and there are 2 input # channels). mask = np.array([[1, 1, 1], [1, 0, 0], [0, 0, 0]], dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 5, 2)) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = np.array([[8] * 3] * 3) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out) def testMask3D(self): """3D Masks are applied properly.""" mask = np.ones((3, 3, 2), dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 5, 2)) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 18 * np.ones((1, 3, 3, 1)) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask4D(self): """4D Masks are applied properly.""" # This mask, applied on an image filled with 1, should result in an image # filled with 17, as there are 18 weights but we zero out one of them. mask = np.ones([3, 3, 2, 1], dtype=np.float32) mask[0, 0, 0, :] = 0 inputs = tf.constant(1.0, shape=(1, 5, 5, 2)) conv1 = snt.Conv2D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = np.array([[17] * 3] * 3) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out) def testMaskErrorIncompatibleRank1(self): """Errors are thrown for incompatible rank 1 mask.""" np_mask = np.ones((3,)) x = tf.constant(0.0, shape=(2, 8, 8, 6)) # Test with both numpy arrays and Tensors. 
for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank2(self): """Errors are thrown for incompatible rank 2 mask.""" np_mask = np.ones((3, 3)) x = tf.constant(0.0, shape=(2, 8, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank3(self): """Errors are thrown for incompatible rank 3 mask.""" np_mask = np.ones((2, 4, 4)) x = tf.constant(0.0, shape=(2, 8, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank4(self): """Errors are thrown for incompatible rank 4 mask.""" np_mask = np.ones((3, 3, 4, 5)) x = tf.constant(0.0, shape=(2, 8, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIfIncorrectDtype(self): """Errors are thrown when a Tensor with incorrect dtype is used.""" mask = tf.constant(0, shape=(4, 4), dtype=tf.int32) x = tf.constant(0.0, shape=(2, 8, 8, 6)) with self.assertRaises(TypeError) as cm: snt.Conv2D(output_channels=4, kernel_shape=(4, 4), mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Mask needs to have dtype float16, bfloat16, float32 or float64")) def testDataFormatNotSupported(self): """Errors are thrown when an unsupported data_format is used.""" x = tf.constant(0.0, shape=(2, 8, 8, 6)) data_format = "NWCH" self.assertNotIn(data_format, conv.SUPPORTED_2D_DATA_FORMATS) with self.assertRaisesRegexp(ValueError, "Invalid data_format"): snt.Conv2D(output_channels=4, kernel_shape=(4, 4), data_format=data_format)(x) class Conv2DTransposeTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Set up some variables to re-use in multiple tests.""" super(Conv2DTransposeTest, self).setUp() self.batch_size = 100 self.in_height = 32 self.in_width = 32 self.in_channels = 3 self.out_channels = 10 self.kernel_shape_h = 5 self.kernel_shape_w = 5 self.strides = (1, 1, 1, 1) self.padding = snt.SAME self.in_shape = (self.batch_size, self.in_height, self.in_width, self.in_channels) self.out_shape = (self.in_height, self.in_width) self.kernel_shape = (self.kernel_shape_h, self.kernel_shape_w) self.kernel_shape2 = (self.kernel_shape_h, self.kernel_shape_w, self.out_channels, self.in_channels) def testKernelsNotSpecified(self): """Tests error is raised if kernel shape is not specified.""" with self.assertRaisesRegexp(ValueError, "`kernel_shape` cannot be None."): snt.Conv2DTranspose(output_channels=1) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testOutputShapeConsistency(self, use_bias): """Tests if output shapes are valid.""" # When padding is SAME, then the actual number of padding pixels can be # 
computed as: pad = kernel_shape - strides + (-input_shape % strides) # = 5 - 1 + (- 32 % 1) = 4 # The formula for the minimal size is: # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h # oH = 1 * ( 32 - 1) - 4 + 5 = 32 # The formula for the maximum size (due to extra pixels) is: # oH_max = oH + strides[1] - 1 # so, for strides = 1 and padding = SAME, input size == output size. inputs = tf.placeholder(tf.float32, shape=self.in_shape) conv1 = snt.Conv2DTranspose(name="conv2d_1", output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias) outputs = conv1(inputs) self.assertTrue(outputs.get_shape().is_compatible_with(( self.batch_size,) + self.out_shape + (self.out_channels,))) self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2)) if use_bias: self.assertTrue(conv1.b.get_shape().is_compatible_with( [self.out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testOutputShapeInteger(self, use_bias): """Tests if output shapes are valid when specified as an integer.""" inputs = tf.zeros(shape=[3, 5, 5, 2], dtype=tf.float32) inputs_2 = tf.zeros(shape=[3, 5, 7, 2], dtype=tf.float32) conv1 = snt.Conv2DTranspose(name="conv2d_1", output_channels=10, output_shape=tf.Dimension(10), kernel_shape=5, padding=snt.SAME, stride=2, use_bias=use_bias) outputs = conv1(inputs) outputs_2 = conv1(inputs_2) self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10))) with self.test_session() as sess: tf.global_variables_initializer().run() sess.run(outputs) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run(outputs_2) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testTransposition(self, use_bias): """Tests if the correct output shapes are setup in transposed module.""" net = snt.Conv2DTranspose(name="conv2d", output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias) net_transpose = net.transpose() input_to_net = tf.placeholder(tf.float32, shape=self.in_shape) err = "Variables in {} not instantiated yet, __call__ the module first." 
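    # Connecting the transpose before `net` itself has been connected should
    # raise: the transposed module takes its sizes from `net`, whose variables
    # only exist once `net` has been called.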
with self.assertRaisesRegexp(snt.NotConnectedError, err.format(net.scope_name)): net_transpose(input_to_net) net_transpose = net.transpose(name="another_net_transpose") net_out = net(input_to_net) net_transposed_output = net_transpose(net_out) self.assertEqual(net_transposed_output.get_shape(), input_to_net.get_shape()) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.Conv2DTranspose( output_shape=(10, 10), output_channels=1, kernel_shape=3, stride=1, name="conv2d", initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBiasWithOutputShape", True, True), ("WithBiasWithoutOutputShape", True, False), ("WithoutBiasWithOutputShape", False, True), ("WithoutBiasWithoutOutputShape", False, False)) def testTransposeNHWC(self, use_bias, use_output_shape): """Test transpose for NHWC format.""" output_shape = tf.TensorShape((4, 5)) conv2_transpose = snt.Conv2DTranspose( output_channels=5, output_shape=output_shape if use_output_shape else None, kernel_shape=3, padding=snt.VALID, stride=1, name="conv2_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NHWC) conv2 = conv2_transpose.transpose() # Check kernel shapes, strides and padding match. self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape) self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride) self.assertEqual(conv2_transpose.conv_op_padding, conv2.conv_op_padding) # Before conv2_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv2_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv2.output_channels # After connection the number of `output_channels` is known. batch_size = 32 in_height = 2 in_width = 3 in_channels = 4 x = tf.constant(np.random.randn(batch_size, in_height, in_width, in_channels), dtype=np.float32) conv2_transpose(x) self.assertEqual(in_channels, conv2.output_channels) # As is `output_channels`. self.assertEqual(output_shape, conv2_transpose.output_shape) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv2_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv2.input_shape @parameterized.named_parameters( ("WithBiasWithOutputShape", True, True), ("WithBiasWithoutOutputShape", True, False), ("WithoutBiasWithOutputShape", False, True), ("WithoutBiasWithoutOutputShape", False, False)) def testTransposeNCHW(self, use_bias, use_output_shape): """Test transpose for NCHW format.""" output_shape = tf.TensorShape((4, 5)) conv2_transpose = snt.Conv2DTranspose( output_channels=5, output_shape=output_shape if use_output_shape else None, kernel_shape=3, padding=snt.VALID, stride=1, name="conv2_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NCHW) conv2 = conv2_transpose.transpose() # Check kernel shapes, strides and padding match. 
self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape) self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride) self.assertEqual(conv2_transpose.conv_op_padding, conv2.conv_op_padding) # Before conv2_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv2_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv2.output_channels # After connection the number of `output_channels` is known. batch_size = 32 in_height = 2 in_width = 3 in_channels = 4 x = tf.constant(np.random.randn(batch_size, in_channels, in_height, in_width), dtype=np.float32) conv2_transpose(x) self.assertEqual(in_channels, conv2.output_channels) # As is `output_channels`. self.assertEqual(output_shape, conv2_transpose.output_shape) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv2_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv2.input_shape class Conv1DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.parameters(*itertools.product( [True, False], # use_bias conv.SUPPORTED_1D_DATA_FORMATS, # data_format conv.ALLOWED_PADDINGS # padding )) def testShapes(self, use_bias, data_format, padding): """The generated shapes are correct with SAME and VALID padding.""" batch_size = random.randint(1, 100) in_length = random.randint(10, 288) in_channels = random.randint(1, 10) out_channels = random.randint(1, 32) kernel_shape = random.randint(1, 10) if data_format == conv.DATA_FORMAT_NWC: inputs = tf.placeholder( tf.float32, shape=[batch_size, in_length, in_channels]) else: # NCW inputs = tf.placeholder( tf.float32, shape=[batch_size, in_channels, in_length]) conv1 = snt.Conv1D( output_channels=out_channels, kernel_shape=kernel_shape, padding=padding, stride=1, data_format=data_format, name="conv1", use_bias=use_bias) output = conv1(inputs) expected_out_length = _PADDINGS_EXPECTED_SHAPE_TRANSFORMS[padding]( in_length, kernel_shape) if data_format == conv.DATA_FORMAT_NWC: self.assertTrue( output.get_shape().is_compatible_with( [batch_size, expected_out_length, out_channels])) else: # NCW self.assertTrue( output.get_shape().is_compatible_with( [batch_size, out_channels, expected_out_length])) self.assertTrue( conv1.w.get_shape().is_compatible_with( [kernel_shape, in_channels, out_channels])) if use_bias: self.assertTrue( conv1.b.get_shape().is_compatible_with( [out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnown(self, use_bias): """The generated shapes are correct when input shape not known.""" batch_size = 5 in_length = 32 in_channels = out_channels = 5 kernel_shape = 3 inputs = tf.placeholder( tf.float32, shape=[None, None, in_channels], name="inputs") conv1 = snt.Conv1D( name="conv1", output_channels=out_channels, kernel_shape=kernel_shape, padding=snt.SAME, stride=1, use_bias=use_bias) output = conv1(inputs) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() output_eval = output.eval({ inputs: np.zeros([batch_size, in_length, in_channels])}) self.assertEqual( output_eval.shape, (batch_size, in_length, out_channels)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def 
testKernelShape(self, use_bias): """Errors are thrown for invalid kernel shapes.""" snt.Conv1D(output_channels=10, kernel_shape=[3], name="conv1", use_bias=use_bias) snt.Conv1D(output_channels=10, kernel_shape=3, name="conv1", use_bias=use_bias) err = "Invalid kernel shape" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1D(output_channels=10, kernel_shape=[3, 3], name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testStrideError(self, use_bias): """Errors are thrown for invalid strides.""" snt.Conv1D( output_channels=10, kernel_shape=3, stride=1, name="conv1", use_bias=use_bias) err = "Invalid stride" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1D(output_channels=10, kernel_shape=3, stride=[1, 1], name="conv1") with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1], name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateError(self, use_bias): """Errors are thrown for invalid dilation rates.""" snt.Conv1D( output_channels=10, kernel_shape=3, rate=1, name="conv1", use_bias=use_bias) snt.Conv1D( output_channels=10, kernel_shape=3, rate=2, name="conv1", use_bias=use_bias) for rate in [0, 0.5, -1]: with self.assertRaisesRegexp(snt.IncompatibleShapeError, "Invalid rate shape*"): snt.Conv1D(output_channels=10, kernel_shape=3, rate=rate, name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateAndStrideError(self, use_bias): """Errors are thrown for stride > 1 when using atrous convolution.""" err = "Cannot have stride > 1 with rate > 1" with self.assertRaisesRegexp(snt.NotSupportedError, err): snt.Conv1D(output_channels=10, kernel_shape=3, stride=2, rate=2, name="conv1", use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInputTypeError(self, use_bias): """Errors are thrown for invalid input types.""" conv1 = snt.Conv1D(output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID, use_bias=use_bias, name="conv1", initializers=create_constant_initializers( 1.0, 1.0, use_bias)) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 1]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test initializers work as expected.""" w = random.random() b = random.random() conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name="conv1", initializers=create_constant_initializers(w, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( conv1.w.eval(), np.full([3, 2, 1], w, dtype=np.float32)) if use_bias: self.assertAllClose( conv1.b.eval(), [b]) err = "Initializer for 'w' is not a callable function or dictionary" with self.assertRaisesRegexp(TypeError, err): snt.Conv1D(output_channels=10, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name="conv1", initializers={"w": tf.ones([])}) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, stride=1, 
name="conv1", initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_regularizers(use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, stride=1, regularizers=regularizers, name="conv1") conv1(tf.placeholder(tf.float32, [1, 10, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if use_bias: self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") @parameterized.parameters(*itertools.product( [True, False], # use_bias [(snt.VALID, [3, 3, 3]), (snt.SAME, [2, 3, 3, 3, 2]), (snt.CAUSAL, [1, 2, 3, 3, 3]), (snt.FULL, [1, 2, 3, 3, 3, 2, 1]), (snt.REVERSE_CAUSAL, [3, 3, 3, 2, 1])] # (padding, expected_out) )) def testComputation(self, use_bias, padding_and_expected_out): """Run through for something with a known answer using different args.""" padding, expected_out = padding_and_expected_out conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, stride=1, padding=padding, use_bias=use_bias, name="conv1", initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.asarray(expected_out, dtype=np.float32) if use_bias: expected_out += 1 with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose(out.eval().flatten(), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name="conv1") x = np.random.randn(1, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( out1.eval(), out2.eval()) # Now change the weights w = np.random.randn(3, 1, 1) conv1.w.assign(w).eval() self.assertAllClose( out1.eval(), out2.eval()) def testMask1D(self): """1D Masks are applied properly.""" mask = np.ones((3,), dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 2)) conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1)) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask2D(self): """2D Masks are applied properly.""" mask = np.ones((3, 2), dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 2)) conv1 = snt.Conv1D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1)) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask3D(self): """3D Masks are applied properly.""" mask = np.ones((3, 2, 1), dtype=np.float32) inputs = tf.constant(1.0, shape=(1, 5, 2)) conv1 = snt.Conv1D( output_channels=1, 
kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = np.reshape(np.array([6, 6, 6]), (1, 3, 1)) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMaskErrorIncompatibleRank1(self): """Errors are thrown for incompatible rank 1 mask.""" np_mask = np.ones((3,)) x = tf.constant(0.0, shape=(2, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank2(self): """Errors are thrown for incompatible rank 2 mask.""" np_mask = np.ones((3, 3)) x = tf.constant(0.0, shape=(2, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank3(self): """Errors are thrown for incompatible rank 3 mask.""" np_mask = np.ones((2, 4, 4)) x = tf.constant(0.0, shape=(2, 8, 6)) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIfIncorrectDtype(self): """Errors are thrown when a Tensor with incorrect dtype is used.""" mask = tf.constant(0, shape=(4, 4), dtype=tf.int32) x = tf.constant(0.0, shape=(2, 8, 6)) with self.assertRaises(TypeError) as cm: snt.Conv1D(output_channels=4, kernel_shape=4, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Mask needs to have dtype float16, bfloat16, float32 or float64")) def testClone(self): net = snt.Conv1D(name="conv1d", output_channels=4, kernel_shape=3, stride=5) clone1 = net.clone() clone2 = net.clone(name="clone2") input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 3]) net_out = net(input_to_net) clone1_out = clone1(input_to_net) clone2_out = clone2(input_to_net) all_vars = tf.trainable_variables() net_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=net.variable_scope.name + "/") clone1_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone1.variable_scope.name + "/") clone2_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone2.variable_scope.name + "/") self.assertEqual(net.output_channels, clone1.output_channels) self.assertEqual(net.module_name + "_clone", clone1.module_name) self.assertEqual("clone2", clone2.module_name) self.assertLen(all_vars, 3*len(net_vars)) self.assertLen(net_vars, len(clone1_vars)) self.assertLen(net_vars, len(clone2_vars)) self.assertEqual(net_out.get_shape().as_list(), clone1_out.get_shape().as_list()) self.assertEqual(net_out.get_shape().as_list(), clone2_out.get_shape().as_list()) def testDataFormatNotSupported(self): """Errors are thrown when an unsupported data_format is used.""" x = tf.constant(0.0, shape=(2, 8, 6)) data_format = "WNC" self.assertNotIn(data_format, conv.SUPPORTED_1D_DATA_FORMATS) with self.assertRaisesRegexp(ValueError, "Invalid data_format"): snt.Conv1D(output_channels=4, 
kernel_shape=4, data_format=data_format)(x) class Conv1DTransposeTest(parameterized.TestCase, tf.test.TestCase): # Constants for use in all tests. batch_size = [10, 2, 8, 18, 23] in_length = [20, 23, 24, 15, 16] in_channels = [6, 2, 3, 6, 9] out_channels = [18, 19, 15, 32, 5] kernel_shape = [4, 10, 1, 2, 7] stride = [1, 2, 4, 7, 5] padding = [snt.SAME, snt.SAME, snt.VALID, snt.VALID, snt.VALID] use_bias = [True, False, True, False, True] out_length = [] use_output_shape = [True, False, True, False, True] for i, pad in enumerate(padding): if pad == snt.SAME: out_length.append(in_length[i] * stride[i]) elif pad == snt.VALID: out_length.append(in_length[i] * stride[i] + kernel_shape[i] - 1) in_shape = tuple(zip(batch_size, in_length, in_channels)) out_shape = tuple(out_length) kernel_shape = tuple(kernel_shape) kernel_shape2 = tuple(zip(kernel_shape, out_channels, in_channels)) stride_shape = tuple(stride) def testKernelsNotSpecified(self): """Tests error is raised if kernel shape is not specified.""" with self.assertRaisesRegexp(ValueError, "`kernel_shape` cannot be None."): snt.Conv1DTranspose(output_channels=1) @parameterized.parameters( *zip(out_channels, kernel_shape, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape)) def testMissingBatchSize(self, out_channels, kernel_shape, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape): """Check functionality with unknown batch size at build time.""" if use_output_shape: output_shape_arg = out_shape else: output_shape_arg = None conv1 = snt.Conv1DTranspose(output_channels=out_channels, output_shape=output_shape_arg, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) # Pass in an image with its batch size set to `None`: image = tf.placeholder(tf.float32, shape=(None,) + in_shape[1:]) output = conv1(image) self.assertTrue(output.get_shape().is_compatible_with( [None, out_shape, out_channels])) with self.test_session() as sess: tf.global_variables_initializer().run() sess.run(output, feed_dict={image: np.zeros((10,) + in_shape[1:])}) @parameterized.parameters( *zip(batch_size, in_length, in_channels, out_length, out_channels, kernel_shape, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape)) def testShapesSame(self, batch_size, in_length, in_channels, out_length, out_channels, kernel_shape, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape): """The generated shapes are correct.""" if use_output_shape: output_shape_arg = out_shape else: output_shape_arg = None inputs = tf.placeholder( tf.float32, shape=[batch_size, in_length, in_channels]) conv1 = snt.Conv1DTranspose(output_channels=out_channels, output_shape=output_shape_arg, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) output = conv1(inputs) self.assertTrue( output.get_shape().is_compatible_with( [batch_size, out_length, out_channels])) self.assertTrue( conv1.w.get_shape().is_compatible_with( [1, kernel_shape, out_channels, in_channels])) if use_bias: self.assertTrue( conv1.b.get_shape().is_compatible_with( [out_channels])) @parameterized.parameters( *zip(out_channels, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape)) def testKernelShape(self, out_channels, padding, use_bias, in_shape, out_shape, stride_shape, use_output_shape): """Errors are thrown for invalid kernel shapes.""" snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, 
kernel_shape=[3], padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=3, padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) err = "Invalid kernel" with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=[3, 3], name="conv1", use_bias=use_bias) with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=[3, 3, 3, 3], name="conv1", use_bias=use_bias) @parameterized.parameters( *zip(out_channels, padding, use_bias, in_shape, out_shape, use_output_shape)) def testStrideError(self, out_channels, padding, use_bias, in_shape, out_shape, use_output_shape): """Errors are thrown for invalid strides.""" snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=3, padding=padding, stride=1, name="conv1", use_bias=use_bias) err = ("must be either a positive integer or an iterable of positive " "integers of size 1") with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=3, padding=padding, stride=[1, 1], name="conv1", use_bias=use_bias) with self.assertRaisesRegexp(snt.IncompatibleShapeError, err): snt.Conv1DTranspose(output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=3, padding=padding, stride=[1, 1, 1, 1], name="conv1", use_bias=use_bias) @parameterized.parameters( *zip(batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape)) def testInputTypeError(self, batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape): """Errors are thrown for invalid input types.""" conv1 = snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([batch_size, in_length, in_channels]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.parameters( *zip(batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape)) def testSharing(self, batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape): """Sharing is working.""" conv1 = snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1", use_bias=use_bias) x = np.random.randn(batch_size, in_length, in_channels) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( out1.eval(), out2.eval()) # Now change the weights w = np.random.randn(1, kernel_shape, out_channels, in_channels) conv1.w.assign(w).eval() 
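      # Both graph connections read the same underlying variable, so the
      # reassigned kernel affects them identically and the outputs still match.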
self.assertAllClose( out1.eval(), out2.eval()) @parameterized.parameters( *zip(batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape)) def testTransposeNWC(self, batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape): """Test transpose for NWC format.""" conv1_transpose = snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NWC) conv1 = conv1_transpose.transpose() # Check kernel shapes, strides and padding match. self.assertEqual(conv1_transpose.kernel_shape, conv1.kernel_shape) self.assertEqual((1, conv1_transpose.stride[1], 1), conv1.stride) self.assertEqual(conv1_transpose.conv_op_padding, conv1.conv_op_padding) # Before conv1_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv1_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): conv1.output_channels # pylint: disable=pointless-statement # After connection the number of `output_channels` is known. x = tf.constant(np.random.randn(batch_size, in_length, in_channels), dtype=np.float32) conv1_transpose(x) self.assertEqual(in_channels, conv1.output_channels) # As is `output_shape`. self.assertIn(out_shape, conv1_transpose.output_shape) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv1_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv1.input_shape @parameterized.parameters( *zip(batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape)) def testTransposeNCW(self, batch_size, in_length, in_channels, out_channels, kernel_shape, padding, use_bias, out_shape, stride_shape, use_output_shape): """Test transpose for NCW format.""" conv1_transpose = snt.Conv1DTranspose( output_channels=out_channels, output_shape=out_shape if use_output_shape else None, kernel_shape=kernel_shape, padding=padding, stride=stride_shape, name="conv1_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NCW) conv1 = conv1_transpose.transpose() # Check kernel shapes, strides and padding match. self.assertEqual(conv1_transpose.kernel_shape, conv1.kernel_shape) self.assertEqual((1, 1, conv1_transpose.stride[2]), conv1.stride) self.assertEqual(conv1_transpose.conv_op_padding, conv1.conv_op_padding) # Before conv1_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv1_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): conv1.output_channels # pylint: disable=pointless-statement # After connection the number of `output_channels` is known. x = tf.constant(np.random.randn(batch_size, in_channels, in_length), dtype=np.float32) conv1_transpose(x) self.assertEqual(in_channels, conv1.output_channels) # As is `output_shape`. self.assertIn(out_shape, conv1_transpose.output_shape) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. 
it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv1_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv1.input_shape def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.Conv1DTranspose( output_shape=tf.Dimension(10), output_channels=1, kernel_shape=3, stride=1, name="conv1", initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 2])) self.assertAllEqual(initializers, initializers_copy) class CausalConv1DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputation(self, use_bias): """Run through for something with a known answer.""" conv1 = snt.CausalConv1D( output_channels=1, kernel_shape=3, stride=1, use_bias=use_bias, name="conv1", initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.reshape(np.array([1, 2, 3, 3, 3]), [1, 5, 1]) if use_bias: expected_out += 1 init_op = tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]) with self.test_session() as sess: sess.run(init_op) actual_out = sess.run(out) self.assertAllClose(actual_out, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationStrided(self, use_bias): """Run through for something with a known answer.""" conv1 = snt.CausalConv1D( output_channels=1, kernel_shape=3, stride=2, use_bias=use_bias, name="conv1", initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.reshape(np.array([1, 3, 3]), [1, 3, 1]) if use_bias: expected_out += 1 init_op = tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]) with self.test_session() as sess: sess.run(init_op) actual_out = sess.run(out) self.assertAllClose(actual_out, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationDilated(self, use_bias): """Run through for something with a known answer.""" conv1 = snt.CausalConv1D( output_channels=1, kernel_shape=3, stride=1, rate=2, use_bias=use_bias, name="conv1", initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.reshape(np.array([1, 1, 2, 2, 3]), [1, 5, 1]) if use_bias: expected_out += 1 init_op = tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]) with self.test_session() as sess: sess.run(init_op) actual_out = sess.run(out) self.assertAllClose(actual_out, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.CausalConv1D( output_channels=1, kernel_shape=3, stride=1, use_bias=use_bias, name="conv1") x = np.random.randn(1, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) w = np.random.randn(3, 1, 1) weight_change_op = conv1.w.assign(w) init_op = tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]) with self.test_session() as sess: sess.run(init_op) first_replica_out = sess.run(out1) second_replica_out = sess.run(out2) # Now change the weights sess.run(weight_change_op) 
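      # Re-evaluate both replicas after the weight update; the assertions
      # below check that the two connections still agree with each other.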
first_replica_out_changed = sess.run(out1) second_replica_out_changed = sess.run(out2) self.assertAllClose(first_replica_out, second_replica_out) self.assertAllClose(first_replica_out_changed, second_replica_out_changed) def testClone(self): net = snt.CausalConv1D(name="conv1d", output_channels=4, kernel_shape=3, stride=5) clone1 = net.clone() clone2 = net.clone(name="clone2") input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 3]) net_out = net(input_to_net) clone1_out = clone1(input_to_net) clone2_out = clone2(input_to_net) all_vars = tf.trainable_variables() net_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=net.variable_scope.name + "/") clone1_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone1.variable_scope.name + "/") clone2_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone2.variable_scope.name + "/") self.assertEqual(net.output_channels, clone1.output_channels) self.assertEqual(net.module_name + "_clone", clone1.module_name) self.assertEqual("clone2", clone2.module_name) self.assertLen(all_vars, 3*len(net_vars)) self.assertLen(net_vars, len(clone1_vars)) self.assertLen(net_vars, len(clone2_vars)) self.assertEqual(net_out.get_shape().as_list(), clone1_out.get_shape().as_list()) self.assertEqual(net_out.get_shape().as_list(), clone2_out.get_shape().as_list()) def testDataFormatNotSupported(self): """Errors are thrown when an unsupported data_format is used.""" x = tf.constant(0.0, shape=(2, 8, 6)) data_format = "WNC" self.assertNotIn(data_format, conv.SUPPORTED_1D_DATA_FORMATS) with self.assertRaisesRegexp(ValueError, "Invalid data_format"): snt.CausalConv1D(output_channels=4, kernel_shape=4, data_format=data_format)(x) class InPlaneConv2DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSameNumberOfOutputAndInputChannels(self, use_bias): """Test that the number of output and input channels are equal.""" input_channels = random.randint(1, 32) inputs = tf.placeholder(tf.float32, shape=[1, 10, 10, input_channels]) conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias) # Before conv1 is connected, we cannot know how many `output_channels` # conv1 should have. err = "Variables in in_plane_conv2d not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv1.output_channels # After connection, should match `input_channels`. conv1(inputs) self.assertEqual(conv1.output_channels, input_channels) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.InPlaneConv2D(kernel_shape=3, use_bias=use_bias) x = np.random.randn(1, 5, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose(out1.eval(), out2.eval()) w = np.random.randn(3, 3, 1, 1) # Now change the weights. 
conv1.w.assign(w).eval() self.assertAllClose(out1.eval(), out2.eval()) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.InPlaneConv2D(kernel_shape=3, initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) self.assertAllEqual(initializers, initializers_copy) class DepthwiseConv2DTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Set up some variables to re-use in multiple tests.""" super(DepthwiseConv2DTest, self).setUp() self.batch_size = batch_size = random.randint(1, 20) self.in_height = in_height = random.randint(10, 128) self.in_width = in_width = random.randint(10, 128) self.in_channels = in_channels = random.randint(1, 10) self.kernel_shape_h = kernel_shape_h = random.randint(1, 11) self.kernel_shape_w = kernel_shape_w = random.randint(1, 11) self.channel_multiplier = channel_multiplier = random.randint(1, 10) self.out_channels = out_channels = in_channels * channel_multiplier self.input_shape = [batch_size, in_height, in_width, in_channels] self.kernel_shape = [kernel_shape_h, kernel_shape_w] self.output_shape = [batch_size, in_height, in_width, out_channels] self.weight_shape = [kernel_shape_h, kernel_shape_w, in_channels, channel_multiplier] @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesSame(self, use_bias): """Test that the generated shapes are correct with SAME padding.""" out_channels = self.out_channels input_shape = self.input_shape kernel_shape = self.kernel_shape output_shape = self.output_shape weight_shape = self.weight_shape channel_multiplier = self.channel_multiplier inputs = tf.placeholder(tf.float32, shape=input_shape) conv1 = snt.DepthwiseConv2D( name="conv1", channel_multiplier=channel_multiplier, kernel_shape=kernel_shape, padding=snt.SAME, stride=1, use_bias=use_bias) output = conv1(inputs) self.assertEqual(output.get_shape(), output_shape) self.assertEqual(conv1.w.get_shape(), weight_shape) if use_bias: self.assertEqual(conv1.b.get_shape(), out_channels) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnown(self, use_bias): """Test that the generated shapes are correct when input shape not known.""" inputs = tf.placeholder( tf.float32, shape=[None, None, None, self.in_channels], name="inputs") conv1 = snt.DepthwiseConv2D( channel_multiplier=self.channel_multiplier, kernel_shape=self.kernel_shape, padding=snt.SAME, stride=1, use_bias=use_bias) output = conv1(inputs) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() output_eval = output.eval({inputs: np.zeros(self.input_shape)}) self.assertEqual(output_eval.shape, tuple(self.output_shape)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testKernelShape(self, use_bias): """Test that errors are thrown for invalid kernel shapes.""" snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3, 4]) snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3) error_msg = (r"Invalid kernel shape: x is \[3], must be either a positive" r" integer or an iterable of positive integers of size 2") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=[3], use_bias=use_bias, name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testStrideError(self, use_bias): """Test that errors 
are thrown for invalid strides.""" snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=1, use_bias=use_bias) snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=[1] * 2, use_bias=use_bias) snt.DepthwiseConv2D(channel_multiplier=1, kernel_shape=3, stride=[1] * 4, use_bias=use_bias) error_msg = (r"Invalid stride shape: x is \[1, 1, 1\], must be " r"either a positive integer or an iterable of positive " r"integers of size 2") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.DepthwiseConv2D(channel_multiplier=3, kernel_shape=3, stride=[1, 1, 1], use_bias=use_bias, name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInputTypeError(self, use_bias): """Test that errors are thrown for invalid input types.""" conv1 = snt.DepthwiseConv2D( channel_multiplier=3, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test that initializers work as expected.""" w = random.random() b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6 conv1 = snt.DepthwiseConv2D( channel_multiplier=3, kernel_shape=3, stride=1, use_bias=use_bias, initializers=create_constant_initializers(w, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( conv1.w.eval(), np.full( [3, 3, 2, 3], w, dtype=np.float32)) if use_bias: self.assertAllClose(conv1.b.eval(), b) error_msg = "Initializer for 'w' is not a callable function" with self.assertRaisesRegexp(TypeError, error_msg): snt.DepthwiseConv2D( channel_multiplier=3, kernel_shape=3, stride=1, use_bias=use_bias, initializers={"w": tf.ones([])}) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_regularizers(use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.DepthwiseConv2D( channel_multiplier=3, kernel_shape=3, stride=1, regularizers=regularizers, use_bias=use_bias, name="conv1") conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if use_bias: self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.DepthwiseConv2D( channel_multiplier=3, kernel_shape=3, stride=1, initializers=initializers) conv1(tf.placeholder(tf.float32, [10, 10, 1, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationSame(self, use_bias): """Run through for something with a known answer using SAME padding.""" conv1 = snt.DepthwiseConv2D( channel_multiplier=1, kernel_shape=[3, 3], stride=1, padding=snt.SAME, use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], 
                                      dtype=np.float32)))
    expected_out = np.array([[5, 7, 7, 7, 5],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [7, 10, 10, 10, 7],
                             [5, 7, 7, 7, 5]])
    if not use_bias:
      expected_out -= 1

    with self.test_session():
      tf.variables_initializer(
          [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
      self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out)

  @parameterized.named_parameters(
      ("WithBias", True),
      ("WithoutBias", False))
  def testComputationValid(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=1,
        kernel_shape=[3, 3],
        stride=1,
        padding=snt.VALID,
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32)))
    expected_out = np.array([[10, 10, 10],
                             [10, 10, 10],
                             [10, 10, 10]])
    if not use_bias:
      expected_out -= 1

    with self.test_session():
      tf.variables_initializer(
          [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
      self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)

  @parameterized.named_parameters(
      ("WithBias", True),
      ("WithoutBias", False))
  def testComputationValidMultiChannel(self, use_bias):
    """Run through for something with a known answer using snt.VALID padding."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=1,
        kernel_shape=[3, 3],
        stride=1,
        padding=snt.VALID,
        use_bias=use_bias,
        initializers=create_constant_initializers(1.0, 1.0, use_bias))

    out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32)))
    expected_out = np.array([[[10] * 3] * 3] * 3)
    if not use_bias:
      expected_out -= 1

    with self.test_session():
      tf.variables_initializer(
          [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
      self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out)

  @parameterized.named_parameters(
      ("WithBias", True),
      ("WithoutBias", False))
  def testSharing(self, use_bias):
    """Sharing is working."""
    conv1 = snt.DepthwiseConv2D(
        channel_multiplier=3,
        kernel_shape=3,
        stride=1,
        padding=snt.SAME,
        use_bias=use_bias)

    x = np.random.randn(1, 5, 5, 1)
    x1 = tf.constant(x, dtype=np.float32)
    x2 = tf.constant(x, dtype=np.float32)

    out1 = conv1(x1)
    out2 = conv1(x2)

    with self.test_session():
      tf.variables_initializer(
          [conv1.w, conv1.b] if use_bias else [conv1.w]).run()
      self.assertAllClose(out1.eval(), out2.eval())

      # Kernel shape was set to 3, which is expanded to [3, 3].
      # Input channels are 1, output channels := in_channels * multiplier,
      # and the channel multiplier is 3. So the weight layout must be
      # (3, 3, 1, 3).
      w = np.random.randn(3, 3, 1, 3)
      # Now change the weights.
conv1.w.assign(w).eval() self.assertAllClose(out1.eval(), out2.eval()) class SeparableConv2DTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Set up some variables to re-use in multiple tests.""" super(SeparableConv2DTest, self).setUp() self.batch_size = batch_size = random.randint(1, 100) self.in_height = in_height = random.randint(10, 188) self.in_width = in_width = random.randint(10, 188) self.in_channels = in_channels = random.randint(1, 10) self.input_shape = [batch_size, in_height, in_width, in_channels] self.kernel_shape_h = kernel_shape_h = random.randint(1, 10) self.kernel_shape_w = kernel_shape_w = random.randint(1, 10) self.channel_multiplier = channel_multiplier = random.randint(1, 10) self.kernel_shape = [kernel_shape_h, kernel_shape_w] self.out_channels_dw = out_channels_dw = in_channels * channel_multiplier self.output_shape = [batch_size, in_height, in_width, out_channels_dw] self.depthwise_filter_shape = [ kernel_shape_h, kernel_shape_w, in_channels, channel_multiplier ] self.pointwise_filter_shape = [1, 1, out_channels_dw, out_channels_dw] @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesSame(self, use_bias): """Test that the generated shapes are correct with SAME padding.""" out_channels = self.out_channels_dw input_shape = self.input_shape kernel_shape = self.kernel_shape output_shape = self.output_shape depthwise_filter_shape = self.depthwise_filter_shape pointwise_filter_shape = self.pointwise_filter_shape channel_multiplier = self.channel_multiplier inputs = tf.placeholder(tf.float32, shape=input_shape) conv1 = snt.SeparableConv2D( output_channels=out_channels, channel_multiplier=channel_multiplier, kernel_shape=kernel_shape, padding=snt.SAME, use_bias=use_bias) output = conv1(inputs) self.assertTrue(output.get_shape().is_compatible_with(output_shape)) self.assertTrue(conv1.w_dw.get_shape().is_compatible_with( depthwise_filter_shape)) self.assertTrue(conv1.w_pw.get_shape().is_compatible_with( pointwise_filter_shape)) if use_bias: self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnown(self, use_bias): """Test that the generated shapes are correct when input shape not known.""" inputs = tf.placeholder( tf.float32, shape=[None, None, None, self.in_channels], name="inputs") conv1 = snt.SeparableConv2D( output_channels=self.out_channels_dw, channel_multiplier=1, kernel_shape=self.kernel_shape, padding=snt.SAME, use_bias=use_bias) output = conv1(inputs) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() output_eval = output.eval({inputs: np.zeros(self.input_shape)}) self.assertEqual(output_eval.shape, tuple(self.output_shape)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testKernelShape(self, use_bias): """Test that errors are thrown for invalid kernel shapes.""" # No check against output_channels is done yet (needs input size). 
snt.SeparableConv2D( output_channels=1, channel_multiplier=2, kernel_shape=[3, 4], name="conv1", use_bias=use_bias) snt.SeparableConv2D( output_channels=1, channel_multiplier=1, kernel_shape=3, name="conv1") error_msg = (r"Invalid kernel shape: x is \[3], must be either a positive" r" integer or an iterable of positive integers of size 2") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.SeparableConv2D(output_channels=1, channel_multiplier=3, kernel_shape=[3], use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testStrideError(self, use_bias): """Test that errors are thrown for invalid strides.""" snt.SeparableConv2D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=1, use_bias=use_bias) snt.SeparableConv2D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1, 1], use_bias=use_bias) snt.SeparableConv2D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1, 1, 1, 1], use_bias=use_bias) error_msg = (r"Invalid stride shape: x is \[1, 1, 1\], must be " r"either a positive integer or an iterable of positive " r"integers of size 2") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.SeparableConv2D(output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1, 1, 1], name="conv1", use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInputTypeError(self, use_bias): """Test that errors are thrown for invalid input types.""" conv1 = snt.SeparableConv2D( output_channels=3, channel_multiplier=1, kernel_shape=3, padding=snt.SAME, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 5, 1]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test that initializers work as expected.""" w_dw = random.random() w_pw = random.random() b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6. 
conv1 = snt.SeparableConv2D( output_channels=6, channel_multiplier=3, kernel_shape=3, use_bias=use_bias, initializers=create_separable_constant_initializers( w_dw, w_pw, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose( conv1.w_dw.eval(), np.full( [3, 3, 2, 3], w_dw, dtype=np.float32)) self.assertAllClose( conv1.w_pw.eval(), np.full( [1, 1, 6, 6], w_pw, dtype=np.float32)) if use_bias: self.assertAllClose(conv1.b.eval(), b) error_msg = "Initializer for 'w_dw' is not a callable function" with self.assertRaisesRegexp(TypeError, error_msg): snt.SeparableConv2D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, use_bias=use_bias, initializers={"w_dw": tf.ones([])}) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.SeparableConv2D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, initializers=initializers) conv1(tf.placeholder(tf.float32, [10, 10, 1, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_separable_regularizers( use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.SeparableConv2D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, regularizers=regularizers, use_bias=use_bias, name="conv1") conv1(tf.placeholder(tf.float32, [10, 10, 1, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") if use_bias: self.assertRegexpMatches(graph_regularizers[2].name, ".*l1_regularizer.*") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationSame(self, use_bias): """Run through for something with a known answer using SAME padding.""" conv1 = snt.SeparableConv2D( output_channels=1, channel_multiplier=1, kernel_shape=[3, 3], padding=snt.SAME, name="conv1", use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) expected_out = np.array([[5, 7, 7, 7, 5], [7, 10, 10, 10, 7], [7, 10, 10, 10, 7], [7, 10, 10, 10, 7], [5, 7, 7, 7, 5]]) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationSameNon1Rate(self, use_bias): """Same as `testComputationSame`, but have a non-default rate.""" conv1 = snt.SeparableConv2D( output_channels=1, channel_multiplier=1, kernel_shape=[3, 3], padding=snt.SAME, name="conv1", rate=(2, 3), use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) expected_out = np.array([[5, 5, 3, 5, 5], [5, 5, 3, 5, 5], [7, 7, 4, 7, 7], [5, 5, 3, 5, 5], [5, 5, 3, 5, 5]]) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, 
conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(np.reshape(out.eval(), [5, 5]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValid(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" conv1 = snt.SeparableConv2D( output_channels=1, channel_multiplier=1, kernel_shape=[3, 3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 1], dtype=np.float32))) expected_out = np.array([[10, 10, 10], [10, 10, 10], [10, 10, 10]]) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValidMultiChannel(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" conv1 = snt.SeparableConv2D( output_channels=3, channel_multiplier=1, kernel_shape=[3, 3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 3], dtype=np.float32))) expected_out = np.array([[[28] * 3] * 3] * 3) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(np.reshape(out.eval(), [3, 3, 3]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValidChannelMultiplier(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" input_channels = 3 channel_multiplier = 5 output_channels = input_channels * channel_multiplier conv1 = snt.SeparableConv2D( output_channels=output_channels, channel_multiplier=channel_multiplier, kernel_shape=[3, 3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) input_data = np.ones([1, 5, 5, input_channels], dtype=np.float32) out = conv1(tf.constant(input_data)) expected_out = np.ones((3, 3, output_channels)) * 136 if not use_bias: expected_out -= 1 self.assertTrue(out.get_shape().is_compatible_with([1, 3, 3, output_channels ])) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(np.reshape(out.eval(), [3, 3, output_channels]), expected_out) # Each convolution with weight 1 and size 3x3 results in an output of 9. # Pointwise filter is [1, 1, input_channels * channel_multiplier = 15, x]. # Results in 9 * 15 = 135 + 1 bias = 136 as outputs. @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.SeparableConv2D( output_channels=3, channel_multiplier=3, kernel_shape=3, use_bias=use_bias) x = np.random.randn(1, 5, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(out1.eval(), out2.eval()) # Kernel shape was set to 3, which is expandeded to [3, 3, 3]. 
# Input channels are 1, output channels := in_channels * multiplier. # multiplier is kernel_shape[2] == 3. So weight layout must be: # (3, 3, 1, 3). w_dw = np.random.randn(3, 3, 1, 3) # Now change the weights. w_pw = np.random.randn(1, 1, 3, 3) # Now change the weights. conv1.w_dw.assign(w_dw).eval() conv1.w_pw.assign(w_pw).eval() self.assertAllClose(out1.eval(), out2.eval()) class SeparableConv1DTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Set up some variables to re-use in multiple tests.""" super(SeparableConv1DTest, self).setUp() self.batch_size = batch_size = random.randint(1, 100) self.in_width = in_width = random.randint(10, 188) self.in_channels = in_channels = random.randint(1, 10) self.input_shape = [batch_size, in_width, in_channels] self.kernel_shape_w = kernel_shape_w = random.randint(1, 10) self.channel_multiplier = channel_multiplier = random.randint(1, 10) self.kernel_shape = [kernel_shape_w] self.out_channels_dw = out_channels_dw = in_channels * channel_multiplier self.output_shape = [batch_size, in_width, out_channels_dw] self.depthwise_filter_shape = [ 1, kernel_shape_w, in_channels, channel_multiplier ] self.pointwise_filter_shape = [1, 1, out_channels_dw, out_channels_dw] @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesSame(self, use_bias): """Test that the generated shapes are correct with SAME padding.""" out_channels = self.out_channels_dw input_shape = self.input_shape kernel_shape = self.kernel_shape output_shape = self.output_shape depthwise_filter_shape = self.depthwise_filter_shape pointwise_filter_shape = self.pointwise_filter_shape channel_multiplier = self.channel_multiplier inputs = tf.placeholder(tf.float32, shape=input_shape) conv1 = snt.SeparableConv1D( output_channels=out_channels, channel_multiplier=channel_multiplier, kernel_shape=kernel_shape, padding=snt.SAME, use_bias=use_bias) output = conv1(inputs) self.assertTrue(output.get_shape().is_compatible_with(output_shape)) self.assertTrue(conv1.w_dw.get_shape().is_compatible_with( depthwise_filter_shape)) self.assertTrue(conv1.w_pw.get_shape().is_compatible_with( pointwise_filter_shape)) if use_bias: self.assertTrue(conv1.b.get_shape().is_compatible_with([out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesNotKnown(self, use_bias): """Test that the generated shapes are correct when input shape not known.""" inputs = tf.placeholder( tf.float32, shape=[None, None, self.in_channels], name="inputs") conv1 = snt.SeparableConv1D( output_channels=self.out_channels_dw, channel_multiplier=1, kernel_shape=self.kernel_shape, padding=snt.SAME, use_bias=use_bias) output = conv1(inputs) with self.test_session() as session: tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() output_eval = session.run(output, {inputs: np.zeros(self.input_shape)}) self.assertEqual(output_eval.shape, tuple(self.output_shape)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testKernelShape(self, use_bias): """Test that errors are thrown for invalid kernel shapes.""" # No check against output_channels is done yet (needs input size). 
snt.SeparableConv1D( output_channels=1, channel_multiplier=2, kernel_shape=[3], name="conv1", use_bias=use_bias) snt.SeparableConv1D( output_channels=1, channel_multiplier=1, kernel_shape=3, name="conv1") error_msg = (r"Invalid kernel shape: x is \[3, 3\], must be either a " r"positive integer or an iterable of positive integers of " r"size 1") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.SeparableConv1D(output_channels=1, channel_multiplier=3, kernel_shape=[3, 3], use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testStrideError(self, use_bias): """Test that errors are thrown for invalid strides.""" snt.SeparableConv1D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=1, use_bias=use_bias) snt.SeparableConv1D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1], use_bias=use_bias) snt.SeparableConv1D( output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1, 1, 1], use_bias=use_bias) error_msg = (r"Invalid stride shape: x is \[1, 1\], must be " r"either a positive integer or an iterable of positive " r"integers of size 1") with self.assertRaisesRegexp(snt.IncompatibleShapeError, error_msg): snt.SeparableConv1D(output_channels=1, channel_multiplier=3, kernel_shape=3, stride=[1, 1], name="conv1", use_bias=use_bias) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInputTypeError(self, use_bias): """Test that errors are thrown for invalid input types.""" conv1 = snt.SeparableConv1D( output_channels=3, channel_multiplier=1, kernel_shape=3, padding=snt.SAME, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 1]), dtype=dtype) err = "Input must have dtype tf.float.*" with self.assertRaisesRegexp(TypeError, err): conv1(x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test that initializers work as expected.""" w_dw = random.random() w_pw = random.random() b = np.random.randn(6) # Kernel shape is 3, input channels are 2, 2*3 = 6. 
conv1 = snt.SeparableConv1D( output_channels=6, channel_multiplier=3, kernel_shape=3, use_bias=use_bias, initializers=create_separable_constant_initializers( w_dw, w_pw, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose( conv1.w_dw.eval(), np.full( [1, 3, 2, 3], w_dw, dtype=np.float32)) self.assertAllClose( conv1.w_pw.eval(), np.full( [1, 1, 6, 6], w_pw, dtype=np.float32)) if use_bias: self.assertAllClose(conv1.b.eval(), b) error_msg = "Initializer for 'w_dw' is not a callable function" with self.assertRaisesRegexp(TypeError, error_msg): snt.SeparableConv1D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, use_bias=use_bias, initializers={"w_dw": tf.ones([])}) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.SeparableConv1D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, initializers=initializers) conv1(tf.placeholder(tf.float32, [10, 1, 2])) self.assertAllEqual(initializers, initializers_copy) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_separable_regularizers( use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.SeparableConv1D( output_channels=3, channel_multiplier=1, kernel_shape=3, stride=1, regularizers=regularizers, use_bias=use_bias, name="conv1") conv1(tf.placeholder(tf.float32, [10, 1, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") if use_bias: self.assertRegexpMatches(graph_regularizers[2].name, ".*l1_regularizer.*") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationSame(self, use_bias): """Run through for something with a known answer using SAME padding.""" conv1 = snt.SeparableConv1D( output_channels=1, channel_multiplier=1, kernel_shape=[3], padding=snt.SAME, name="conv1", use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) output = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.array([[[3], [4], [4], [4], [3]]]) if not use_bias: expected_out -= 1 with self.test_session() as session: tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() output = session.run(output) self.assertAllClose(output, expected_out) @parameterized.named_parameters( ("WithBiasRateInt", True, 2), ("WithBiasRateSeq", True, [2]), ("WithoutBiasRateInt", False, 2), ("WithoutBiasRateSeq", False, [2])) def testComputationSameNon1Rate(self, use_bias, rate): """Same as `testComputationSame`, but have a non-default rate.""" conv1 = snt.SeparableConv1D( output_channels=1, channel_multiplier=1, kernel_shape=[3], padding=snt.SAME, name="conv1", rate=rate, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) output = conv1(tf.constant(np.ones([3, 5, 2], dtype=np.float32))) expected_out = np.array([[[5], [5], [7], [5], [5]], [[5], [5], [7], [5], [5]], [[5], [5], [7], [5], [5]]]) if not use_bias: expected_out -= 1 with self.test_session() as session: 
tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() output = session.run(output) self.assertAllClose(output, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValid(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" conv1 = snt.SeparableConv1D( output_channels=1, channel_multiplier=1, kernel_shape=[3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 1], dtype=np.float32))) expected_out = np.array([[[4], [4], [4]]]) if not use_bias: expected_out -= 1 with self.test_session() as session: tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() out = session.run(out) self.assertAllClose(out, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValidMultiChannel(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" conv1 = snt.SeparableConv1D( output_channels=3, channel_multiplier=1, kernel_shape=[3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 3], dtype=np.float32))) expected_out = np.array([[[10] * 3] * 3] * 1) if not use_bias: expected_out -= 1 with self.test_session() as session: tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() out = session.run(out) self.assertAllClose(out, expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValidChannelMultiplier(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" input_channels = 3 channel_multiplier = 5 output_channels = input_channels * channel_multiplier conv1 = snt.SeparableConv1D( output_channels=output_channels, channel_multiplier=channel_multiplier, kernel_shape=[3], padding=snt.VALID, use_bias=use_bias, initializers=create_separable_constant_initializers( 1.0, 1.0, 1.0, use_bias)) input_data = np.ones([1, 5, input_channels], dtype=np.float32) out = conv1(tf.constant(input_data)) expected_out = np.ones((1, 3, output_channels)) * 46 if not use_bias: expected_out -= 1 self.assertTrue(out.get_shape().is_compatible_with([1, 3, output_channels])) with self.test_session() as session: tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() out = session.run(out) self.assertAllClose(out, expected_out) # Each convolution with weight 1 and size 1x3 results in an output of 3. # Pointwise filter is [1, 1, input_channels * channel_multiplier = 15, x]. # Results in 3 * 15 = 45 + 1 bias = 46 as outputs. @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.SeparableConv1D( output_channels=3, channel_multiplier=3, kernel_shape=3, use_bias=use_bias) x = np.random.randn(1, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w_dw, conv1.w_pw, conv1.b] if use_bias else [conv1.w_dw, conv1.w_pw]).run() self.assertAllClose(out1.eval(), out2.eval()) # Kernel shape was set to 3, which is expandeded to [1, 3]. 
# Input channels are 1, output channels := in_channels * multiplier. # multiplier is kernel_shape[2] == 3. So weight layout must be: # (1, 3, 1, 3). w_dw = np.random.randn(1, 3, 1, 3) # Now change the weights. w_pw = np.random.randn(1, 1, 3, 3) # Now change the weights. conv1.w_dw.assign(w_dw).eval() conv1.w_pw.assign(w_pw).eval() self.assertAllClose(out1.eval(), out2.eval()) class Conv3DTest(parameterized.TestCase, tf.test.TestCase): @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesSame(self, use_bias): """The generated shapes are correct with SAME padding.""" batch_size = random.randint(1, 100) in_depth = random.randint(10, 288) in_height = random.randint(10, 288) in_width = random.randint(10, 288) in_channels = random.randint(1, 10) out_channels = random.randint(1, 32) kernel_shape_d = random.randint(1, 11) kernel_shape_h = random.randint(1, 11) kernel_shape_w = random.randint(1, 11) inputs = tf.placeholder( tf.float32, shape=[batch_size, in_depth, in_height, in_width, in_channels]) conv1 = snt.Conv3D( output_channels=out_channels, kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w], padding=snt.SAME, stride=1, use_bias=use_bias, name="conv1") output = conv1(inputs) self.assertTrue( output.get_shape().is_compatible_with( [batch_size, in_depth, in_height, in_width, out_channels])) self.assertTrue( conv1.w.get_shape().is_compatible_with( [kernel_shape_d, kernel_shape_h, kernel_shape_w, in_channels, out_channels])) if use_bias: self.assertTrue( conv1.b.get_shape().is_compatible_with( [out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testShapesWithUnknownInputShape(self, use_bias): """The generated shapes are correct when input shape not known.""" batch_size = 5 in_depth = in_height = in_width = 32 in_channels = out_channels = 5 kernel_shape_d = kernel_shape_h = kernel_shape_w = 3 inputs = tf.placeholder( tf.float32, shape=[None, None, None, None, in_channels], name="inputs") conv1 = snt.Conv3D( name="conv1", output_channels=out_channels, kernel_shape=[kernel_shape_d, kernel_shape_h, kernel_shape_w], padding=snt.SAME, stride=1, use_bias=use_bias) output = conv1(inputs) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() output_eval = output.eval({ inputs: np.zeros([batch_size, in_depth, in_height, in_width, in_channels])}) self.assertEqual( output_eval.shape, (batch_size, in_depth, in_height, in_width, out_channels)) def testKernelShape(self): """Errors are thrown for invalid kernel shapes.""" snt.Conv3D(output_channels=10, kernel_shape=[3, 4, 5], name="conv1") snt.Conv3D(output_channels=10, kernel_shape=3, name="conv1") with self.assertRaisesRegexp(snt.Error, "Invalid kernel shape.*"): snt.Conv3D(output_channels=10, kernel_shape=[3, 3], name="conv1") snt.Conv3D(output_channels=10, kernel_shape=[3, 3, 3, 3], name="conv1") def testStrideError(self): """Errors are thrown for invalid strides.""" snt.Conv3D( output_channels=10, kernel_shape=3, stride=1, name="conv1") snt.Conv3D( output_channels=10, kernel_shape=3, stride=[1, 1, 1], name="conv1") snt.Conv3D( output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1, 1], name="conv1") with self.assertRaisesRegexp(snt.Error, "Invalid stride.*"): snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1], name="conv1") snt.Conv3D(output_channels=10, kernel_shape=3, stride=[1, 1, 1, 1], name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateError(self, 
use_bias): """Errors are thrown for invalid dilation rates.""" snt.Conv3D( output_channels=10, kernel_shape=3, rate=1, name="conv1", use_bias=use_bias) snt.Conv3D( output_channels=10, kernel_shape=3, rate=2, name="conv1", use_bias=use_bias) for rate in [0, 0.5, -1]: with self.assertRaisesRegexp(snt.IncompatibleShapeError, "Invalid rate shape*"): snt.Conv3D(output_channels=10, kernel_shape=3, rate=rate, name="conv1") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRateAndStrideError(self, use_bias): """Errors are thrown for stride > 1 when using atrous convolution.""" err = "Cannot have stride > 1 with rate > 1" with self.assertRaisesRegexp(snt.NotSupportedError, err): snt.Conv3D(output_channels=10, kernel_shape=3, stride=2, rate=2, name="conv1", use_bias=use_bias) with self.assertRaisesRegexp(snt.NotSupportedError, err): snt.Conv3D(output_channels=10, kernel_shape=3, stride=[2, 2, 1], rate=2, name="conv1", use_bias=use_bias) def testInputTypeError(self): """Errors are thrown for invalid input types.""" conv1 = snt.Conv3D(output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, name="conv1", initializers={ "w": tf.constant_initializer(1.0), "b": tf.constant_initializer(1.0), }) for dtype in (tf.uint32, tf.uint64): x = tf.constant(np.ones([1, 5, 5, 5, 1]), dtype=dtype) self.assertRaisesRegexp(TypeError, "Input must have dtype tf.float.*", conv1, x) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testInitializers(self, use_bias): """Test initializers work as expected.""" w = random.random() b = random.random() conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(w, b, use_bias)) conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2])) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( conv1.w.eval(), np.full([3, 3, 3, 2, 1], w, dtype=np.float32)) if use_bias: self.assertAllClose( conv1.b.eval(), [b]) with self.assertRaises(TypeError): snt.Conv3D(output_channels=10, kernel_shape=3, stride=1, name="conv1", initializers={"w": tf.ones([])}) def testInitializerMutation(self): """Test that initializers are not mutated.""" initializers = {"b": tf.constant_initializer(0)} initializers_copy = dict(initializers) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, name="conv1", initializers=initializers) conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2])) self.assertAllEqual(initializers, initializers_copy) def testBiasInitializerIsZeroByDefault(self): """Test that the default initializer for the bias consists of zeros.""" conv1 = snt.Conv3D( output_channels=5, kernel_shape=3, stride=1) conv1(tf.placeholder(tf.float32, [5, 10, 10, 10, 7])) with self.test_session(): tf.variables_initializer([conv1.w, conv1.b]).run() self.assertAllClose( conv1.b.eval(), np.zeros([5], dtype=np.float32)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testRegularizersInRegularizationLosses(self, use_bias): regularizers = create_regularizers(use_bias, contrib_layers.l1_regularizer(scale=0.5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, regularizers=regularizers, use_bias=use_bias, name="conv1") conv1(tf.placeholder(tf.float32, [1, 10, 10, 10, 2])) graph_regularizers = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) self.assertRegexpMatches(graph_regularizers[0].name, ".*l1_regularizer.*") if use_bias: 
self.assertRegexpMatches(graph_regularizers[1].name, ".*l1_regularizer.*") @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationSame(self, use_bias): """Run through for something with a known answer using SAME padding.""" conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32))) expected_out = np.asarray([9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 19, 28, 28, 28, 19, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 13, 19, 19, 19, 13, 9, 13, 13, 13, 9]).reshape((5, 5, 5)) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( np.reshape(out.eval(), [5, 5, 5]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testComputationValid(self, use_bias): """Run through for something with a known answer using snt.VALID padding.""" conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, padding=snt.VALID, name="conv1", use_bias=use_bias, initializers=create_constant_initializers(1.0, 1.0, use_bias)) out = conv1(tf.constant(np.ones([1, 5, 5, 5, 1], dtype=np.float32))) expected_out = np.asarray([28] * 27).reshape((3, 3, 3)) if not use_bias: expected_out -= 1 with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( np.reshape(out.eval(), [3, 3, 3]), expected_out) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testSharing(self, use_bias): """Sharing is working.""" conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, stride=1, padding=snt.SAME, use_bias=use_bias, name="conv1") x = np.random.randn(1, 5, 5, 5, 1) x1 = tf.constant(x, dtype=np.float32) x2 = tf.constant(x, dtype=np.float32) out1 = conv1(x1) out2 = conv1(x2) with self.test_session(): tf.variables_initializer( [conv1.w, conv1.b] if use_bias else [conv1.w]).run() self.assertAllClose( out1.eval(), out2.eval()) # Now change the weights w = np.random.randn(3, 3, 3, 1, 1) conv1.w.assign(w).eval() self.assertAllClose( out1.eval(), out2.eval()) def testMask1D(self): """1D Masks are applied properly.""" mask = 0.5 * np.ones((3,), dtype=np.float32) inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 67.5 * np.ones((5, 3, 3, 3, 1), dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask2D(self): """2D Masks are applied properly.""" mask = np.ones((3, 3), dtype=np.float32) inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 135 * np.ones((5, 3, 3, 
3, 1), dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask3D(self): """3D Masks are applied properly.""" mask = np.ones((3, 3, 3), dtype=np.float32) inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask4D(self): """4D Masks are applied properly.""" mask = np.ones((3, 3, 3, 5), dtype=np.float32) inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMask5D(self): """5D Mask are applied properly.""" mask = np.ones((3, 3, 3, 5, 1), dtype=np.float32) inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5)) conv1 = snt.Conv3D( output_channels=1, kernel_shape=3, mask=mask, padding=snt.VALID, use_bias=False, initializers=create_constant_initializers(1.0, 0.0, use_bias=False)) out = conv1(inputs) expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32) with self.test_session(): tf.variables_initializer([conv1.w]).run() self.assertAllClose(out.eval(), expected_out) def testMaskErrorIncompatibleRank1(self): """Errors are thrown for incompatible rank 1 mask.""" np_mask = np.ones((2,), dtype=np.float32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank2(self): """Errors are thrown for incompatible rank 2 mask.""" np_mask = np.ones((5, 2), dtype=np.float32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank3(self): """Errors are thrown for incompatible rank 3 mask.""" np_mask = np.ones((5, 5, 2), dtype=np.float32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank4(self): """Errors are thrown for incompatible rank 4 mask.""" np_mask = np.ones((5, 5, 5, 2), dtype=np.float32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32) # Test with both numpy arrays and Tensors. 
for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIncompatibleRank5(self): """Errors are thrown for incompatible rank 5 mask.""" np_mask = np.ones((5, 5, 5, 5, 2), dtype=np.float32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32) # Test with both numpy arrays and Tensors. for mask in (np_mask, tf.convert_to_tensor(np_mask)): with self.assertRaises(snt.Error) as cm: snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Invalid mask shape: {}".format(np_mask.shape))) def testMaskErrorIfIncorrectDtype(self): """Errors are thrown when a Tensor with incorrect dtype is used.""" mask = tf.constant(0, shape=(4, 4), dtype=tf.int32) x = tf.constant(0.0, shape=(2, 8, 6, 5, 5)) with self.assertRaises(TypeError) as cm: snt.Conv3D(output_channels=4, kernel_shape=(4, 4, 4), mask=mask)(x) self.assertTrue(str(cm.exception).startswith( "Mask needs to have dtype float16, bfloat16, float32 or float64")) def testClone(self): net = snt.Conv3D(name="conv3d", output_channels=4, kernel_shape=3, stride=5) clone1 = net.clone() clone2 = net.clone(name="clone2") input_to_net = tf.placeholder(tf.float32, shape=[None, 101, 102, 103, 3]) net_out = net(input_to_net) clone1_out = clone1(input_to_net) clone2_out = clone2(input_to_net) all_vars = tf.trainable_variables() net_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=net.variable_scope.name + "/") clone1_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone1.variable_scope.name + "/") clone2_vars = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES, scope=clone2.variable_scope.name + "/") self.assertEqual(net.output_channels, clone1.output_channels) self.assertEqual(net.module_name + "_clone", clone1.module_name) self.assertEqual("clone2", clone2.module_name) self.assertLen(all_vars, 3*len(net_vars)) self.assertLen(net_vars, len(clone1_vars)) self.assertLen(net_vars, len(clone2_vars)) self.assertEqual(net_out.get_shape().as_list(), clone1_out.get_shape().as_list()) self.assertEqual(net_out.get_shape().as_list(), clone2_out.get_shape().as_list()) def testDataFormatNotSupported(self): """Errors are thrown when an unsupported data_format is used.""" x = tf.constant(0.0, shape=(2, 7, 8, 9, 6)) data_format = "NCHWD" self.assertNotIn(data_format, conv.SUPPORTED_3D_DATA_FORMATS) with self.assertRaisesRegexp(ValueError, "Invalid data_format"): snt.Conv3D(output_channels=4, kernel_shape=4, data_format=data_format)(x) class Conv3DTransposeTest(parameterized.TestCase, tf.test.TestCase): def setUp(self): """Set up some variables to re-use in multiple tests.""" super(Conv3DTransposeTest, self).setUp() self.batch_size = 7 self.in_depth = 7 self.in_height = 7 self.in_width = 11 self.in_channels = 4 self.out_channels = 10 self.kernel_shape_d = 5 self.kernel_shape_h = 5 self.kernel_shape_w = 7 self.stride_d = 1 self.stride_h = 2 self.stride_w = 3 self.padding = snt.SAME self.in_shape = (self.batch_size, self.in_depth, self.in_height, self.in_width, self.in_channels) self.out_shape = (self.in_depth, self.in_height, self.in_width) self.kernel_shape = (self.kernel_shape_d, self.kernel_shape_h, self.kernel_shape_w) self.kernel_shape2 = (self.kernel_shape_d, self.kernel_shape_h, self.kernel_shape_w, self.out_channels, self.in_channels) self.strides = (self.stride_d, 
self.stride_h, self.stride_w) def testKernelsNotSpecified(self): with self.assertRaisesRegexp(ValueError, "`kernel_shape` cannot be None."): snt.Conv3DTranspose(output_channels=1) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testOutputShapeConsistency(self, use_bias): """Tests if output shapes are valid.""" # When padding is SAME, then the actual number of padding pixels can be # computed as: pad = kernel_shape - strides + (-input_shape % strides) # = 5 - 1 + (- 32 % 1) = 4 # The formula for the minimal size is: # oH = strides[1] * (in_height - 1) - padding + kernel_shape_h # oH = 1 * ( 32 - 1) - 4 + 5 = 32 # The formula for the maximum size (due to extra pixels) is: # oH_max = oH + strides[1] - 1 # so, for strides = 1 and padding = SAME, input size == output size. inputs = tf.placeholder(tf.float32, shape=self.in_shape) conv1 = snt.Conv3DTranspose(name="conv3d_1", output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias) outputs = conv1(inputs) self.assertTrue(outputs.get_shape().is_compatible_with(( self.batch_size,) + self.out_shape + (self.out_channels,))) self.assertTrue(conv1.w.get_shape().is_compatible_with(self.kernel_shape2)) if use_bias: self.assertTrue(conv1.b.get_shape().is_compatible_with( [self.out_channels])) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testOutputShapeInteger(self, use_bias): """Tests if output shapes are valid when specified as an integer.""" inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32) inputs_2 = tf.zeros(shape=[3, 5, 7, 5, 2], dtype=tf.float32) conv1 = snt.Conv3DTranspose(name="conv3d_1", output_channels=10, output_shape=10, kernel_shape=5, padding=snt.SAME, stride=2, use_bias=use_bias) outputs = conv1(inputs) outputs_2 = conv1(inputs_2) self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10, 10))) with self.test_session() as sess: tf.global_variables_initializer().run() sess.run(outputs) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run(outputs_2) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testOutputShapeInference(self, use_bias): """Tests if output shapes are valid when not specified.""" inputs = tf.zeros(shape=[3, 5, 5, 5, 2], dtype=tf.float32) conv1 = snt.Conv3DTranspose(name="conv3d_1", output_channels=10, output_shape=None, kernel_shape=5, padding=snt.SAME, stride=2, use_bias=use_bias) outputs = conv1(inputs) self.assertTrue(outputs.get_shape().is_compatible_with((3, 10, 10, 10, 10))) def testBiasInitializerIsZeroByDefault(self): """Test that the default initializer for the bias consists of zeros.""" conv1 = snt.Conv3DTranspose( output_channels=7, kernel_shape=3, stride=1) conv1(tf.placeholder(tf.float32, [7, 10, 10, 10, 5])) with self.test_session(): tf.variables_initializer([conv1.w, conv1.b]).run() self.assertAllClose( conv1.b.eval(), np.zeros([7], dtype=np.float32)) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testTransposition(self, use_bias): """Tests if the correct output shapes are setup in transposed module.""" net = snt.Conv3DTranspose(name="conv3d_3", output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=1, use_bias=use_bias) net_transpose = net.transpose() input_to_net = tf.placeholder(tf.float32, shape=self.in_shape) err = "Variables in {} not instantiated yet, __call__ the module first." 
with self.assertRaisesRegexp(snt.NotConnectedError, err.format(net.scope_name)): net_transpose(input_to_net) net_transpose = net.transpose(name="another_net_transpose") net_out = net(input_to_net) net_transposed_output = net_transpose(net_out) self.assertEqual(net_transposed_output.get_shape(), input_to_net.get_shape()) @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testTransposeNDHWC(self, use_bias): """Test transpose for NDHWC format.""" conv3_transpose = snt.Conv3DTranspose( output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=self.strides, name="conv3_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NDHWC) conv3 = conv3_transpose.transpose() # Check kernel shapes, strides and padding match. self.assertEqual(conv3_transpose.kernel_shape, conv3.kernel_shape) self.assertEqual((1,) + self.strides + (1,), conv3.stride) self.assertEqual(conv3_transpose.conv_op_padding, conv3.conv_op_padding) # Before conv3_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv3_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv3.output_channels # After connection the number of `output_channels` is known. x = tf.constant(np.random.randn(self.batch_size, self.in_depth, self.in_height, self.in_width, self.in_channels), dtype=np.float32) conv3_transpose(x) self.assertEqual(self.in_channels, conv3.output_channels) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv3_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv3.input_shape @parameterized.named_parameters( ("WithBias", True), ("WithoutBias", False)) def testTransposeNCDHW(self, use_bias): """Test transpose for NCDHW format.""" conv3_transpose = snt.Conv3DTranspose( output_channels=self.out_channels, output_shape=self.out_shape, kernel_shape=self.kernel_shape, padding=self.padding, stride=self.strides, name="conv3_transpose", use_bias=use_bias, data_format=conv.DATA_FORMAT_NCDHW) conv3 = conv3_transpose.transpose() # Check kernel shapes, strides and padding match. self.assertEqual(conv3_transpose.kernel_shape, conv3.kernel_shape) self.assertEqual((1, 1) + self.strides, conv3.stride) self.assertEqual(conv3_transpose.conv_op_padding, conv3.conv_op_padding) # Before conv3_transpose is connected, we cannot know how many # `output_channels` conv1 should have. err = "Variables in conv3_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv3.output_channels # After connection the number of `output_channels` is known. x = tf.constant(np.random.randn(self.batch_size, self.in_channels, self.in_depth, self.in_height, self.in_width), dtype=np.float32) conv3_transpose(x) self.assertEqual(self.in_channels, conv3.output_channels) # However, even after connection, the `input_shape` of the forward # convolution is not known until it is itself connected (i.e. it can be # connected to a different shape input from the `output_shape` of the # transpose convolution!) err = "Variables in conv3_transpose_transpose not instantiated yet" with self.assertRaisesRegexp(snt.NotConnectedError, err): _ = conv3.input_shape if __name__ == "__main__": tf.test.main()
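# --- Added sketch (not part of the original test file) ----------------------
# The comment in testOutputShapeConsistency derives the output size of a
# SAME-padded transposed convolution. The helper below just replays that
# arithmetic in plain Python; the function name is illustrative, not a Sonnet
# API.
def _same_transpose_output_range(input_size, kernel_size, stride):
  """Returns the (minimal, maximal) spatial output size for SAME padding."""
  pad = kernel_size - stride + (-input_size % stride)
  minimum = stride * (input_size - 1) - pad + kernel_size
  return minimum, minimum + stride - 1

# Stride 1 preserves the spatial size, as asserted in testOutputShapeConsistency:
assert _same_transpose_output_range(32, kernel_size=5, stride=1) == (32, 32)
# Stride 2 on a size-5 input admits outputs 9..10, so the output_shape=10 used
# in testOutputShapeInteger (and inferred in testOutputShapeInference) is valid:
assert _same_transpose_output_range(5, kernel_size=5, stride=2) == (9, 10)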
sonnet-1
sonnet/python/modules/conv_test.py
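# --- Added sketch (not part of the original dataset row) --------------------
# The mask tests in the file above only exercise the failure cases. For
# contrast, here is a minimal sketch of a mask the checks would accept,
# assuming the convention the error tests imply: a float mask shaped either
# like the kernel (rank 3) or like the full weight with trailing in/out
# channel dimensions (rank 5). This is an illustrative snippet, not code from
# the repository.
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf

x = tf.zeros([2, 8, 6, 5, 5], dtype=tf.float32)      # NDHWC, 5 input channels
kernel_mask = np.ones((5, 5, 5), dtype=np.float32)   # matches kernel_shape=5
masked_conv = snt.Conv3D(output_channels=4, kernel_shape=5, mask=kernel_mask)
y = masked_conv(x)                                    # no snt.Error expected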
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for `sonnet.python.modules.conv`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools # Dependency imports from absl.testing import parameterized import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.python.platform import test Conv1DInput = collections.namedtuple( "Conv1DInput", ["input_batch", "input_width", "input_channels"]) Conv2DInput = collections.namedtuple( "Conv2DInput", ["input_batch", "input_height", "input_width", "input_channels"]) Conv3DInput = collections.namedtuple( "Conv3DInput", ["input_batch", "input_depth", "input_height", "input_width", "input_channels"]) def create_initializers(use_bias=True): if use_bias: return {"w": tf.truncated_normal_initializer(), "b": tf.constant_initializer()} else: return {"w": tf.truncated_normal_initializer()} def create_custom_field_getter(conv_module, field_to_get): """Replace the tf.get_variable call for `field_to_get`.""" def custom_getter(*args, **kwargs): # pylint: disable=unused-argument return getattr(conv_module, field_to_get) return custom_getter class Conv1DTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 4 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv1DInput(2, 17, 18) def setUp(self): super(Conv1DTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv1DDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" func = functools.partial( snt.Conv1D, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nwc = conv_nwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_nwc, "w"), "b": create_custom_field_getter(conv_nwc, "b")} conv_nwc = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(conv_nwc(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv1DDataFormatsBatchNorm(self, use_bias): """Similar to `testConv1DDataFormats`, but this checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv1D( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format = "NCW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, axis=(0, 2)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nwc = seq_nwc(x) custom_getter = {"w": create_custom_field_getter(seq_nwc.layers[0], "w"), "b": create_custom_field_getter(seq_nwc.layers[0], "b")} seq_ncw = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(seq_ncw(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) class CausalConv1DTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 4 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv1DInput(2, 17, 18) def setUp(self): super(CausalConv1DTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testCausalConv1DDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" func = functools.partial( snt.CausalConv1D, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nwc = conv_nwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. custom_getter = {"w": create_custom_field_getter(conv_nwc, "w"), "b": create_custom_field_getter(conv_nwc, "b")} conv_ncw = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(conv_ncw(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testCausalConv1DDataFormatsBatchNorm(self, use_bias): """Similar to `testCausalConv1DDataFormats`. 
Checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.CausalConv1D( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format == "NCW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, axis=(0, 2)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nwc = seq_nwc(x) custom_getter = {"w": create_custom_field_getter(seq_nwc.layers[0], "w"), "b": create_custom_field_getter(seq_nwc.layers[0], "b")} seq_ncw = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(seq_ncw(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) class Conv2DTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 5 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv2DInput(2, 18, 19, 4) def setUp(self): super(Conv2DTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv2DDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" func = functools.partial( snt.Conv2D, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_nhwc = func(name="NHWC", data_format="NHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nhwc = conv_nhwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_nhwc, "w"), "b": create_custom_field_getter(conv_nhwc, "b")} conv_nchw = func(name="NCHW", data_format="NCHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 3, 1, 2)) result_nchw = tf.transpose(conv_nchw(x_transpose), perm=(0, 2, 3, 1)) self.checkEquality(result_nhwc, result_nchw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv2DDataFormatsBatchNorm(self, use_bias): """Similar to `testConv2DDataFormats`, but this checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv2D( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NHWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format = "NCHW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, fused=True, axis=(0, 2, 3)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_nhwc = func(name="NHWC", data_format="NHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nhwc = seq_nhwc(x) custom_getter = {"w": create_custom_field_getter(seq_nhwc.layers[0], "w"), "b": create_custom_field_getter(seq_nhwc.layers[0], "b")} seq_nchw = func(name="NCHW", data_format="NCHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 3, 1, 2)) result_nchw = tf.transpose(seq_nchw(x_transpose), perm=(0, 2, 3, 1)) self.checkEquality(result_nhwc, result_nchw) class Conv3DTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 5 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv3DInput(2, 17, 18, 19, 4) def setUp(self): super(Conv3DTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv3DDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" func = functools.partial( snt.Conv3D, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_ndhwc = func(name="NDHWC", data_format="NDHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_ndhwc = conv_ndhwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_ndhwc, "w"), "b": create_custom_field_getter(conv_ndhwc, "b")} conv_ncdhw = func(name="NCDHW", data_format="NCDHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 4, 1, 2, 3)) result_ncdhw = tf.transpose(conv_ncdhw(x_transpose), perm=(0, 2, 3, 4, 1)) self.checkEquality(result_ndhwc, result_ncdhw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv3DDataFormatsBatchNorm(self, use_bias): """Similar to `testConv3DDataFormats`, but this checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv3D( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NDHWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format = "NCDHW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, axis=(0, 2, 3, 4)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_ndhwc = func(name="NDHWC", data_format="NDHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_ndhwc = seq_ndhwc(x) custom_getter = {"w": create_custom_field_getter(seq_ndhwc.layers[0], "w"), "b": create_custom_field_getter(seq_ndhwc.layers[0], "b")} seq_ncdhw = func(name="NCDHW", data_format="NCDHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 4, 1, 2, 3)) result_ncdhw = tf.transpose(seq_ncdhw(x_transpose), perm=(0, 2, 3, 4, 1)) self.checkEquality(result_ndhwc, result_ncdhw) class Conv1DTransposeTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 5 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv1DInput(2, 17, 4) def setUp(self): super(Conv1DTransposeTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv1DTransposeDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" input_shape = (self.INPUT_SHAPE.input_batch, int(np.ceil(self.INPUT_SHAPE.input_width / stride)), self.INPUT_SHAPE.input_channels) func = functools.partial( snt.Conv1DTranspose, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_width,), use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(input_shape).astype(np.float32)) result_nwc = conv_nwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_nwc, "w"), "b": create_custom_field_getter(conv_nwc, "b")} conv_ncw = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(conv_ncw(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv1DTransposeDataFormatsBatchNorm(self, use_bias): """Like `testConv1DTransposeDataFormats` but checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv1DTranspose( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_width,), use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format == "NCW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, axis=(0, 2)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_nwc = func(name="NWC", data_format="NWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nwc = seq_nwc(x) custom_getter = {"w": create_custom_field_getter(seq_nwc.layers[0], "w"), "b": create_custom_field_getter(seq_nwc.layers[0], "b")} seq_ncw = func(name="NCW", data_format="NCW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 2, 1)) result_ncw = tf.transpose(seq_ncw(x_transpose), perm=(0, 2, 1)) self.checkEquality(result_nwc, result_ncw) class Conv2DTransposeTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 5 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv2DInput(2, 18, 19, 4) def setUp(self): super(Conv2DTransposeTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv2DTransposeDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" input_shape = (self.INPUT_SHAPE.input_batch, int(np.ceil(self.INPUT_SHAPE.input_height / stride)), int(np.ceil(self.INPUT_SHAPE.input_width / stride)), self.INPUT_SHAPE.input_channels) func = functools.partial( snt.Conv2DTranspose, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_height, self.INPUT_SHAPE.input_width), use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_nhwc = func(name="NHWC", data_format="NHWC") x = tf.constant(np.random.random(input_shape).astype(np.float32)) result_nhwc = conv_nhwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_nhwc, "w"), "b": create_custom_field_getter(conv_nhwc, "b")} conv_nchw = func(name="NCHW", data_format="NCHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 3, 1, 2)) result_nchw = tf.transpose(conv_nchw(x_transpose), perm=(0, 2, 3, 1)) self.checkEquality(result_nhwc, result_nchw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv2DTransposeDataFormatsBatchNorm(self, use_bias): """Like `testConv2DTransposeDataFormats` but checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv2DTranspose( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_height, self.INPUT_SHAPE.input_width), use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NHWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format == "NCHW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, fused=True, axis=(0, 2, 3)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_nhwc = func(name="NHWC", data_format="NHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_nhwc = seq_nhwc(x) custom_getter = {"w": create_custom_field_getter(seq_nhwc.layers[0], "w"), "b": create_custom_field_getter(seq_nhwc.layers[0], "b")} seq_nchw = func(name="NCHW", data_format="NCHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 3, 1, 2)) result_nchw = tf.transpose(seq_nchw(x_transpose), perm=(0, 2, 3, 1)) self.checkEquality(result_nhwc, result_nchw) class Conv3DTransposeTestDataFormats(parameterized.TestCase, tf.test.TestCase): OUT_CHANNELS = 5 KERNEL_SHAPE = 3 INPUT_SHAPE = Conv3DInput(2, 17, 18, 19, 4) def setUp(self): super(Conv3DTransposeTestDataFormats, self).setUp() name = "{}.{}".format(type(self).__name__, self._testMethodName) if not test.is_gpu_available(): self.skipTest("No GPU was detected, so {} will be skipped.".format(name)) def checkEquality(self, o1, o2, atol=1e-5): with self.test_session(use_gpu=True, force_gpu=True): tf.global_variables_initializer().run() self.assertAllClose(o1.eval(), o2.eval(), atol=atol) @parameterized.named_parameters( ("WithBias_Stride1", True, 1), ("WithoutBias_Stride1", False, 1), ("WithBias_Stride2", True, 2), ("WithoutBias_Stride2", False, 2)) def testConv3DTransposeDataFormats(self, use_bias, stride): """Check the module produces the same result for supported data formats.""" input_shape = (self.INPUT_SHAPE.input_batch, int(np.ceil(self.INPUT_SHAPE.input_depth / stride)), int(np.ceil(self.INPUT_SHAPE.input_height / stride)), int(np.ceil(self.INPUT_SHAPE.input_width / stride)), self.INPUT_SHAPE.input_channels) func = functools.partial( snt.Conv3DTranspose, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_depth, self.INPUT_SHAPE.input_height, self.INPUT_SHAPE.input_width), use_bias=use_bias, stride=stride, initializers=create_initializers(use_bias)) conv_ndhwc = func(name="NDHWC", data_format="NDHWC") x = tf.constant(np.random.random(input_shape).astype(np.float32)) result_ndhwc = conv_ndhwc(x) # We will force both modules to share the same weights by creating # a custom getter that returns the weights from the first conv module when # tf.get_variable is called. 
custom_getter = {"w": create_custom_field_getter(conv_ndhwc, "w"), "b": create_custom_field_getter(conv_ndhwc, "b")} conv_ncdhw = func(name="NCDHW", data_format="NCDHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 4, 1, 2, 3)) result_ncdhw = tf.transpose(conv_ncdhw(x_transpose), perm=(0, 2, 3, 4, 1)) self.checkEquality(result_ndhwc, result_ncdhw) @parameterized.named_parameters(("WithBias", True), ("WithoutBias", False)) def testConv3DTransposeDataFormatsBatchNorm(self, use_bias): """Like `testConv3DTransposeDataFormats` but checks BatchNorm support.""" def func(name, data_format, custom_getter=None): conv = snt.Conv3DTranspose( name=name, output_channels=self.OUT_CHANNELS, kernel_shape=self.KERNEL_SHAPE, output_shape=(self.INPUT_SHAPE.input_depth, self.INPUT_SHAPE.input_height, self.INPUT_SHAPE.input_width), use_bias=use_bias, initializers=create_initializers(use_bias), data_format=data_format, custom_getter=custom_getter) if data_format == "NDHWC": batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None) else: # data_format == "NCDHW" batch_norm = snt.BatchNorm(scale=True, update_ops_collection=None, axis=(0, 2, 3, 4)) return snt.Sequential([conv, functools.partial(batch_norm, is_training=True)]) seq_ndhwc = func(name="NDHWC", data_format="NDHWC") x = tf.constant(np.random.random(self.INPUT_SHAPE).astype(np.float32)) result_ndhwc = seq_ndhwc(x) custom_getter = {"w": create_custom_field_getter(seq_ndhwc.layers[0], "w"), "b": create_custom_field_getter(seq_ndhwc.layers[0], "b")} seq_ncdhw = func(name="NCDHW", data_format="NCDHW", custom_getter=custom_getter) x_transpose = tf.transpose(x, perm=(0, 4, 1, 2, 3)) result_ncdhw = tf.transpose(seq_ncdhw(x_transpose), perm=(0, 2, 3, 4, 1)) self.checkEquality(result_ndhwc, result_ncdhw) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/conv_gpu_test.py
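# --- Added sketch (not part of the original dataset row) --------------------
# The BatchNorm variants in the file above pick their `axis` argument so that
# normalisation runs over every axis except the channel axis: channels-last
# layouts rely on the snt.BatchNorm default, while channels-first layouts
# spell the axes out. The helper below is a hypothetical illustration of that
# rule, not a Sonnet API.
def _batch_norm_axes(data_format):
  channel_axis = data_format.index("C")
  return tuple(a for a in range(len(data_format)) if a != channel_axis)

assert _batch_norm_axes("NWC") == (0, 1)        # the channels-last default
assert _batch_norm_axes("NCW") == (0, 2)
assert _batch_norm_axes("NCHW") == (0, 2, 3)
assert _batch_norm_axes("NCDHW") == (0, 2, 3, 4)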
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.nn.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections # Dependency imports import mock import six import sonnet as snt import tensorflow.compat.v1 as tf class SequentialTest(tf.test.TestCase): def testConstruct(self): module1 = snt.Linear(name="linear_1", output_size=123) module2 = snt.Linear(name="linear_2", output_size=45) seq = snt.Sequential([module1, tf.nn.relu, module2], name="sequential1") inputs = tf.placeholder(tf.float32, [67, 89]) outputs = seq(inputs) self.assertEqual(outputs.get_shape().as_list(), [67, 45]) self.assertEqual(seq.get_variables(), ()) self.assertEqual(len(seq.layers), 3) def testConstructError(self): module1 = snt.Linear(name="linear_1", output_size=123) module2 = snt.Linear(name="linear_2", output_size=45) with self.assertRaisesRegexp(TypeError, "Items 1 not callable with types: int"): snt.Sequential([module1, 5, module2], name="sequential1") err_str = "Items 1, 2 not callable with types: int, bool" with self.assertRaisesRegexp(TypeError, err_str): snt.Sequential([module1, 5, True, module2], name="sequential1") def testTupleInput(self): def module1(a, b): return a, b _, _ = snt.Sequential([module1, module1], name="seq1")(1, 2) def module2(a, b, c): return a, b, c if six.PY3: err_str = r"module2\(\) missing 1 required positional argument: 'c'" else: err_str = r"module2\(\) takes exactly 3 arguments \(2 given\)" with self.assertRaisesRegexp(TypeError, err_str): _, _ = snt.Sequential([module1, module2], name="seq2")(1, 2) def testCopiesModules(self): modules = [snt.Linear(output_size=200), tf.tanh, snt.Linear(output_size=10)] sequential = snt.Sequential(modules) # Modify the list, to simulate PEBKAC. Sequential must make internal copy. modules[1] = "i'm a string, not a module" # Connecting the Sequential would produce a TypeError if `modules` was # stored by reference, rather than making a copy. 
sequential(tf.placeholder(tf.float32, [23, 42])) def testNoneFails(self): with self.assertRaisesRegexp(TypeError, "'NoneType' object is not iterable"): snt.Sequential(None) def testNameScopeRecording(self): lin = snt.Linear(output_size=256) sequential = snt.Sequential([lin]) with tf.name_scope("blah"): sequential(tf.placeholder(dtype=tf.float32, shape=[2, 3])) self.assertEqual(sequential.name_scopes, ("blah/sequential",)) self.assertEqual(lin.name_scopes, ("blah/sequential/linear",)) def testWarning(self): seq = snt.Sequential([snt.Linear(output_size=23), snt.Linear(output_size=42)]) seq(tf.placeholder(dtype=tf.float32, shape=[2, 3])) with mock.patch.object(tf.logging, "warning") as mocked_logging_warning: self.assertEqual((), seq.get_variables()) self.assertTrue(mocked_logging_warning.called) first_call_args = mocked_logging_warning.call_args[0] self.assertTrue("will always return an empty tuple" in first_call_args[0]) def testNoLayers(self): # These two should really do the same thing. seq_with_identity = snt.Sequential([tf.identity]) seq_with_no_layers = snt.Sequential([]) inputs = tf.constant(3) identity_output = seq_with_identity(inputs) no_layers_output = seq_with_no_layers(inputs) # Make sure output is not a list / tuple, for either of the above cases. self.assertFalse(isinstance(identity_output, collections.Sequence)) self.assertFalse(isinstance(no_layers_output, collections.Sequence)) with self.test_session() as session: identity_output_np, no_layers_output_np = session.run( [identity_output, no_layers_output]) self.assertAllEqual(identity_output_np, no_layers_output_np) def testVariableProperties(self): seq = snt.Sequential([snt.Linear(output_size=23), snt.Linear(output_size=42)]) seq(tf.placeholder(dtype=tf.float32, shape=[2, 3])) self.assertEqual(len(seq.variables), 4) self.assertEqual(len(seq.trainable_variables), 4) self.assertEqual(len(seq.non_trainable_variables), 0) if __name__ == "__main__": tf.test.main()
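# --- Added sketch (not part of the original test file) ----------------------
# A compressed illustration of the variable-access behaviour pinned down by
# testWarning and testVariableProperties above: snt.Sequential owns no
# variables of its own, so get_variables() is always empty, while the wrapped
# Linear modules stay reachable through .layers and their variables through
# .variables / .trainable_variables.
def _sequential_variable_access():
  seq = snt.Sequential([snt.Linear(output_size=8), tf.nn.relu,
                        snt.Linear(output_size=2)])
  seq(tf.placeholder(tf.float32, [4, 16]))
  assert seq.get_variables() == ()   # logs a warning, returns an empty tuple
  assert len(seq.variables) == 4     # w and b of each Linear
  first_linear = seq.layers[0]       # the underlying modules are kept
  return first_linear.w, first_linear.b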
sonnet-1
sonnet/python/modules/sequential_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for recurrent cores in snt.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools # Dependency imports from absl.testing import parameterized import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import sonnet as snt import tensorflow.compat.v1 as tf from tensorflow.contrib import rnn as contrib_rnn from tensorflow.contrib.eager.python import tfe as contrib_eager from tensorflow.python.ops import variables # pylint: disable=g-direct-tensorflow-import # Some helpers used for generic tests which cover both LSTM and BatchNormLSTM: def _get_lstm_variable_names(lstm): if isinstance(lstm, snt.BatchNormLSTM): var_names = lstm.get_possible_initializer_keys( lstm.use_peepholes, lstm.use_batch_norm_h, lstm.use_batch_norm_x, lstm.use_batch_norm_c) if lstm.use_batch_norm_h or lstm.use_batch_norm_x: var_names |= {"w_x", "w_h"} else: var_names |= {"w_xh"} else: var_names = lstm.get_possible_initializer_keys(lstm.use_peepholes) var_names |= {"w_xh"} var_names -= {"w_gates", "b_gates"} var_names |= {"b"} return var_names def _construct_lstm(use_batch_norm_h=False, use_batch_norm_x=False, use_batch_norm_c=False, max_unique_stats=1, **kwargs): if any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c]): cell = snt.BatchNormLSTM( use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, max_unique_stats=max_unique_stats, **kwargs) return cell, cell.with_batch_norm_control(is_training=True) else: cell = snt.LSTM(**kwargs) return cell, cell def _get_possible_initializer_keys(use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): if any([use_batch_norm_h, use_batch_norm_x, use_batch_norm_c]): return snt.BatchNormLSTM.get_possible_initializer_keys( use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c) else: return snt.LSTM.get_possible_initializer_keys(use_peepholes) @contrib_eager.run_all_tests_in_graph_and_eager_modes class LSTMTest(tf.test.TestCase, parameterized.TestCase): def testShape(self): batch_size = 2 hidden_size = 4 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) lstm = snt.LSTM(hidden_size) output, next_state = lstm(inputs, (prev_hidden, prev_cell)) shape = np.ndarray((batch_size, hidden_size)) self.assertShapeEqual(shape, next_state[0]) self.assertShapeEqual(shape, next_state[1]) self.assertShapeEqual(shape, output) def testVariables(self): batch_size = 5 hidden_size = 20 mod_name = "rnn" inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, 
shape=[batch_size, hidden_size]) lstm = snt.LSTM(hidden_size, name=mod_name) self.assertEqual(lstm.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): lstm.get_variables() lstm(inputs, (prev_hidden, prev_cell)) lstm_variables = lstm.get_variables() self.assertLen(lstm_variables, 2, "LSTM should have 2 variables") param_map = {param.name.split("/")[-1].split(":")[0]: param for param in lstm_variables} self.assertShapeEqual( np.ndarray(4 * hidden_size), tf.convert_to_tensor(param_map[snt.LSTM.B_GATES])) self.assertShapeEqual( np.ndarray((2 * hidden_size, 4 * hidden_size)), tf.convert_to_tensor(param_map[snt.LSTM.W_GATES])) @parameterized.named_parameters( [("lstm", None), ("lstm_with_recurrent_projection", 6)]) def testComputation(self, projection_size): batch_size = 2 hidden_size = 4 hidden_state_size = projection_size or hidden_size # With random data, check the TF calculation matches the Numpy version. input_data = np.random.randn(batch_size, hidden_size).astype(np.float32) prev_hidden_data = np.random.randn(batch_size, hidden_state_size).astype(np.float32) prev_cell_data = np.random.randn(batch_size, hidden_size).astype(np.float32) inputs = tf.constant(input_data) prev_cell = tf.constant(prev_cell_data) prev_hidden = tf.constant(prev_hidden_data) lstm = snt.LSTM(hidden_size, projection_size=projection_size) _, next_state = lstm(inputs, (prev_hidden, prev_cell)) next_hidden, next_cell = next_state lstm_variables = lstm.get_variables() param_map = {param.name.split("/")[-1].split(":")[0]: param for param in lstm_variables} self.evaluate(tf.global_variables_initializer()) fetches = [(next_hidden, next_cell), param_map[snt.LSTM.W_GATES], param_map[snt.LSTM.B_GATES]] if projection_size is not None: fetches.append(param_map[snt.LSTM.W_H_PROJECTION]) output = self.evaluate(fetches) next_state_ex, gate_weights_ex, gate_biases_ex = output[:3] in_and_hid = np.concatenate((input_data, prev_hidden_data), axis=1) real_gate = np.dot(in_and_hid, gate_weights_ex) + gate_biases_ex # i = input_gate, j = next_input, f = forget_gate, o = output_gate i, j, f, o = np.hsplit(real_gate, 4) real_cell = (prev_cell_data / (1 + np.exp(-(f + lstm._forget_bias))) + 1 / (1 + np.exp(-i)) * np.tanh(j)) real_hidden = np.tanh(real_cell) * 1 / (1 + np.exp(-o)) if projection_size is not None: real_hidden = np.matmul(real_hidden, output[-1]) self.assertAllClose(real_hidden, next_state_ex[0]) self.assertAllClose(real_cell, next_state_ex[1]) def testPeephole(self): batch_size = 5 hidden_size = 20 # With random data, check the TF calculation matches the Numpy version. input_data = np.random.randn(batch_size, hidden_size).astype(np.float32) prev_hidden_data = np.random.randn(batch_size, hidden_size).astype(np.float32) prev_cell_data = np.random.randn(batch_size, hidden_size).astype(np.float32) # Initialize the rnn and verify the number of parameter sets. inputs = tf.constant(input_data) prev_cell = tf.constant(prev_cell_data) prev_hidden = tf.constant(prev_hidden_data) lstm = snt.LSTM(hidden_size, use_peepholes=True) _, next_state = lstm(inputs, (prev_hidden, prev_cell)) next_hidden, next_cell = next_state lstm_variables = lstm.get_variables() self.assertLen(lstm_variables, 5, "LSTM should have 5 variables") # Unpack parameters into dict and check their sizes. 
param_map = {param.name.split("/")[-1].split(":")[0]: param for param in lstm_variables} self.assertShapeEqual( np.ndarray(4 * hidden_size), tf.convert_to_tensor(param_map[snt.LSTM.B_GATES])) self.assertShapeEqual( np.ndarray((2 * hidden_size, 4 * hidden_size)), tf.convert_to_tensor(param_map[snt.LSTM.W_GATES])) self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map[snt.LSTM.W_F_DIAG])) self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map[snt.LSTM.W_I_DIAG])) self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map[snt.LSTM.W_O_DIAG])) self.evaluate(tf.global_variables_initializer()) fetches = [(next_hidden, next_cell), param_map[snt.LSTM.W_GATES], param_map[snt.LSTM.B_GATES], param_map[snt.LSTM.W_F_DIAG], param_map[snt.LSTM.W_I_DIAG], param_map[snt.LSTM.W_O_DIAG]] output = self.evaluate(fetches) next_state_ex, w_ex, b_ex, wfd_ex, wid_ex, wod_ex = output in_and_hid = np.concatenate((input_data, prev_hidden_data), axis=1) real_gate = np.dot(in_and_hid, w_ex) + b_ex # i = input_gate, j = next_input, f = forget_gate, o = output_gate i, j, f, o = np.hsplit(real_gate, 4) real_cell = (prev_cell_data / (1 + np.exp(-(f + lstm._forget_bias + wfd_ex * prev_cell_data))) + 1 / (1 + np.exp(-(i + wid_ex * prev_cell_data))) * np.tanh(j)) real_hidden = (np.tanh(real_cell + wod_ex * real_cell) * 1 / (1 + np.exp(-o))) self.assertAllClose(real_hidden, next_state_ex[0]) self.assertAllClose(real_cell, next_state_ex[1]) @parameterized.parameters( *itertools.product( (True, False), (True, False), (True, False), (True, False)) ) def testInitializers(self, use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): batch_size = 2 hidden_size = 4 keys = _get_possible_initializer_keys( use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c) initializers = { key: tf.constant_initializer(1.5) for key in keys } # Test we can successfully create the LSTM with initializers. lstm, wrapped_lstm = _construct_lstm(hidden_size=hidden_size, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, initializers=initializers) # Test we can build the LSTM. inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) wrapped_lstm(inputs, (prev_hidden, prev_cell)) init = tf.global_variables_initializer() # Test that the initializers have been correctly applied. lstm_variable_names = _get_lstm_variable_names(lstm) lstm_variables = [getattr(lstm, "_" + name) for name in lstm_variable_names] self.evaluate(init) lstm_variables_v = self.evaluate(lstm_variables) for lstm_variable_v in lstm_variables_v: self.assertAllClose(lstm_variable_v, 1.5 * np.ones(lstm_variable_v.shape)) def testPeepholeInitializersCheck(self): hidden_size = 4 # Test that passing in a peephole initializer when we don't request peephole # connections raises an error. for key in [snt.LSTM.W_F_DIAG, snt.LSTM.W_I_DIAG, snt.LSTM.W_O_DIAG]: with self.assertRaisesRegexp(KeyError, "Invalid initializer"): snt.LSTM(hidden_size, use_peepholes=False, initializers={key: tf.constant_initializer(0)}) @parameterized.parameters( (1e-6, 14), (0.5, None), (1 - 1e-6, 0) ) def testRecurrentDropout(self, keep_prob, expected_zeros): """Performs various recurrent dropout checks. - The training and testing versions have the same output when the keep prob is very close to 1. 
- The return is deterministic for keep probs 0 or close to 1. - The final hidden state has 0s at the same position as the mask. Args: keep_prob: the recurrent dropout keep probability. expected_zeros: the number of expected zeros in the dropout mask. """ batch_size = 2 input_size = 3 hidden_size = 7 seq_len = 5 train_cell, test_cell = snt.lstm_with_recurrent_dropout( hidden_size, keep_prob=keep_prob) input_data = np.stack( [np.random.rand(seq_len, input_size)] * batch_size).astype(np.float32) inputs = tf.constant(input_data) train_output, ((train_hidden, _), [train_mask]) = tf.nn.dynamic_rnn( train_cell, inputs, initial_state=train_cell.initial_state(batch_size, tf.float32), dtype=tf.float32) valid_output, _ = tf.nn.dynamic_rnn( test_cell, inputs, initial_state=test_cell.initial_state( batch_size, tf.float32), dtype=tf.float32) self.evaluate(tf.global_variables_initializer()) # Use the same input data for each row. input_data = np.stack([np.random.rand(seq_len, input_size)] * batch_size) train_out, valid_out, hidden, mask = self.evaluate( [train_output, valid_output, train_hidden, train_mask]) almost_one = abs(1 - keep_prob) < 1e-5 if almost_one: self.assertAllClose(train_out, valid_out) else: self.assertGreater(np.max(train_out - valid_out), 0.001) self.assertAllClose(valid_out[0], valid_out[1]) deterministic = almost_one or abs(keep_prob < 1e-5) if deterministic: self.assertAllClose(train_out[0], train_out[1]) self.assertEqual(expected_zeros, np.sum(hidden == 0)) self.assertEqual(expected_zeros, np.sum(mask == 0)) else: self.assertGreater(np.max(train_out[0] - train_out[1]), 0.001) self.assertAllEqual(mask == 0, hidden == 0) @parameterized.parameters( (1 - 1e-6, 0, 0), (0.5, None, None), (1e-6, 14, 14) ) def testZoneout(self, keep_prob, expected_frozen_h, expected_frozen_c): """Performs various zoneout checks. The training and testing versions have the same output when keep_prob is close to 0 or close to 1. The returned output for the training version is also deterministic in this case. Args: keep_prob: the probability to use the updated version of the state. expected_frozen_h: the number of hidden state values that are left unchanged after applying one step of LSTM with zoneout. expected_frozen_c: the number of cell state values that are left unchanged after applying one step of LSTM with zoneout. """ batch_size = 2 input_size = 3 hidden_size = 7 seq_len = 5 train_cell, test_cell = snt.lstm_with_zoneout( hidden_size, keep_prob_c=keep_prob, keep_prob_h=keep_prob) # Use the same input data for each row. 
input_data = np.stack( [np.random.rand(seq_len, input_size)] * batch_size).astype(np.float32) inputs = tf.constant(input_data) train_output, (train_h, train_c) = tf.nn.dynamic_rnn( train_cell, inputs, dtype=tf.float32) _, (next_train_h, next_train_c) = train_cell( inputs[:, 0], snt.LSTMState(train_h, train_c)) valid_output, _ = tf.nn.dynamic_rnn( test_cell, inputs, dtype=tf.float32) self.evaluate(tf.global_variables_initializer()) outputs = self.evaluate({ "train_out": train_output, "valid_out": valid_output, "train_h": train_h, "train_c": train_c, "next_train_h": next_train_h, "next_train_c": next_train_c, }) self.assertAllClose(outputs["valid_out"][0], outputs["valid_out"][1]) deterministic = abs(1 - keep_prob) < 1e-5 or abs(keep_prob < 1e-5) if deterministic: self.assertAllClose(outputs["train_out"], outputs["valid_out"]) self.assertAllClose(outputs["train_out"][0], outputs["train_out"][1]) self.assertEqual(expected_frozen_h, np.sum(outputs["train_h"] == outputs["next_train_h"])) self.assertEqual(expected_frozen_c, np.sum(outputs["train_c"] == outputs["next_train_c"])) else: # Ensure that the training and validation outputs are different. self.assertGreater( np.max(np.abs(outputs["train_out"] - outputs["valid_out"])), 0.05) # Ensure that the training output is not deterministic. self.assertGreater( np.max(np.abs(outputs["train_out"][0] - outputs["train_out"][1])), 0.04) @parameterized.parameters( (True, False, False), (False, True, False), (False, False, True) ) def testBatchNormBuildFlag(self, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): """Check if an error is raised if we don't specify the is_training flag.""" batch_size = 2 hidden_size = 4 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) err = "is_training flag must be explicitly specified" with self.assertRaisesRegexp(ValueError, err): lstm = snt.BatchNormLSTM( hidden_size, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c) lstm(inputs, (prev_cell, prev_hidden)) def testBatchNormInitializersCheck(self): hidden_size = 4 # Test that passing in a batchnorm initializer when we don't request # that form of batchnorm raises an error. for key, options in [ (snt.BatchNormLSTM.GAMMA_H, {"use_batch_norm_h": False, "use_batch_norm_x": True}), (snt.BatchNormLSTM.GAMMA_X, {"use_batch_norm_x": False, "use_batch_norm_h": True}), (snt.BatchNormLSTM.GAMMA_C, {"use_batch_norm_c": False, "use_batch_norm_h": True}), (snt.BatchNormLSTM.BETA_C, {"use_batch_norm_c": False, "use_batch_norm_h": True})]: with self.assertRaisesRegexp(KeyError, "Invalid initializer"): snt.BatchNormLSTM( hidden_size, initializers={key: tf.constant_initializer(0)}, **options) @parameterized.parameters( *itertools.product( (True, False), (True, False), (True, False), (True, False)) ) def testPartitioners(self, use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): if tf.executing_eagerly(): self.skipTest("Partitioned variables arenot supported in eager mode.") batch_size = 2 hidden_size = 4 keys = _get_possible_initializer_keys( use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c) partitioners = { key: tf.variable_axis_size_partitioner(10) for key in keys } # Test we can successfully create the LSTM with partitioners. 
lstm, wrapped_lstm = _construct_lstm(hidden_size=hidden_size, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, partitioners=partitioners) # Test we can build the LSTM inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) wrapped_lstm(inputs, (prev_hidden, prev_cell)) # Test that the variables are partitioned. var_names = _get_lstm_variable_names(lstm) for var_name in var_names: self.assertEqual(type(getattr(lstm, "_" + var_name)), variables.PartitionedVariable) @parameterized.parameters( *itertools.product( (True, False), (True, False), (True, False), (True, False)) ) def testRegularizers(self, use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): batch_size = 2 hidden_size = 4 keys = _get_possible_initializer_keys( use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c) regularizers = { key: tf.nn.l2_loss for key in keys } # Test we can successfully create the LSTM with regularizers. _, wrapped_lstm = _construct_lstm(hidden_size=hidden_size, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c, regularizers=regularizers) # Test we can build the LSTM inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) wrapped_lstm(inputs, (prev_hidden, prev_cell)) # Test that we have regularization losses. num_reg_losses = len(tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES)) if use_batch_norm_h or use_batch_norm_x: self.assertEqual(num_reg_losses, len(keys) + 1) else: self.assertLen(keys, num_reg_losses) # Pick some hopefully representative combination of parameter values # (want to test with seq_len < max_unique_stats and seq_len > # max_unique_stats, and some other combinations for good measure). @parameterized.parameters( (False, 1, 1, 2), (True, 3, 1, 2), (False, 1, 2, 1), (True, 3, 2, 1), (False, 3, 3, 5)) def testSameInStaticAndDynamicWithBatchNorm(self, use_peepholes, batch_size, max_unique_stats, seq_len): # Tests that when the cell is used in either a normal tensorflow rnn, or in # tensorflow's dynamic_rnn, that the output is the same. This is to test # test that the cores aren't doing anything funny they shouldn't be (like # relying on the number of times they've been invoked). 
hidden_size = 3 input_size = 3 inputs = tf.ones( dtype=tf.float32, shape=[batch_size, seq_len, input_size], name="inputs") static_inputs = tf.unstack(inputs, axis=1) test_local_stats = False cell = snt.BatchNormLSTM( hidden_size=hidden_size, max_unique_stats=max_unique_stats, use_peepholes=use_peepholes, use_batch_norm_h=True, use_batch_norm_x=True, use_batch_norm_c=True) def connect(training): static_output_unpacked, _ = contrib_rnn.static_rnn( cell.with_batch_norm_control( is_training=training, test_local_stats=test_local_stats), static_inputs, initial_state=cell.initial_state(batch_size, tf.float32)) static_output = tf.stack(static_output_unpacked, axis=1) dynamic_output, _ = tf.nn.dynamic_rnn( cell.with_batch_norm_control(is_training=training, test_local_stats=test_local_stats), inputs, initial_state=cell.initial_state(batch_size, tf.float32), dtype=tf.float32) return static_output, dynamic_output ops = connect(training=True) self.evaluate(tf.global_variables_initializer()) static_out, dynamic_out = self.evaluate(ops) self.assertAllClose(static_out, dynamic_out) # Do a pass to train the exponential moving statistics. for _ in range(5): if tf.executing_eagerly(): ops = connect(training=True) static_out, dynamic_out = self.evaluate(ops) self.assertAllClose(static_out, dynamic_out) # And check that same when using test statistics. ops = connect(training=False) static_out, dynamic_out = self.evaluate(ops) self.assertAllClose(static_out, dynamic_out) def testSameInStaticAndDynamic(self): batch_size = 3 seq_len = 2 hidden_size = 3 input_size = 3 inputs = tf.ones( dtype=tf.float32, shape=[batch_size, seq_len, input_size], name="inputs") static_inputs = tf.unstack(inputs, axis=1) cell = snt.LSTM(hidden_size=hidden_size) static_output_unpacked, _ = contrib_rnn.static_rnn( cell, static_inputs, initial_state=cell.initial_state(batch_size, tf.float32)) dynamic_output, _ = tf.nn.dynamic_rnn( cell, inputs, initial_state=cell.initial_state(batch_size, tf.float32), dtype=tf.float32) static_output = tf.stack(static_output_unpacked, axis=1) self.evaluate(tf.global_variables_initializer()) # Check that static and dynamic give the same output static_out, dynamic_out = self.evaluate([static_output, dynamic_output]) self.assertAllClose(static_out, dynamic_out) def testLayerNormVariables(self): core = snt.LSTM(hidden_size=3, use_layer_norm=True) batch_size = 3 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, 3, 3]) tf.nn.dynamic_rnn(core, inputs, initial_state=core.initial_state(batch_size, tf.float32)) self.assertTrue(core.use_layer_norm) expected = 4 # gate bias and one weight, plus LayerNorm's gamma, beta. self.assertLen(core.get_variables(), expected) def testHiddenClipping(self): core = snt.LSTM(hidden_size=5, hidden_clip_value=1.0) obs = tf.constant(np.random.rand(3, 10), dtype=tf.float32) unclipped = np.random.rand(3, 5) - 0.5 unclipped *= 2.0 / unclipped.max() unclipped = unclipped.astype(np.float32) clipped = unclipped.clip(-1., 1.) 
hidden = tf.constant(unclipped) cell = tf.constant(unclipped) output = core(obs, [hidden, cell]) self.evaluate(tf.global_variables_initializer()) output1, (hidden1, cell1) = self.evaluate(output) hidden = tf.constant(clipped) output = core(obs, [hidden, cell]) output2, (hidden2, cell2) = self.evaluate(output) self.assertAllClose(output1, output2) self.assertAllClose(hidden1, hidden2) self.assertAllClose(cell1, cell2) def testCellClipping(self): core = snt.LSTM(hidden_size=5, cell_clip_value=1.0) obs = tf.constant(np.random.rand(3, 10), dtype=tf.float32) unclipped = np.random.rand(3, 5) - 0.5 unclipped *= 2.0 / unclipped.max() unclipped = unclipped.astype(np.float32) clipped = unclipped.clip(-1., 1.) hidden = tf.constant(unclipped) cell = tf.constant(unclipped) output = core(obs, [hidden, cell]) self.evaluate(tf.global_variables_initializer()) output1, (hidden1, cell1) = self.evaluate(output) cell = tf.constant(clipped) output = core(obs, [hidden, cell]) output2, (hidden2, cell2) = self.evaluate(output) self.assertAllClose(output1, output2) self.assertAllClose(hidden1, hidden2) self.assertAllClose(cell1, cell2) @parameterized.parameters( (False, False, False, False), (False, True, False, False), (True, False, True, False), (False, True, True, False), (False, False, False, True), (True, True, False, True), (False, False, True, True), (False, True, True, True)) def testBatchNormVariables(self, use_peepholes, use_batch_norm_h, use_batch_norm_x, use_batch_norm_c): cell, wrapped_cell = _construct_lstm(hidden_size=3, use_peepholes=use_peepholes, use_batch_norm_h=use_batch_norm_h, use_batch_norm_x=use_batch_norm_x, use_batch_norm_c=use_batch_norm_c) # Need to connect the cell before it has variables batch_size = 3 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, 3, 3]) tf.nn.dynamic_rnn(wrapped_cell, inputs, initial_state=cell.initial_state(batch_size, tf.float32)) self.assertEqual(use_peepholes, cell.use_peepholes) if use_batch_norm_h or use_batch_norm_x or use_batch_norm_c: self.assertEqual(use_batch_norm_h, cell.use_batch_norm_h) self.assertEqual(use_batch_norm_x, cell.use_batch_norm_x) self.assertEqual(use_batch_norm_c, cell.use_batch_norm_c) if use_batch_norm_h or use_batch_norm_x: expected = 3 # gate bias and two weights else: expected = 2 # gate bias and weight if use_peepholes: expected += 3 if use_batch_norm_h: expected += 1 # gamma_h if use_batch_norm_x: expected += 1 # gamma_x if use_batch_norm_c: expected += 2 # gamma_c, beta_c self.assertLen(cell.get_variables(), expected) def testCheckMaxUniqueStats(self): self.assertRaisesRegexp(ValueError, ".*must be >= 1", snt.BatchNormLSTM, hidden_size=1, max_unique_stats=0) @parameterized.parameters( (False, 1), (False, 2), (True, 1), (True, 2)) def testTraining(self, trainable_initial_state, max_unique_stats): """Test that everything trains OK, with or without trainable init. 
state.""" hidden_size = 3 batch_size = 3 time_steps = 3 cell = snt.BatchNormLSTM(hidden_size=hidden_size, max_unique_stats=max_unique_stats) inputs = tf.constant(np.random.rand(batch_size, time_steps, 3), dtype=tf.float32) initial_state = cell.initial_state( batch_size, tf.float32, trainable_initial_state) def loss_fn(): output, _ = tf.nn.dynamic_rnn( cell.with_batch_norm_control(is_training=True), inputs, initial_state=initial_state, dtype=tf.float32) return tf.reduce_mean(tf.square( output - np.random.rand(batch_size, time_steps, hidden_size))) train_op = tf.train.GradientDescentOptimizer(1).minimize( loss_fn if tf.executing_eagerly() else loss_fn()) init = tf.global_variables_initializer() self.evaluate(init) self.evaluate(train_op) # Regression test. def testSideBySide(self): hidden_size = 3 batch_size = 4 lstm1 = snt.LSTM(hidden_size=hidden_size) lstm2 = snt.LSTM(hidden_size=hidden_size) lstm1.initial_state(batch_size, trainable=True) # Previously either of the two lines below would cause a crash due to # Variable name collision. lstm1.initial_state(batch_size, trainable=True) lstm2.initial_state(batch_size, trainable=True) def testInitialStateNames(self): if tf.executing_eagerly(): self.skipTest("Tensor name is not supported in eager mode.") lstm = snt.LSTM(hidden_size=3, name="foo") unnamed_init_state = lstm.initial_state(4, trainable=True) named_init_state = lstm.initial_state(4, trainable=True, name="bar") self.assertEqual(unnamed_init_state[0].name, "foo_initial_state/state_hidden_tiled:0") self.assertEqual(unnamed_init_state[1].name, "foo_initial_state/state_cell_tiled:0") self.assertEqual(named_init_state[0].name, "bar/state_hidden_tiled:0") self.assertEqual(named_init_state[1].name, "bar/state_cell_tiled:0") @contrib_eager.run_all_tests_in_graph_and_eager_modes class ConvLSTMTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters( (snt.Conv1DLSTM, 1, False), (snt.Conv1DLSTM, 1, True), (snt.Conv2DLSTM, 2, False), (snt.Conv2DLSTM, 2, True), ) def testShape(self, lstm_class, dim, use_bias): batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) output_shape = input_shape[:-1] + (output_channels,) inputs = tf.ones(dtype=tf.float32, shape=input_shape) prev_hidden = tf.ones(dtype=tf.float32, shape=output_shape) prev_cell = tf.ones(dtype=tf.float32, shape=output_shape) lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1, use_bias=use_bias) output, next_state = lstm(inputs, (prev_hidden, prev_cell)) expected_shape = np.ndarray(output_shape) self.assertShapeEqual(expected_shape, next_state[0]) self.assertShapeEqual(expected_shape, next_state[1]) self.assertShapeEqual(expected_shape, output) @parameterized.parameters( (snt.Conv1DLSTM, 1, False, False), (snt.Conv1DLSTM, 1, True, False), (snt.Conv2DLSTM, 2, False, False), (snt.Conv2DLSTM, 2, True, False), (snt.Conv1DLSTM, 1, False, True), (snt.Conv1DLSTM, 1, True, True), (snt.Conv2DLSTM, 2, False, True), (snt.Conv2DLSTM, 2, True, True), ) def testInitializers(self, lstm_class, dim, use_bias, legacy_bias_behaviour): keys = snt.Conv2DLSTM.get_possible_initializer_keys(use_bias) initializers = { key: tf.constant_initializer(i) for i, key in enumerate(keys) } batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) output_shape = input_shape[:-1] + (output_channels,) inputs = tf.ones(dtype=tf.float32, 
shape=input_shape) prev_hidden = tf.ones(dtype=tf.float32, shape=output_shape) prev_cell = tf.ones(dtype=tf.float32, shape=output_shape) # Test we can successfully create the LSTM with partitioners. lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1, use_bias=use_bias, legacy_bias_behaviour=legacy_bias_behaviour, initializers=initializers) lstm(inputs, (prev_hidden, prev_cell)) init = tf.global_variables_initializer() # Test that the initializers have been applied correctly. self.evaluate(init) for conv_key, convolution in lstm.convolutions.items(): for i, key in enumerate(keys): if not legacy_bias_behaviour and conv_key == "hidden" and key == "b": self.assertFalse(hasattr(convolution, key)) continue variable = getattr(convolution, key) self.assertAllClose(self.evaluate(variable), np.full(variable.get_shape(), i, dtype=np.float32)) @parameterized.parameters( (snt.Conv1DLSTM, 1, False, False), (snt.Conv1DLSTM, 1, True, False), (snt.Conv2DLSTM, 2, False, False), (snt.Conv2DLSTM, 2, True, False), (snt.Conv1DLSTM, 1, False, True), (snt.Conv1DLSTM, 1, True, True), (snt.Conv2DLSTM, 2, False, True), (snt.Conv2DLSTM, 2, True, True), ) def testPartitioners(self, lstm_class, dim, use_bias, legacy_bias_behaviour): if tf.executing_eagerly(): self.skipTest("Partitioned variables are not supported in eager.") keys = snt.Conv2DLSTM.get_possible_initializer_keys(use_bias) partitioners = { key: tf.variable_axis_size_partitioner(10) for key in keys } batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) output_shape = input_shape[:-1] + (output_channels,) inputs = tf.ones(dtype=tf.float32, shape=input_shape) prev_hidden = tf.ones(dtype=tf.float32, shape=output_shape) prev_cell = tf.ones(dtype=tf.float32, shape=output_shape) # Test we can successfully create the LSTM with partitioners. lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1, use_bias=use_bias, legacy_bias_behaviour=legacy_bias_behaviour, partitioners=partitioners) lstm(inputs, (prev_hidden, prev_cell)) # Test that the variables are partitioned. for conv_key, convolution in lstm.convolutions.items(): for key in keys: if not legacy_bias_behaviour and conv_key == "hidden" and key == "b": self.assertFalse(hasattr(convolution, key)) continue self.assertEqual(type(getattr(convolution, key)), variables.PartitionedVariable) @parameterized.parameters( (snt.Conv1DLSTM, 1, False, False), (snt.Conv1DLSTM, 1, True, False), (snt.Conv2DLSTM, 2, False, False), (snt.Conv2DLSTM, 2, True, False), (snt.Conv1DLSTM, 1, False, True), (snt.Conv1DLSTM, 1, True, True), (snt.Conv2DLSTM, 2, False, True), (snt.Conv2DLSTM, 2, True, True), ) def testRegularizers(self, lstm_class, dim, use_bias, legacy_bias_behaviour): keys = snt.Conv2DLSTM.get_possible_initializer_keys(use_bias) batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) output_shape = input_shape[:-1] + (output_channels,) inputs = tf.ones(dtype=tf.float32, shape=input_shape) prev_hidden = tf.ones(dtype=tf.float32, shape=output_shape) prev_cell = tf.ones(dtype=tf.float32, shape=output_shape) # Test we can successfully create the LSTM with partitioners. 
lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1, use_bias=use_bias, legacy_bias_behaviour=legacy_bias_behaviour, regularizers={key: tf.nn.l2_loss for key in keys}) lstm(inputs, (prev_hidden, prev_cell)) # Test that we have regularization losses. num_reg_losses = len(tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES)) num_reg_losses_expected = len(lstm.convolutions) * len(keys) if use_bias and not legacy_bias_behaviour: # Bias is not applied to hidden num_reg_losses_expected -= 1 self.assertEqual(num_reg_losses, num_reg_losses_expected) @parameterized.parameters( (snt.Conv1DLSTM, 1, False), (snt.Conv1DLSTM, 1, True), (snt.Conv2DLSTM, 2, False), (snt.Conv2DLSTM, 2, True), ) def testTraining(self, lstm_class, dim, trainable_initial_state): """Test that training works, with or without trainable initial state.""" time_steps = 1 batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=1) inputs = tf.random_normal((time_steps,) + input_shape, dtype=tf.float32) initial_state = lstm.initial_state( batch_size, tf.float32, trainable_initial_state) def loss_fn(): output, _ = tf.nn.dynamic_rnn(lstm, inputs, time_major=True, initial_state=initial_state, dtype=tf.float32) return tf.reduce_mean(tf.square(output)) train_op = tf.train.GradientDescentOptimizer(1).minimize( loss_fn if tf.executing_eagerly() else loss_fn()) init = tf.global_variables_initializer() self.evaluate(init) self.evaluate(train_op) @parameterized.parameters( (snt.Conv1DLSTM, 1, False, 1, 1), (snt.Conv1DLSTM, 1, False, 1, 5), (snt.Conv1DLSTM, 1, False, 6, 1), (snt.Conv1DLSTM, 1, False, 6, 5), (snt.Conv1DLSTM, 1, True, 1, 1), (snt.Conv1DLSTM, 1, True, 1, 5), (snt.Conv1DLSTM, 1, True, 6, 1), (snt.Conv1DLSTM, 1, True, 6, 5), (snt.Conv2DLSTM, 2, False, 1, 1), (snt.Conv2DLSTM, 2, False, 1, 5), (snt.Conv2DLSTM, 2, False, 6, 1), (snt.Conv2DLSTM, 2, False, 6, 5), (snt.Conv2DLSTM, 2, True, 1, 1), (snt.Conv2DLSTM, 2, True, 1, 5), (snt.Conv2DLSTM, 2, True, 6, 1), (snt.Conv2DLSTM, 2, True, 6, 5), ) def testDilatedConv(self, lstm_class, dim, trainable_initial_state, rate, kernel_shape): """Test that training works, with or without dilated convolutions.""" time_steps = 1 batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 input_shape = (batch_size,) + input_shape + (input_channels,) lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=kernel_shape, rate=rate) inputs = tf.random_normal((time_steps,) + input_shape, dtype=tf.float32) initial_state = lstm.initial_state( batch_size, tf.float32, trainable_initial_state) def loss_fn(): output, _ = tf.nn.dynamic_rnn(lstm, inputs, time_major=True, initial_state=initial_state, dtype=tf.float32) return tf.reduce_mean(tf.square(output)) train_op = tf.train.GradientDescentOptimizer(1).minimize( loss_fn if tf.executing_eagerly() else loss_fn()) init = tf.global_variables_initializer() self.evaluate(init) self.evaluate(train_op) @parameterized.parameters( (snt.Conv1DLSTM, 1, False, False, 2), (snt.Conv1DLSTM, 1, False, True, 4), (snt.Conv1DLSTM, 1, True, False, 4), (snt.Conv1DLSTM, 1, True, True, 6), (snt.Conv2DLSTM, 2, False, False, 2), (snt.Conv2DLSTM, 2, False, True, 4), (snt.Conv2DLSTM, 2, True, False, 4), (snt.Conv2DLSTM, 2, True, True, 6), ) def testLayerNormVariables(self, lstm_class, dim, use_bias, use_layer_norm, 
expected_num_variables): batch_size = 2 input_shape = (8,) * dim + (3,) lstm = lstm_class( input_shape=input_shape, output_channels=5, kernel_shape=3, use_bias=use_bias, use_layer_norm=use_layer_norm) inputs = tf.ones(dtype=tf.float32, shape=((1, batch_size) + input_shape)) initial_state = lstm.initial_state(batch_size, tf.float32) tf.nn.dynamic_rnn(lstm, inputs, time_major=True, initial_state=initial_state, dtype=tf.float32) self.assertEqual(use_layer_norm, lstm.use_layer_norm) # Expect the following variables: # Weight, and bias if present, to apply to input # Weight, and bias if present, to apply to hidden state # LayerNorm's gamma and beta, if present self.assertLen(lstm.get_variables(), expected_num_variables) @parameterized.parameters( (snt.Conv1DLSTM, 1, False), (snt.Conv1DLSTM, 1, True), (snt.Conv2DLSTM, 2, False), (snt.Conv2DLSTM, 2, True), ) def testLayerNorm(self, lstm_class, dim, use_bias): """Test that training works, with or without dilated convolutions.""" time_steps = 3 batch_size = 2 input_shape = (8,) * dim input_channels = 3 output_channels = 5 kernel_shape = 3 input_shape = (batch_size,) + input_shape + (input_channels,) lstm = lstm_class( input_shape=input_shape[1:], output_channels=output_channels, kernel_shape=kernel_shape, use_bias=use_bias, use_layer_norm=True) inputs = tf.random_normal((time_steps,) + input_shape, dtype=tf.float32) initial_state = lstm.initial_state(batch_size, tf.float32) def loss_fn(): output, _ = tf.nn.dynamic_rnn(lstm, inputs, time_major=True, initial_state=initial_state, dtype=tf.float32) return tf.reduce_mean(tf.square(output)) train_op = tf.train.GradientDescentOptimizer(1).minimize( loss_fn if tf.executing_eagerly() else loss_fn()) init = tf.global_variables_initializer() self.evaluate(init) self.evaluate(train_op) @contrib_eager.run_all_tests_in_graph_and_eager_modes class GRUTest(tf.test.TestCase, parameterized.TestCase): def testShape(self): batch_size = 2 hidden_size = 4 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) gru = snt.GRU(hidden_size, name="rnn") output, next_state = gru(inputs, state) shape = np.ndarray((batch_size, hidden_size)) self.assertShapeEqual(shape, next_state) self.assertShapeEqual(shape, output) @parameterized.parameters( {"input_size": 4, "hidden_size": 5}, {"input_size": 16, "hidden_size": 128}) def testVariables(self, input_size, hidden_size): batch_size = 5 mod_name = "rnn" inputs = tf.ones(dtype=tf.float32, shape=[batch_size, input_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) gru = snt.GRU(hidden_size, name=mod_name) self.assertEqual(gru.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): gru.get_variables() gru(inputs, state) gru_variables = gru.get_variables() self.assertLen(gru_variables, 9, "GRU should have 9 variables") param_map = {param.name.split("/")[-1].split(":")[0]: param for param in gru_variables} for part in ["z", "r", "h"]: self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map["b" + part])) self.assertShapeEqual( np.ndarray((hidden_size, hidden_size)), tf.convert_to_tensor(param_map["u" + part])) self.assertShapeEqual( np.ndarray((input_size, hidden_size)), tf.convert_to_tensor(param_map["w" + part])) def testComputation(self): def sigmoid(x): return 1 / (1 + np.exp(-x)) batch_size = 2 input_size = 3 hidden_size = 5 # With random data, check the TF calculation matches the Numpy version. 
input_data = np.random.randn(batch_size, input_size) state_data = np.random.randn(batch_size, hidden_size) inputs = tf.constant(input_data) state_in = tf.constant(state_data) gru = snt.GRU(hidden_size, name="rnn") _, state = gru(inputs, state_in) gru_variables = gru.get_variables() param_map = {param.name.split("/")[-1].split(":")[0]: param for param in gru_variables} self.evaluate(tf.global_variables_initializer()) fetches = [ state, param_map["wz"], param_map["uz"], param_map["bz"], param_map["wr"], param_map["ur"], param_map["br"], param_map["wh"], param_map["uh"], param_map["bh"] ] output = self.evaluate(fetches) state_ex, wz, uz, bz, wr, ur, br, wh, uh, bh = output z = sigmoid(np.dot(input_data, wz) + np.dot(state_data, uz) + bz) r = sigmoid(np.dot(input_data, wr) + np.dot(state_data, ur) + br) reset_state = r * state_data h_twiddle = np.tanh(np.dot(input_data, wh) + np.dot(reset_state, uh)+ bh) state_real = (1 - z) * state_data + z * h_twiddle self.assertAllClose(state_real, state_ex) def testInitializers(self): batch_size = 2 hidden_size = 4 # Test we can successfully create the GRU with initializers. keys = snt.GRU.POSSIBLE_INITIALIZER_KEYS initializers = { key: tf.constant_initializer(i) for i, key in enumerate(keys) } gru = snt.GRU(hidden_size, initializers=initializers) # Test we can build the GRU. inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) gru(inputs, state) init = tf.global_variables_initializer() # Test that the initializers have been correctly applied. gru_variables = [getattr(gru, "_" + key) for key in keys] self.evaluate(init) gru_variables_v = self.evaluate(gru_variables) for i, gru_variable_v in enumerate(gru_variables_v): self.assertAllClose(gru_variable_v, i * np.ones(gru_variable_v.shape)) def testPartitioners(self): if tf.executing_eagerly(): self.skipTest("Partitioned variables are not supported in eager mode.") batch_size = 2 hidden_size = 4 # Test we can successfully create the GRU with partitioners. keys = snt.GRU.POSSIBLE_INITIALIZER_KEYS partitioners = { key: tf.variable_axis_size_partitioner(10) for key in keys } gru = snt.GRU(hidden_size, partitioners=partitioners) # Test we can build the GRU. inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) gru(inputs, state) # Test that the variables are partitioned. for key in keys: self.assertEqual(type(getattr(gru, "_" + key)), variables.PartitionedVariable) def testRegularizers(self): batch_size = 2 hidden_size = 4 # Test we can successfully create the GRU with regularizers. keys = snt.GRU.POSSIBLE_INITIALIZER_KEYS regularizers = { key: tf.nn.l2_loss for key in keys } gru = snt.GRU(hidden_size, regularizers=regularizers) # Test we can build the GRU. inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) gru(inputs, state) # Test that we have regularization losses. 
self.assertLen(tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES), len(keys)) @contrib_eager.run_all_tests_in_graph_and_eager_modes class HighwayCoreTest(tf.test.TestCase, parameterized.TestCase): def testShape(self): batch_size = 2 hidden_size = 4 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) core = snt.HighwayCore(hidden_size, num_layers=3) output, next_state = core(inputs, state) shape = np.ndarray((batch_size, hidden_size)) self.assertShapeEqual(shape, next_state) self.assertShapeEqual(shape, output) def testVariables(self): batch_size = 5 input_size = 10 hidden_size = 20 num_layers = 3 mod_name = "rnn" inputs = tf.ones(dtype=tf.float32, shape=[batch_size, input_size]) state = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) core = snt.HighwayCore(hidden_size, num_layers, name=mod_name) self.assertEqual(core.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): core.get_variables() core(inputs, state) core_variables = core.get_variables() self.assertLen(core_variables, 2 + 4 * num_layers) param_map = {param.name.split("/")[-1].split(":")[0]: param for param in core_variables} self.assertShapeEqual( np.ndarray((input_size, hidden_size)), tf.convert_to_tensor(param_map["wt"])) self.assertShapeEqual( np.ndarray((input_size, hidden_size)), tf.convert_to_tensor(param_map["wh"])) for layer_index in xrange(num_layers): layer_str = str(layer_index) self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map["bt" + layer_str])) self.assertShapeEqual( np.ndarray(hidden_size), tf.convert_to_tensor(param_map["bh" + layer_str])) self.assertShapeEqual( np.ndarray((hidden_size, hidden_size)), tf.convert_to_tensor(param_map["wt" + layer_str])) self.assertShapeEqual( np.ndarray((hidden_size, hidden_size)), tf.convert_to_tensor(param_map["wh" + layer_str])) @parameterized.parameters(True, False) def testComputation(self, with_dropout): """Checks that the TF and numpy versions match on random data.""" def sigmoid(x): return 1 / (1 + np.exp(-x)) batch_size = 2 input_size = 3 hidden_size = 5 num_layers = 2 input_data = np.random.randn(batch_size, input_size) state_data = np.random.randn(batch_size, hidden_size) inputs = tf.constant(input_data) state_in = tf.constant(state_data) if with_dropout: core, test_core = snt.highway_core_with_recurrent_dropout( hidden_size, num_layers, keep_prob=1.0) initial_state = core.initial_state(batch_size, dtype=tf.float64) _, state = core(inputs, (state_in, initial_state[1])) core_variables = test_core.get_variables() else: core = snt.HighwayCore(hidden_size, num_layers, name="rnn") _, state = core(inputs, state_in) core_variables = core.get_variables() param_map = {param.name.split("/")[-1].split(":")[0]: param for param in core_variables} param_names = ["wt", "wh"] param_names += ["wt0", "bt0", "wh0", "bh0", "wt1", "bt1", "wh1", "bh1"] self.evaluate(tf.global_variables_initializer()) fetches = [state] + [param_map[name] for name in param_names] output = self.evaluate(fetches) state_ex, wt, wh, wt0, bt0, wh0, bh0, wt1, bt1, wh1, bh1 = output # Layer 1 computation. t = sigmoid(np.dot(input_data, wt) + np.dot(state_data, wt0) + bt0) h = np.tanh(np.dot(input_data, wh) + np.dot(state_data, wh0) + bh0) state_data = (1 - t) * state_data + t * h # Layer 2 computation. 
t = sigmoid(np.dot(state_data, wt1) + bt1) h = np.tanh(np.dot(state_data, wh1) + bh1) state_data = (1 - t) * state_data + t * h if with_dropout: state_ex = state_ex[0] self.assertAllClose(state_data, state_ex) @contrib_eager.run_all_tests_in_graph_and_eager_modes class LSTMBlockCellTest(tf.test.TestCase, parameterized.TestCase): def testShape(self): batch_size = 2 hidden_size = 5 inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) lstm = snt.LSTMBlockCell(hidden_size) output, next_state = lstm(inputs, (prev_hidden, prev_cell)) shape = np.ndarray((batch_size, hidden_size)) self.assertShapeEqual(shape, next_state[0]) self.assertShapeEqual(shape, next_state[1]) self.assertShapeEqual(shape, output) self.assertEqual(hidden_size, lstm.output_size) self.assertEqual((hidden_size, hidden_size), lstm.state_size) def testVariables(self): batch_size = 5 hidden_size = 20 mod_name = "lstm_block" inputs = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_cell = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) prev_hidden = tf.ones(dtype=tf.float32, shape=[batch_size, hidden_size]) lstm = snt.LSTMBlockCell(hidden_size, name=mod_name) self.assertEqual(lstm.scope_name, mod_name) with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): lstm.get_variables() lstm(inputs, (prev_hidden, prev_cell)) lstm_variables = lstm.get_variables() self.assertLen(lstm_variables, 2, "LSTM should have 2 variables") param_map = {param.name.split("/")[-1].split(":")[0]: param for param in lstm_variables} self.assertShapeEqual(np.ndarray(4 * hidden_size), tf.convert_to_tensor(param_map["bias"])) self.assertShapeEqual(np.ndarray((2 * hidden_size, 4 * hidden_size)), tf.convert_to_tensor(param_map["kernel"])) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/gated_rnn_test.py
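Note on the reference math above: GRUTest.testComputation checks snt.GRU against a NumPy implementation of the gated recurrent unit. The following is only a restatement of that test's arithmetic (weight symbols mirror the variables wz/uz/bz, wr/ur/br, wh/uh/bh; \odot is elementwise multiplication), not independent documentation of snt.GRU:

    z_t         = \sigma(x_t W_z + h_{t-1} U_z + b_z)
    r_t         = \sigma(x_t W_r + h_{t-1} U_r + b_r)
    \tilde{h}_t = \tanh(x_t W_h + (r_t \odot h_{t-1}) U_h + b_h)
    h_t         = (1 - z_t) \odot h_{t-1} + z_t \odot \tilde{h}_t

HighwayCoreTest.testComputation verifies the analogous per-layer gated update, s \leftarrow (1 - t) \odot s + t \odot \tanh(\cdot), with the transform gate t produced by a sigmoid.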
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for constrained optimization tools in Sonnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import mock from sonnet.python.modules import basic from sonnet.python.modules import moving_average from sonnet.python.modules import optimization_constraints from sonnet.python.modules import scale_gradient import tensorflow.compat.v1 as tf class OptimizationConstrainsTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters([ (0.5, 0.5), (17.3, 17.3), (tf.constant_initializer(3.14), 3.14), (tf.ones_initializer(), 1.0) ]) def testLagrangeMultInit(self, initializer, exp_lag_mul): cons = optimization_constraints.OptimizationConstraints() lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) cons.add(lhs > rhs, initializer=initializer)() l = cons.lagrange_multipliers[0] with tf.train.MonitoredSession() as sess: lag_mul = sess.run(l) self.assertAllClose(lag_mul, exp_lag_mul) @mock.patch.object(optimization_constraints, '_parametrize') def testRateDefaults(self, mocked_parametrized): mocked_parametrized.side_effect = ( lambda x, rate: scale_gradient.scale_gradient(x, -rate)) rate = 0.1 cons = optimization_constraints.OptimizationConstraints(rate=rate) lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) x = cons.add(lhs < rhs)() v = tf.all_variables()[0] dxdl = tf.gradients(x, v) with tf.train.MonitoredSession() as sess: grads = sess.run(dxdl) self.assertAllClose(grads[0], rate) @mock.patch.object(optimization_constraints, '_parametrize') def testRateOverrides(self, mocked_parametrized): mocked_parametrized.side_effect = ( lambda x, rate: scale_gradient.scale_gradient(x, -rate)) rate = 7.3 cons = optimization_constraints.OptimizationConstraints() lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) x = cons.add(lhs < rhs, rate=rate)() v = tf.all_variables()[0] dxdl = tf.gradients(x, v) with tf.train.MonitoredSession() as sess: grads = sess.run(dxdl) self.assertAllClose(grads[0], rate) def testValidRangeDefaults(self): valid_range = (1.0, 2.0) cons = optimization_constraints.OptimizationConstraints( valid_range=valid_range) lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) cons.add(lhs < rhs, initializer=3.0)() with tf.train.MonitoredSession() as sess: lag_mul = sess.run(cons.lagrange_multipliers[0]) self.assertAllClose(lag_mul, valid_range[1]) def testValidRangeOverrides(self): cons = optimization_constraints.OptimizationConstraints() lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) valid_range = (1.0, 2.0) cons.add(lhs < rhs, initializer=3.0, valid_range=valid_range)() with tf.train.MonitoredSession() as sess: lag_mul = sess.run(cons.lagrange_multipliers[0]) self.assertAllClose(lag_mul, valid_range[1]) @mock.patch.object( optimization_constraints.OptimizationConstraints, 'add_geq') @mock.patch.object( 
optimization_constraints.OptimizationConstraints, 'add_leq') def testOpIdentification(self, mocked_add_leq, mocked_add_geq): calls_to_add_leq = [0] def mock_add_leq(*args, **kwargs): del args del kwargs calls_to_add_leq[0] += 1 mocked_add_leq.side_effect = mock_add_leq calls_to_add_geq = [0] def mock_add_geq(*args, **kwargs): del args del kwargs calls_to_add_geq[0] += 1 mocked_add_geq.side_effect = mock_add_geq cons = optimization_constraints.OptimizationConstraints() lhs = tf.zeros_like(1.0) rhs = tf.ones_like(1.0) self.assertEqual(calls_to_add_leq[0], 0) self.assertEqual(calls_to_add_geq[0], 0) cons.add(lhs < rhs) self.assertEqual(calls_to_add_leq[0], 1) self.assertEqual(calls_to_add_geq[0], 0) cons.add(lhs <= rhs) self.assertEqual(calls_to_add_leq[0], 2) self.assertEqual(calls_to_add_geq[0], 0) cons.add(lhs > rhs) self.assertEqual(calls_to_add_geq[0], 1) self.assertEqual(calls_to_add_leq[0], 2) cons.add(lhs >= rhs) self.assertEqual(calls_to_add_geq[0], 2) self.assertEqual(calls_to_add_leq[0], 2) def testMinimalRun(self): x = basic.TrainableVariable( shape=(), initializers={'w': tf.ones_initializer()})() x2 = x ** 2.0 min_value = 0.5 constr = optimization_constraints.OptimizationConstraints().add( x > min_value) self.assertFalse(constr._is_connected) loss = moving_average.MovingAverage()( x2 + tf.random.normal((), stddev=1.0)) + constr() self.assertTrue(constr._is_connected) with self.assertRaisesRegexp(ValueError, 'Cannot add further constraints'): constr.add(x > min_value) with self.assertRaisesRegexp(ValueError, 'Cannot add further constraints'): constr.add_geq(x, min_value) with self.assertRaisesRegexp(ValueError, 'Cannot add further constraints'): constr.add_leq(min_value < x) opt = tf.train.AdamOptimizer(1e-2, beta1=0.0) update = opt.minimize(loss) with tf.control_dependencies([update]): x2 = tf.identity(x2) with tf.train.MonitoredSession() as sess: for _ in range(500): v, _ = sess.run([x2, update]) self.assertAllClose(v, min_value**2) class ConstrainToRangeTest(tf.test.TestCase, parameterized.TestCase): @parameterized.parameters([ (0.5, lambda x: x, [1.0]), (1.5, lambda x: x, [0.0]), (1.5, lambda x: -x, [-1.0]), (-.5, lambda x: x, [1.0]), (-.5, lambda x: -x, [0.0]), ]) def testOpWrtGradients(self, init_value, fn, expected_grad): x = tf.get_variable( name='x', shape=(), initializer=tf.constant_initializer(init_value)) max_val = 1.0 min_val = 0.0 z = optimization_constraints._constrain_to_range(x, min_val, max_val) g = tf.gradients(fn(z), x) with tf.train.MonitoredSession() as sess: grads, clipped_vals = sess.run([g, z]) self.assertAllEqual(grads, expected_grad) self.assertGreaterEqual(clipped_vals, min_val) self.assertLessEqual(clipped_vals, max_val) def testOpMemoization(self): def _run_in_new_graph(): with tf.Graph().as_default(): z = optimization_constraints._constrain_to_range(tf.zeros((1,)), 0, 1) with tf.train.MonitoredSession() as sess: sess.run(z) for _ in range(10): _run_in_new_graph() if __name__ == '__main__': tf.test.main()
sonnet-1
sonnet/python/modules/optimization_constraints_test.py
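Note on usage: the tests above exercise OptimizationConstraints by registering inequalities with add() and adding the connected module's output to a loss. Below is a minimal sketch assembled only from calls that appear in OptimizationConstrainsTest.testMinimalRun; the variable name x, the squared loss, and the 1e-2 learning rate are illustrative assumptions rather than anything prescribed by the module.

import tensorflow.compat.v1 as tf
from sonnet.python.modules import optimization_constraints

# A scalar the loss pushes towards zero, which we want to keep above min_value.
x = tf.get_variable('x', shape=(), initializer=tf.ones_initializer())
min_value = 0.5

# add() registers the inequality and returns the constraints object; calling the
# object connects it and yields the Lagrange-multiplier penalty term.
constr = optimization_constraints.OptimizationConstraints().add(x > min_value)
loss = tf.square(x) + constr()

update = tf.train.AdamOptimizer(1e-2, beta1=0.0).minimize(loss)
with tf.train.MonitoredSession() as sess:
  for _ in range(500):
    sess.run(update)  # testMinimalRun asserts x ends up near the boundary min_value.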
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sonnet Modules in TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
sonnet-1
sonnet/python/modules/__init__.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =================================================== """Tests for differentiable moving average in Sonnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized from six.moves import range from sonnet.python.modules import moving_average import tensorflow.compat.v1 as tf class MovingAverageTest(parameterized.TestCase, tf.test.TestCase): def testFirst(self): var = tf.Variable(10.0) avg = moving_average.MovingAverage(decay=0.9)(var) with tf.train.MonitoredSession() as sess: avg_value = sess.run(avg) # The avg should be equal to the var after only one iteration self.assertEqual(avg_value, 10.0) def testReset(self): val = tf.placeholder(shape=(), dtype=tf.float32) module = moving_average.MovingAverage(decay=0.9) avg = module(val) reset = module.reset() with tf.train.MonitoredSession() as sess: avg_value = sess.run(avg, feed_dict={val: 10.0}) # The avg should be equal to the var after only one iteration self.assertEqual(avg_value, 10.0) sess.run(reset) avg_value = sess.run(avg, feed_dict={val: 100.0}) # The avg should be equal to the var after only one iteration, again self.assertEqual(avg_value, 100.0) @parameterized.named_parameters( ('no_resource_vars', False), ('resource_vars', True)) def testAverage(self, use_resource_vars): decay = 0.9 num_steps = 10 init_value = 3.14 with tf.variable_scope('', use_resource=use_resource_vars): var = tf.get_variable( 'var', (), initializer=tf.constant_initializer(init_value)) avg = moving_average.MovingAverage(decay=decay)(tf.identity(var)) with tf.control_dependencies([avg]): increment = tf.assign_add(var, 1.0) with tf.train.MonitoredSession() as sess: expected_value = init_value x = init_value for _ in range(num_steps): avg_value, _ = sess.run([avg, increment]) self.assertNear(avg_value, expected_value, 1e-4) x += 1 expected_value = expected_value * decay + x * (1 - decay) def testAssertDecayIsValid(self): with self.assertRaisesRegexp(ValueError, 'Decay must be'): moving_average.MovingAverage(decay=2.0) def testIsDifferentiable(self): x = tf.get_variable(name='x', shape=()) mva = moving_average.MovingAverage(decay=0.99, local=False) y = mva(x) dydx = tf.gradients(y, x) z = mva(2 * x) dzdx = tf.gradients(z, x) with tf.train.MonitoredSession() as sess: df = sess.run([dydx, dzdx]) self.assertEqual(df[0], [1.0]) self.assertEqual(df[1], [2.0]) def testOpMemoization(self): def _run_in_new_graph(): with tf.Graph().as_default(): x = tf.ones((1,)) y = tf.zeros((1,)) z = moving_average._pass_through_gradients(x, y) gx = tf.gradients(z, x) with tf.train.MonitoredSession() as sess: sess.run([z, gx]) for _ in range(10): _run_in_new_graph() if __name__ == '__main__': tf.test.main()
sonnet-1
sonnet/python/modules/moving_average_test.py
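Note on the reference math above: MovingAverageTest.testAverage tracks its expected values with the exponential-moving-average recurrence, and testFirst pins the first output to the first input. Restating only that arithmetic (m_t is the running average, x_t the input at step t):

    m_1 = x_1, \qquad m_t = \text{decay} \cdot m_{t-1} + (1 - \text{decay}) \cdot x_t \quad \text{for } t > 1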
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import os import tempfile # Dependency imports from absl.testing import parameterized import contextlib2 import mock import numpy as np import sonnet as snt import sonnet.python.modules.util as util import tensorflow.compat.v1 as tf from tensorflow.contrib import layers as contrib_layers from tensorflow.contrib.eager.python import tfe as contrib_eager from tensorflow.python.ops import variable_scope as variable_scope_ops # We have a first "\" for the new line and one at the end. The rest is a direct # copy-paste of the ground truth output, with the {type} formatting placeholder. _EXPECTED_FORMATTED_VARIABLE_LIST = ("""\ Variable Shape Type Collections Device m1/v1 3x4 float32 global_variables, trainable_variables ({type}) m2/v2 5x6 float32 local_variables /device:GPU:* ({type})\ """) _EXPECTED_FORMATTED_VARIABLE_MAP = ("""\ Key Variable Shape Type Collections Device vv1 m1/v1 3x4 float32 global_variables, trainable_variables ({type}) vv2 m2/v2 5x6 float32 local_variables /device:GPU:* ({type})\ """) class UtilTest(parameterized.TestCase, tf.test.TestCase): def testQueryInModule(self): module = snt.Linear(output_size=42, name="linear") with self.assertRaisesRegexp(snt.Error, "not instantiated yet"): module.get_variables() # Compare to the desired result set, after connection. input_ = tf.placeholder(tf.float32, shape=[3, 4]) _ = module(input_) self.assertEqual(set(module.get_variables()), {module.w, module.b}) self.assertEqual(set(snt.get_variables_in_module(module)), {module.w, module.b}) def testScopeQuery(self): with tf.variable_scope("prefix") as s1: v1 = tf.get_variable("a", shape=[3, 4]) with tf.variable_scope("prefix_with_more_stuff") as s2: v2 = tf.get_variable("b", shape=[5, 6]) v3 = tf.get_variable("c", shape=[7]) # get_variables_in_scope should add a "/" to only search that scope, not # any others which share the same prefix. 
self.assertEqual(snt.get_variables_in_scope(s1), (v1,)) self.assertEqual(set(snt.get_variables_in_scope(s2)), {v2, v3}) self.assertEqual(snt.get_variables_in_scope(s1.name), (v1,)) self.assertEqual(set(snt.get_variables_in_scope(s2.name)), {v2, v3}) self.assertEqual(set(snt.get_variables_in_scope("")), {v1, v2, v3}) def testIsScopePrefix(self): self.assertTrue(util._is_scope_prefix("a/b/c", "")) self.assertTrue(util._is_scope_prefix("a/b/c", "a/b/c")) self.assertTrue(util._is_scope_prefix("a/b/c", "a/b")) self.assertTrue(util._is_scope_prefix("a/b/c", "a")) self.assertTrue(util._is_scope_prefix("a/b/c", "a/")) self.assertFalse(util._is_scope_prefix("a/b/c", "b")) self.assertFalse(util._is_scope_prefix("ab/c", "a")) def testGetNormalizedVariableMapScope(self): with tf.variable_scope("prefix") as s1: v1 = tf.get_variable("a", shape=[5, 6]) v2 = tf.get_variable("b", shape=[7]) variable_map = snt.get_normalized_variable_map(s1) self.assertLen(variable_map, 2) self.assertIn("a", variable_map) self.assertIn("b", variable_map) self.assertIs(variable_map["a"], v1) self.assertIs(variable_map["b"], v2) def testGetNormalizedVariableMapScopeContext(self): with tf.variable_scope("prefix1") as s1: with tf.variable_scope("prefix2") as s2: v1 = tf.get_variable("a", shape=[5, 6]) v2 = tf.get_variable("b", shape=[7]) with tf.variable_scope("prefix") as s3: _ = tf.get_variable("c", shape=[8]) err = r"Scope 'prefix1/prefix2' is not prefixed by 'prefix'." with self.assertRaisesRegexp(ValueError, err): variable_map = snt.get_normalized_variable_map(s2, context=s3) variable_map = snt.get_normalized_variable_map(s2, context=s1) self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1), variable_map) self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1.name), variable_map) self.assertLen(variable_map, 2) self.assertIn("prefix2/a", variable_map) self.assertIn("prefix2/b", variable_map) self.assertIs(variable_map["prefix2/a"], v1) self.assertIs(variable_map["prefix2/b"], v2) with tf.variable_scope("") as s4: self.assertEqual(s4.name, "") variable_map = snt.get_normalized_variable_map(s2, context=s4) self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4), variable_map) self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4.name), variable_map) self.assertLen(variable_map, 2) self.assertIn("prefix1/prefix2/a", variable_map) self.assertIn("prefix1/prefix2/b", variable_map) self.assertIs(variable_map["prefix1/prefix2/a"], v1) self.assertIs(variable_map["prefix1/prefix2/b"], v2) def testGetNormalizedVariableMapModule(self): input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3]) conv = snt.Conv2D(output_channels=3, kernel_shape=3) conv(input_) variable_map = snt.get_normalized_variable_map(conv) self.assertLen(variable_map, 2) self.assertIn("w", variable_map) self.assertIn("b", variable_map) self.assertIs(variable_map["w"], conv.w) self.assertIs(variable_map["b"], conv.b) def testGetNormalizedVariableMapWithPartitionedVariable(self): hidden = tf.ones(shape=(1, 16, 16, 3)) partitioner = tf.variable_axis_size_partitioner(4) conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1, partitioners={"w": partitioner}) conv(hidden) variable_map = snt.get_normalized_variable_map(conv, group_sliced_variables=True) self.assertLen(variable_map, 2) self.assertEqual(variable_map["b"], conv.b) self.assertLen(variable_map["w"], 3) variable_map = snt.get_normalized_variable_map(conv, group_sliced_variables=False) self.assertEqual(variable_map["b"], conv.b) self.assertEqual( 
set(variable_map), set(["b", "w/part_0", "w/part_1", "w/part_2"])) def testVariableMapItems(self): hidden = tf.ones(shape=(1, 16, 16, 3)) partitioner = tf.variable_axis_size_partitioner(4) conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1, partitioners={"w": partitioner}) conv(hidden) variable_map = snt.get_normalized_variable_map(conv) items = snt.variable_map_items(variable_map) items_str = sorted((key, var.op.name) for key, var in items) self.assertEqual(items_str, [(u"b", u"conv_2d/b"), ("w", u"conv_2d/w/part_0"), ("w", u"conv_2d/w/part_1"), ("w", u"conv_2d/w/part_2")]) def testGetSaverScope(self): with tf.variable_scope("prefix") as s1: tf.get_variable("a", shape=[5, 6]) tf.get_variable("b", shape=[7]) saver = snt.get_saver(s1) self.assertIsInstance(saver, tf.train.Saver) self.assertEqual(set(saver._var_list), set(["a", "b"])) def testGetSaverModule(self): input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3]) conv = snt.Conv2D(output_channels=3, kernel_shape=3) conv(input_) saver = snt.get_saver(conv) self.assertIsInstance(saver, tf.train.Saver) self.assertIn("w", saver._var_list) self.assertIn("b", saver._var_list) def _create_conv(self, partitioned, name): hidden = tf.ones(shape=(1, 16, 16, 3)) if partitioned: partitioners = {"w": tf.variable_axis_size_partitioner(4)} else: partitioners = None conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1, partitioners=partitioners, name=name) conv(hidden) return conv @parameterized.parameters( {"save_partitioned": True, "load_partitioned": True}, {"save_partitioned": True, "load_partitioned": False}, {"save_partitioned": False, "load_partitioned": True}, {"save_partitioned": False, "load_partitioned": False}) def testGetSaverPartitioned(self, save_partitioned, load_partitioned): path = os.path.join(tempfile.mkdtemp(), "ckpt") # Save checkpoint. with self.test_session() as sess: conv = self._create_conv(partitioned=save_partitioned, name="a") saver = snt.get_saver(conv) sess.run(tf.global_variables_initializer()) saver.save(sess, path) w = tf.identity(conv.w) w_value = sess.run(w) # Restore checkpoint. 
with self.test_session() as sess: conv = self._create_conv(partitioned=load_partitioned, name="b") saver = snt.get_saver(conv) saver.restore(sess, path) w = tf.identity(conv.w) self.assertAllEqual(sess.run(w), w_value) def testCollectionGetVariableInScope(self): with tf.variable_scope("prefix") as s1: tf.get_variable("a", shape=[1], collections=["test"], trainable=False) self.assertEmpty(snt.get_variables_in_scope(s1)) self.assertEmpty(snt.get_variables_in_scope(s1, collection="test2")) self.assertLen(snt.get_variables_in_scope(s1, collection="test"), 1) def testCollectionGetSaver(self): with tf.variable_scope("prefix") as s1: input_ = tf.placeholder(tf.float32, shape=[3, 4]) net = snt.Linear(10)(input_) net = snt.BatchNorm()(net, is_training=True) saver1 = snt.get_saver(s1) saver2 = snt.get_saver(s1, collections=( tf.GraphKeys.TRAINABLE_VARIABLES,)) self.assertIsInstance(saver1, tf.train.Saver) self.assertIsInstance(saver2, tf.train.Saver) self.assertLen(saver1._var_list, 5) self.assertIn("linear/w", saver1._var_list) self.assertIn("linear/b", saver1._var_list) self.assertIn("batch_norm/beta", saver1._var_list) self.assertIn("batch_norm/moving_mean", saver1._var_list) self.assertIn("batch_norm/moving_variance", saver1._var_list) self.assertLen(saver2._var_list, 3) self.assertIn("linear/w", saver2._var_list) self.assertIn("linear/b", saver2._var_list) self.assertIn("batch_norm/beta", saver2._var_list) self.assertNotIn("batch_norm/moving_mean", saver2._var_list) self.assertNotIn("batch_norm/moving_variance", saver2._var_list) def testCheckInitializers(self): initializers = { "key_a": tf.truncated_normal_initializer(mean=0, stddev=1), "key_c": tf.truncated_normal_initializer(mean=0, stddev=1), } keys = ["key_a", "key_b"] self.assertRaisesRegexp(KeyError, "Invalid initializer keys.*", snt.check_initializers, initializers=initializers, keys=keys) del initializers["key_c"] initializers["key_b"] = "not a function" self.assertRaisesRegexp(TypeError, "Initializer for.*", snt.check_initializers, initializers=initializers, keys=keys) initializers["key_b"] = {"key_c": "not a function"} self.assertRaisesRegexp(TypeError, "Initializer for.*", snt.check_initializers, initializers=initializers, keys=keys) initializers["key_b"] = { "key_c": tf.truncated_normal_initializer(mean=0, stddev=1), "key_d": tf.truncated_normal_initializer(mean=0, stddev=1), } snt.check_initializers(initializers=initializers, keys=keys) def testCheckPartitioners(self): partitioners = {"key_a": tf.variable_axis_size_partitioner(10), "key_c": tf.variable_axis_size_partitioner(10)} keys = ["key_a", "key_b"] self.assertRaisesRegexp(KeyError, "Invalid partitioner keys.*", snt.check_partitioners, partitioners=partitioners, keys=keys) del partitioners["key_c"] partitioners["key_b"] = "not a function" self.assertRaisesRegexp(TypeError, "Partitioner for.*", snt.check_partitioners, partitioners=partitioners, keys=keys) partitioners["key_b"] = {"key_c": "not a function"} self.assertRaisesRegexp(TypeError, "Partitioner for.*", snt.check_partitioners, partitioners=partitioners, keys=keys) partitioners["key_b"] = { "key_c": tf.variable_axis_size_partitioner(10), "key_d": tf.variable_axis_size_partitioner(10), } snt.check_partitioners(partitioners=partitioners, keys=keys) def testCheckRegularizers(self): regularizers = { "key_a": contrib_layers.l1_regularizer(scale=0.5), "key_c": contrib_layers.l2_regularizer(scale=0.5), } keys = ["key_a", "key_b"] self.assertRaisesRegexp(KeyError, "Invalid regularizer keys.*", snt.check_regularizers, 
regularizers=regularizers, keys=keys) del regularizers["key_c"] regularizers["key_b"] = "not a function" self.assertRaisesRegexp(TypeError, "Regularizer for.*", snt.check_regularizers, regularizers=regularizers, keys=keys) regularizers["key_b"] = {"key_c": "not a function"} self.assertRaisesRegexp(TypeError, "Regularizer for.*", snt.check_regularizers, regularizers=regularizers, keys=keys) regularizers["key_b"] = { "key_c": contrib_layers.l1_regularizer(scale=0.5), "key_d": contrib_layers.l2_regularizer(scale=0.5), } snt.check_regularizers(regularizers=regularizers, keys=keys) def testInvalidDicts(self): batch_size = 3 # Mistake seen in the wild - https://github.com/deepmind/sonnet/issues/74 # Should actually be {'hidden_to_hidden': {'w': some_initializers(), ...}} initializers = {"hidden_to_hidden": tf.truncated_normal_initializer(0, 1)} vanilla_rnn = snt.VanillaRNN(hidden_size=23, initializers=initializers) with self.assertRaisesRegexp(TypeError, "Expected a dict"): vanilla_rnn(tf.zeros([batch_size, 4], dtype=tf.float32), vanilla_rnn.zero_state(batch_size, dtype=tf.float32)) # Error: should be a dict mapping strings to partitioners/regularizers. partitioners = tf.fixed_size_partitioner(num_shards=16) with self.assertRaisesRegexp(TypeError, "Expected a dict"): snt.LSTM(hidden_size=42, partitioners=partitioners) regularizers = contrib_layers.l1_regularizer(scale=0.5) with self.assertRaisesRegexp(TypeError, "Expected a dict"): snt.GRU(hidden_size=108, regularizers=regularizers) def testHasVariableScope(self): self.assertFalse(snt.has_variable_scope("string")) linear = snt.Linear(10) self.assertTrue(snt.has_variable_scope(linear)) linear(tf.ones((10, 10))) self.assertTrue(snt.has_variable_scope(linear)) @parameterized.parameters( (False, _EXPECTED_FORMATTED_VARIABLE_LIST.format(type="legacy")), (True, _EXPECTED_FORMATTED_VARIABLE_LIST.format(type="resource")), ) def testFormatVariables(self, use_resource, expected): with tf.variable_scope("m1"): v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource) with tf.device("/gpu"): with tf.variable_scope("m2"): v2 = tf.get_local_variable( "v2", shape=[5, 6], use_resource=use_resource) self.assertEqual(snt.format_variables([v2, v1]), expected) @parameterized.parameters( (False, _EXPECTED_FORMATTED_VARIABLE_MAP.format(type="legacy")), (True, _EXPECTED_FORMATTED_VARIABLE_MAP.format(type="resource")), ) def testFormatVariableMap(self, use_resource, expected): with tf.variable_scope("m1"): v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource) with tf.device("/gpu"): with tf.variable_scope("m2"): v2 = tf.get_local_variable( "v2", shape=[5, 6], use_resource=use_resource) var_map = {"vv1": v1, "vv2": v2} self.assertEqual(snt.format_variable_map(var_map), expected) def testLogVariables(self): tf.get_default_graph().add_to_collection("config", {"version": 1}) with tf.variable_scope("m1"): tf.get_variable("v1", shape=[3, 4]) with tf.device("/gpu"): with tf.variable_scope("m2"): tf.get_local_variable("v2", shape=[5, 6]) snt.log_variables() def testLogVariables_with_arg(self): tf.get_default_graph().add_to_collection("config", {"version": 1}) with tf.variable_scope("m1"): v1 = tf.get_variable("v1", shape=[3, 4]) with tf.device("/gpu"): with tf.variable_scope("m2"): v2 = tf.get_local_variable("v2", shape=[5, 6]) snt.log_variables([v2, v1]) @parameterized.parameters( (5, "5 B"), (1023, "1023 B"), (1024, "1.000 KB"), (1536, "1.500 KB"), (2**20, "1.000 MB"), (2**21, "2.000 MB"), (2**30, "1.000 GB"), (2**31, "2.000 GB"), ) def 
testNumBytesToHumanReadable(self, num_bytes, expected_string): self.assertEqual( util._num_bytes_to_human_readable(num_bytes), expected_string) # pylint: disable long lambda warning @parameterized.parameters( (lambda: tf.get_variable("a", dtype=tf.int64, shape=1024), ["tf.int64: 1 variables comprising 1024 scalars, 8.000 KB", "Total: 1 variables comprising 1024 scalars, 8.000 KB"]), (lambda: (tf.get_variable("b", dtype=tf.float32, shape=100000), tf.get_variable("c", dtype=tf.float32, shape=5000)), ["tf.float32: 2 variables comprising 105000 scalars, 410.156 KB", "Total: 2 variables comprising 105000 scalars, 410.156 KB"]), (lambda: (tf.get_variable("d", dtype=tf.int16, shape=1024), tf.get_variable("e", dtype=tf.int64, shape=2048)), ["tf.int16: 1 variables comprising 1024 scalars, 2.000 KB", "tf.int64: 1 variables comprising 2048 scalars, 16.000 KB", "Total: 2 variables comprising 3072 scalars, 18.000 KB"]) ) def testSummarizeVariables(self, graph_creator_fn, expected_strings): with mock.patch.object(tf.logging, "info") as mocked_logging_info: graph_creator_fn() snt.summarize_variables() self.assertTrue(len(expected_strings), len(mocked_logging_info.call_args_list)) for expected, actual in zip(expected_strings, mocked_logging_info.call_args_list): actual_args = actual[0] # The rest of this structure is empty kwargs. self.assertEqual(expected, actual_args[0] % actual_args[1:]) @parameterized.parameters( (lambda: tf.get_variable("a", dtype=tf.float32, shape=132), {tf.float32: {"num_scalars": 132, "num_variables": 1}}), (lambda: (tf.get_variable("b", dtype=tf.float64, shape=1024), tf.get_variable("c", dtype=tf.float64, shape=2048)), {tf.float64: {"num_scalars": 3072, "num_variables": 2}}), (lambda: (tf.get_variable("d", dtype=tf.float16, shape=100), tf.get_variable("e", dtype=tf.float32, shape=200)), {tf.float16: {"num_scalars": 100, "num_variables": 1}, tf.float32: {"num_scalars": 200, "num_variables": 1}}) ) def testCountVariablesByType(self, graph_creator_fn, expected_dict): graph_creator_fn() self.assertEqual(snt.count_variables_by_type(), expected_dict) # pylint: enable long lambda warning @parameterized.parameters( ("LayerNorm", snt.LayerNorm), ("snt.LayerNorm", snt.LayerNorm), ("sonnet.LayerNorm", snt.LayerNorm), ("snt.nets.ConvNet2D", snt.nets.ConvNet2D), ("sonnet.python.modules.nets.ConvNet2D", snt.nets.ConvNet2D), ) def testParseStringToConstructor(self, constructor_string, expected_result): self.assertEqual(snt.parse_string_to_constructor(constructor_string), expected_result) @parameterized.parameters( ("non_existent_thing",), ("snt.asdfadsf",), ) def testParseStringToConstructorErrors(self, erroneous_string): with self.assertRaisesRegexp(ValueError, "could not find"): snt.parse_string_to_constructor(erroneous_string) @parameterized.parameters( (lambda: snt.Linear(42), [], util.SUPPORTED), (snt.LayerNorm, "is_training", util.NOT_SUPPORTED), (snt.BatchNorm, "is_training", util.SUPPORTED), (snt.BatchNorm, ["is_training"], util.SUPPORTED), (snt.BatchNorm, ["is_training", "test_local_stats"], util.SUPPORTED), (snt.BatchNorm, ["is_training", "test_local_stoats"], util.NOT_SUPPORTED), ) def testModuleSupportsKwargs(self, module_builder, kwargs_list, expected): mod = module_builder() self.assertEqual(snt.supports_kwargs(mod, kwargs_list), expected) def testModuleSupportsKwargsReuseVariables(self): # Test whether reuse_variables wrapping preserves the signature so that # we can query for supported kwargs. Also check whether inheritance breaks # things. 
class ParentModule(snt.AbstractModule): def _build(self): raise ValueError("call reuse_variables methods instead") @snt.reuse_variables def a(self, inputs, flag_a=False): return inputs + 1 @snt.reuse_variables def b(self, inputs, flag_b=False): return inputs + 2 pm = ParentModule() self.assertEqual( snt.supports_kwargs(pm.a, "flag_a"), util.SUPPORTED) self.assertEqual( snt.supports_kwargs(pm.b, "flag_b"), util.SUPPORTED) self.assertEqual( snt.supports_kwargs(pm.a, ["flag_a", "nonexistent_flag"]), util.NOT_SUPPORTED) self.assertEqual( snt.supports_kwargs(pm.b, "flag_a"), util.NOT_SUPPORTED) class ChildModule(ParentModule): # Override parent implementation of a() @snt.reuse_variables def a(self, inputs, new_flag_a=True, another_new_flag_a=False): return inputs + 3 @snt.reuse_variables def c(self, inputs, flag_c=42): return inputs + 4 cm = ChildModule() self.assertEqual( snt.supports_kwargs(cm.a, ["new_flag_a", "another_new_flag_a"]), util.SUPPORTED) self.assertEqual( snt.supports_kwargs(cm.a, "flag_a"), util.NOT_SUPPORTED) self.assertEqual( snt.supports_kwargs(cm.b, "flag_b"), util.SUPPORTED) self.assertEqual( snt.supports_kwargs(cm.c, "flag_c"), util.SUPPORTED) def testModuleSupportsKwargsMaybe(self): def foo(x, y, z): return x + y + z self.assertEqual(snt.supports_kwargs(foo, ["x", "y"]), util.SUPPORTED) self.assertEqual(snt.supports_kwargs(foo, ["x", "y", "is_training"]), util.NOT_SUPPORTED) def bar(x, y, **kwargs): return x + y + sum(kwargs) self.assertEqual(snt.supports_kwargs(bar, ["x", "y"]), util.SUPPORTED) self.assertEqual(snt.supports_kwargs(bar, ["x", "y", "is_training"]), util.MAYBE_SUPPORTED) @parameterized.parameters( (lambda: snt.Linear(106), None, {}), (snt.BatchNorm, {"is_training": 42}, {"is_training": 42}), (snt.BatchNorm, {"non_existent_flag": False}, {}), (lambda: snt.nets.MLP([23, 42]), {"dropout_keep_prob": 0.4, "is_training": True, "blah": True}, {"dropout_keep_prob": 0.4, "is_training": True})) def testRemoveUnsupportedKwargs(self, module_builder, in_kwargs, expected_kwargs): mod = module_builder() self.assertEqual(snt.remove_unsupported_kwargs(mod, in_kwargs), expected_kwargs) def testRemoveUnsupportedKwargsWithMaybe(self): def foo(x, y): return x + y # z is definitely not supported self.assertEqual( snt.remove_unsupported_kwargs(foo, {"x": 1, "y": 2, "z": 3}), {"x": 1, "y": 2}) def bar(x, y, **kwargs): return x + y + sum(kwargs) # **kwargs means that potentially anything is supported. We can't remove # anything from the kwargs. 
self.assertEqual( snt.remove_unsupported_kwargs(bar, {"x": 4, "y": 5, "z": 6}), {"x": 4, "y": 5, "z": 6}) class ReuseVarsTest(parameterized.TestCase, tf.test.TestCase): class VariableContainer(object): def __init__(self, name): with tf.variable_scope(name) as vs: self.variable_scope = vs @util.reuse_variables def method_with_reuse(self): return tf.get_variable("a", shape=[1]) def method_without_reuse(self): return tf.get_variable("b", shape=[1]) class InheritedVariableContainer(VariableContainer): @util.reuse_variables def not_inherited_method_with_reuse(self): return tf.get_variable("c", shape=[1]) class ModuleReuse(snt.AbstractModule): def __init__(self, shape, name="multi_template_test"): super(ReuseVarsTest.ModuleReuse, self).__init__(name=name) self._shape = shape @util.reuse_variables def a(self): return tf.get_variable("a", shape=self._shape) @util.reuse_variables def add_b(self, inputs): return inputs + tf.get_variable("b", shape=self._shape) def _build(self, inputs): return self.add_b(inputs + self.a()) def test_get_all_variables(self): np.random.seed(100) batch_size = 3 in_size = 4 inputs = tf.placeholder(tf.float32, shape=[batch_size, in_size]) module = ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()) module.a() all_variable_names = sorted([v.name for v in module._all_variables]) self.assertEqual(["multi_template_test/a_1:0"], all_variable_names) module(inputs) # pylint: disable=not-callable all_variable_names = sorted([v.name for v in module.get_all_variables()]) self.assertEqual(["multi_template_test/a_1:0", "multi_template_test/b:0"], all_variable_names) seq = snt.Sequential([ ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()), ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()), ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()), ]) for layer in seq.layers: layer.add_b(inputs) self.assertEmpty(seq._all_variables) seq(inputs) all_variable_names = sorted([v.name for v in seq.get_all_variables()]) self.assertEqual([ "multi_template_test_1/a:0", "multi_template_test_1/b:0", "multi_template_test_2/a:0", "multi_template_test_2/b:0", "multi_template_test_3/a:0", "multi_template_test_3/b:0", ], all_variable_names) def test_reuse_method(self): obj1 = ReuseVarsTest.VariableContainer("scope1") obj2 = ReuseVarsTest.VariableContainer("scope2") self.assertEqual("b", obj1.method_without_reuse().op.name) self.assertRaisesRegexp(ValueError, r"Variable b already exists, disallowed.*", obj1.method_without_reuse) self.assertRaisesRegexp(ValueError, r"Variable b already exists, disallowed.*", obj2.method_without_reuse) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope2/a", obj2.method_with_reuse().op.name) self.assertEqual("scope2/a", obj2.method_with_reuse().op.name) def test_multiple_objects_per_variable_scope(self): obj1 = ReuseVarsTest.VariableContainer("scope1") obj2 = ReuseVarsTest.VariableContainer("scope1") self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) def test_reuse_inherited_method(self): obj1 = ReuseVarsTest.InheritedVariableContainer("scope1") obj2 = ReuseVarsTest.InheritedVariableContainer("scope2") self.assertEqual("b", obj1.method_without_reuse().op.name) self.assertRaisesRegexp(ValueError, r"Variable b already exists, disallowed.*", obj1.method_without_reuse) 
self.assertRaisesRegexp(ValueError, r"Variable b already exists, disallowed.*", obj2.method_without_reuse) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name) self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name) self.assertEqual("scope2/a", obj2.method_with_reuse().op.name) self.assertEqual("scope2/a", obj2.method_with_reuse().op.name) self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name) self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name) def test_reuse_abstract_module(self): np.random.seed(100) batch_size = 3 in_size = 4 inputs = tf.placeholder(tf.float32, shape=[batch_size, in_size]) module1 = ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()) module2 = ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list()) a1 = module1.a() inputs_plus_b1 = module1.add_b(inputs) inputs_plus_ab1 = module1(inputs) # pylint: disable=not-callable inputs_plus_ab2 = module2(inputs) # pylint: disable=not-callable inputs_plus_b2 = module2.add_b(inputs) a2 = module2.a() inputs_plus_ab1_again = module1(inputs) # pylint: disable=not-callable inputs_plus_ab2_again = module2(inputs) # pylint: disable=not-callable with self.test_session() as sess: sess.run(tf.global_variables_initializer()) input_data = np.random.rand(batch_size, in_size) out = sess.run([a1, inputs_plus_b1, inputs_plus_ab1, a2, inputs_plus_b2, inputs_plus_ab2], feed_dict={inputs: input_data}) self.assertNotAlmostEqual(np.linalg.norm(out[0] - out[3]), 0) self.assertNotAlmostEqual(np.linalg.norm(out[1] - out[4]), 0) self.assertNotAlmostEqual(np.linalg.norm(out[2] - out[5]), 0) self.assertAllClose(out[0] + out[1], out[2]) self.assertAllClose(out[3] + out[4], out[5]) out = sess.run([inputs_plus_ab1, inputs_plus_ab1_again], feed_dict={inputs: input_data}) self.assertAllEqual(out[0], out[1]) out = sess.run([inputs_plus_ab2, inputs_plus_ab2_again], feed_dict={inputs: input_data}) self.assertAllEqual(out[0], out[1]) def test_variable_scope_call_order(self): class TestModule(snt.AbstractModule): def __init__(self, name="test_module"): super(TestModule, self).__init__(name=name) @util.reuse_variables def a(self): return self.scope_name def _build(self): pass @property def variable_scope(self): # Needed to access `self.variable_scope` before calling `self.build()`. 
return self._template.variable_scope m1 = TestModule(name="m1") m2 = TestModule(name="m2") a1 = m1.a a2 = m2.a self.assertEqual("m1", a1()) self.assertEqual("m2", a2()) def test_multiple_graphs(self): g1 = tf.Graph() g2 = tf.Graph() with g1.as_default(): obj1 = ReuseVarsTest.VariableContainer("scope1") obj2 = ReuseVarsTest.VariableContainer("scope1") self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) with g2.as_default(): obj1 = ReuseVarsTest.VariableContainer("scope1") obj2 = ReuseVarsTest.VariableContainer("scope1") self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj1.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) self.assertEqual("scope1/a", obj2.method_with_reuse().op.name) def test_name_scopes(self): class VariableContainerWithOps(ReuseVarsTest.VariableContainer): @util.reuse_variables def add_b(self, tensor): b = tf.get_variable("b", shape=[1]) return tensor + b @util.reuse_variables def add_a(self, tensor): return tensor + self.method_with_reuse() @util.reuse_variables def nested_add(self, tensor): return tf.ones(shape=[1]) + self.add_a(tensor) def get_tensor_names_from_default_graph(): ops = [ op for op in tf.get_default_graph().get_operations() if "Initializer" not in op.name and "Assign" not in op.name and "read" not in op.name ] tensor_names = [] for op in ops: tensor_names.extend(tensor.name for tensor in op.outputs) return tensor_names obj1 = VariableContainerWithOps("scope1") obj2 = VariableContainerWithOps("scope2") zeros = tf.zeros(shape=[1]) self.assertEqual("scope1/add_b/add", obj1.add_b(zeros).op.name) self.assertEqual("scope1/add_b_1/add", obj1.add_b(zeros).op.name) with tf.name_scope("outer_scope"): self.assertEqual("outer_scope/scope1/add_b/add", obj1.add_b(zeros).op.name) self.assertEqual("outer_scope/scope1/add_b_1/add", obj1.add_b(zeros).op.name) self.assertEqual("scope1/add_a/add", obj1.add_a(zeros).op.name) self.assertEqual("scope1/add_a_1/add", obj1.add_a(zeros).op.name) self.assertEqual("scope1/nested_add/add", obj1.nested_add(zeros).op.name) self.assertEqual("scope1/nested_add_1/add", obj1.nested_add(zeros).op.name) ones = tf.ones(shape=[1]) self.assertEqual("scope2/add_b/add", obj2.add_b(ones).op.name) self.assertEqual("scope2/add_b_1/add", obj2.add_b(ones).op.name) self.assertEqual("scope2/add_a/add", obj2.add_a(ones).op.name) self.assertEqual("scope2/add_a_1/add", obj2.add_a(ones).op.name) self.assertEqual("scope2/nested_add/add", obj2.nested_add(ones).op.name) self.assertEqual("scope2/nested_add_1/add", obj2.nested_add(ones).op.name) observed_tensor_names = get_tensor_names_from_default_graph() # Keep this for compatibility with versions of tensorflow lower than 1.6 if len(observed_tensor_names) == 40: expected_tensor_names = [ u"zeros/shape_as_tensor:0", u"zeros/Const:0", u"zeros:0", u"scope1/b:0", u"scope1/add_b/add:0", u"scope1/add_b_1/add:0", u"outer_scope/scope1/add_b/add:0", u"outer_scope/scope1/add_b_1/add:0", u"scope1/a:0", u"scope1/add_a/add:0", u"scope1/add_a_1/add:0", u"scope1/nested_add/ones/shape_as_tensor:0", u"scope1/nested_add/ones/Const:0", u"scope1/nested_add/ones:0", u"scope1/nested_add/scope1/add_a/add:0", u"scope1/nested_add/add:0", u"scope1/nested_add_1/ones/shape_as_tensor:0", u"scope1/nested_add_1/ones/Const:0", u"scope1/nested_add_1/ones:0", 
u"scope1/nested_add_1/scope1/add_a/add:0", u"scope1/nested_add_1/add:0", u"ones/shape_as_tensor:0", u"ones/Const:0", u"ones:0", u"scope2/b:0", u"scope2/add_b/add:0", u"scope2/add_b_1/add:0", u"scope2/a:0", u"scope2/add_a/add:0", u"scope2/add_a_1/add:0", u"scope2/nested_add/ones/shape_as_tensor:0", u"scope2/nested_add/ones/Const:0", u"scope2/nested_add/ones:0", u"scope2/nested_add/scope2/add_a/add:0", u"scope2/nested_add/add:0", u"scope2/nested_add_1/ones/shape_as_tensor:0", u"scope2/nested_add_1/ones/Const:0", u"scope2/nested_add_1/ones:0", u"scope2/nested_add_1/scope2/add_a/add:0", u"scope2/nested_add_1/add:0", ] else: expected_tensor_names = [ u"zeros:0", u"scope1/b:0", u"scope1/add_b/add:0", u"scope1/add_b_1/add:0", u"outer_scope/scope1/add_b/add:0", u"outer_scope/scope1/add_b_1/add:0", u"scope1/a:0", u"scope1/add_a/add:0", u"scope1/add_a_1/add:0", u"scope1/nested_add/ones:0", u"scope1/nested_add/scope1/add_a/add:0", u"scope1/nested_add/add:0", u"scope1/nested_add_1/ones:0", u"scope1/nested_add_1/scope1/add_a/add:0", u"scope1/nested_add_1/add:0", u"ones:0", u"scope2/b:0", u"scope2/add_b/add:0", u"scope2/add_b_1/add:0", u"scope2/a:0", u"scope2/add_a/add:0", u"scope2/add_a_1/add:0", u"scope2/nested_add/ones:0", u"scope2/nested_add/scope2/add_a/add:0", u"scope2/nested_add/add:0", u"scope2/nested_add_1/ones:0", u"scope2/nested_add_1/scope2/add_a/add:0", u"scope2/nested_add_1/add:0", ] self.assertEqual(expected_tensor_names, observed_tensor_names) def test_reuse_vars_subgraph_recording(self): obj1 = ReuseVarsTest.ModuleReuse(shape=[3, 4], name="scope1") self.assertFalse(obj1.is_connected) obj1_a_outputs = obj1.a() self.assertTrue(obj1.is_connected) if not tf.executing_eagerly(): self.assertEqual(obj1.last_connected_subgraph.name_scope, "scope1/a/") self.assertIs(obj1.last_connected_subgraph.module, obj1) self.assertEqual(obj1.last_connected_subgraph.inputs, {}) self.assertIs(obj1.last_connected_subgraph.outputs, obj1_a_outputs) @contrib_eager.run_test_in_graph_and_eager_modes def test_container_not_supported_in_eager(self): if not tf.executing_eagerly(): self.skipTest("Skipping test in graph mode.") container = ReuseVarsTest.VariableContainer("name") with self.assertRaisesRegexp(ValueError, ".* not supported in eager mode .*"): container.method_with_reuse() @contrib_eager.run_test_in_graph_and_eager_modes def test_variable_reuse_defun(self): if not tf.executing_eagerly(): self.skipTest("Skipping test in graph mode.") class AssigningModule(snt.AbstractModule): _build = None @util.reuse_variables def assign_a(self): self.a = tf.get_variable("a", []) module = AssigningModule() # Uses `get_variable` to create a and keep a reference. module.assign_a() a, module.a = module.a, None # Now do the same but inside a defun. contrib_eager.defun(module.assign_a)() defun_a = module.a # In and out of the `defun` we should get literally the same object for `a`. 
self.assertIs(a, defun_a) @parameterized.parameters([True, False]) def test_defun(self, connect_defun_first): raw_module = ReuseVarsTest.ModuleReuse([]) defun_module = contrib_eager.defun(raw_module) if connect_defun_first: defun_result = defun_module(tf.zeros([])) raw_result = raw_module.add_b(raw_module.a()) else: raw_result = raw_module.add_b(raw_module.a()) defun_result = defun_module(tf.zeros([])) self.evaluate(tf.global_variables_initializer()) raw_result, defun_result = self.evaluate([raw_result, defun_result]) self.assertEqual(raw_result, defun_result) class NameFunctionTest(tf.test.TestCase): def testToSnakeCase(self): test_cases = [ ("UpperCamelCase", "upper_camel_case"), ("lowerCamelCase", "lower_camel_case"), ("endsWithXYZ", "ends_with_xyz"), ("already_snake_case", "already_snake_case"), ("__private__", "private"), ("LSTMModule", "lstm_module"), ("version123p56vfxObject", "version_123p56vfx_object"), ("version123P56VFXObject", "version_123p56vfx_object"), ("versionVFX123P56Object", "version_vfx123p56_object"), ("versionVfx123P56Object", "version_vfx_123p56_object"), ("lstm1", "lstm_1"), ("LSTM1", "lstm1"), ] for camel_case, snake_case in test_cases: actual = util.to_snake_case(camel_case) self.assertEqual(actual, snake_case, "_to_snake_case(%s) -> %s != %s" % (camel_case, actual, snake_case)) def testNameForCallable_Function(self): def test(): pass self.assertName(test, "test") def testNameForCallable_Lambda(self): test = lambda x: x self.assertName(test, None) def testNameForCallable_Partial(self): def test(*unused_args): pass test = functools.partial(functools.partial(test, "a"), "b") self.assertName(test, "test") def testNameForCallable_Instance(self): class Test(object): def __call__(self): pass self.assertName(Test(), None) def assertName(self, func, expected): name = util.name_for_callable(func) self.assertEqual(name, expected) @contrib_eager.run_all_tests_in_graph_and_eager_modes class TestNotifyAboutVariables(parameterized.TestCase, tf.test.TestCase): def testNoVariables(self): variables = [] with util.notify_about_new_variables(variables.append): pass self.assertEqual(variables, []) def assertVariableType(self, variable, resource): type_name = type(variable).__name__ if resource: self.assertEqual(type_name, "ResourceVariable") else: # Current stable TF release uses "Variable", head uses "RefVariable". self.assertIn(type_name, ("Variable", "RefVariable")) @parameterized.parameters([True, False]) def testGetVariable(self, use_resource): if tf.executing_eagerly() and not use_resource: self.skipTest("Ref variables not supported in eager mode.") variables = [] with util.notify_about_new_variables(variables.append): with tf.variable_scope("", use_resource=use_resource): x = tf.get_variable("x", []) self.assertVariableType(x, use_resource) self.assertEqual(variables, [x]) @parameterized.parameters( itertools.product( ["ResourceVariable", "RefVariable"], [["notify", "custom_getter"], ["custom_getter", "notify"], ["notify", "variable_creator"], ["variable_creator", "notify"], ])) def testVariableCreatingCustomGetter(self, variable_type, stack_entries): use_resource = variable_type == "ResourceVariable" if tf.executing_eagerly() and not use_resource: self.skipTest("Ref variables not supported in eager mode.") def my_custom_getter(getter, **kwargs): var = getter(**kwargs) # Create an additional variable in the getter which is not returned. 
kwargs["name"] += "_additional" getter(**kwargs) return var variables = [] with contextlib2.ExitStack() as stack: stack.enter_context(tf.variable_scope("", use_resource=use_resource)) for stack_entry in stack_entries: if stack_entry == "notify": stack.enter_context(util.notify_about_new_variables(variables.append)) elif stack_entry == "custom_getter": stack.enter_context( tf.variable_scope("", custom_getter=my_custom_getter)) elif stack_entry == "variable_creator": stack.enter_context( variable_scope_ops.variable_creator_scope(my_custom_getter)) else: raise AssertionError v = tf.get_variable("v", []) self.assertVariableType(v, use_resource) if stack_entries == ["variable_creator", "notify"]: # When a variable creator is entered before `notify_about_new_variables` # there is no way for us to identify what additional variables that # creator created. self.assertEqual([v.name for v in variables], [u"v:0"]) else: self.assertEqual([v.name for v in variables], [u"v:0", u"v_additional:0"]) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/util_test.py
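A minimal usage sketch of the `@reuse_variables` pattern exercised by `ReuseVarsTest` above (here assumed to be exported as `snt.reuse_variables`; the class, names and shapes are illustrative and not repository code). In graph mode, the decorated method creates its variable inside the object's `variable_scope` on the first call and reuses it on every later call:

import sonnet as snt
import tensorflow.compat.v1 as tf


class Container(object):
  """Plain object that owns a variable scope, as in ReuseVarsTest above."""

  def __init__(self, name):
    with tf.variable_scope(name) as vs:
      self.variable_scope = vs

  @snt.reuse_variables
  def weights(self):
    # Created on the first call, reused (not re-created) on later calls.
    return tf.get_variable("w", shape=[4, 4])


c = Container("container")
w_first = c.weights()
w_again = c.weights()
# Both calls return the same variable, named "container/w:0".
assert w_first.name == w_again.name == "container/w:0"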
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tensorflow op that scales gradient for backwards pass."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
from tensorflow.contrib.eager.python import tfe as contrib_eager

tfe = contrib_eager


@tf.custom_gradient
def _scale_gradient(x, scale):
  grad = lambda dy: (dy * scale, None)
  return x, grad


def scale_gradient(net, scale, name="scale_gradient"):
  """Scales gradients for the backwards pass.

  This might be used to, for example, allow one part of a model to learn at a
  lower rate than the rest.

  WARNING: Think carefully about how your optimizer works. If, for example, you
  use rmsprop, the gradient is always rescaled (with some additional epsilon)
  towards unity. This means `scale_gradient` won't have the effect of lowering
  the learning rate.

  If `scale` is `0.0`, this op reduces to `tf.stop_gradient`. If `scale`
  is `1.0`, this op reduces to `tf.identity`.

  Args:
    net: A `tf.Tensor` or in eager mode a callable that produces a `tf.Tensor`.
    scale: The scale factor for the gradient on the backwards pass.
    name: A name for the operation (optional).

  Returns:
    In graph mode returns a `tf.Tensor` with the same type as the input tensor.
    In eager mode returns a callable wrapping `net` whose gradients are scaled.

  Raises:
    ValueError: If `net` dtype is non-float and `scale` is not zero or one.
  """
  if tf.executing_eagerly():
    if not callable(net):
      raise ValueError(
          "In eager mode `net` must be a callable (similar to how optimizers "
          "must be used when executing eagerly).")
    return tfe.defun(lambda *a, **k: scale_gradient(net(*a, **k), scale, name))

  if scale == 0.0:
    return tf.stop_gradient(net, name=name)
  elif scale == 1.0:
    return tf.identity(net, name=name)
  else:
    if not net.dtype.is_floating:
      raise ValueError(
          "scale_gradient does not support non-float `net` inputs.")
    with tf.name_scope(name, "scale_gradient", values=[net]):
      dtype = net.dtype.base_dtype  # Convert ref dtypes to regular dtypes.
      scale_tensor = tf.convert_to_tensor(scale, dtype=dtype)
      output = _scale_gradient(net, scale_tensor)
    return output
sonnet-1
sonnet/python/modules/scale_gradient.py
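A short, hedged usage sketch of `scale_gradient` in graph mode (assuming the public aliases `snt.scale_gradient` and `snt.Linear`; all names are illustrative). The forward value is unchanged, but the gradient flowing back into everything upstream of the call is multiplied by 0.1, so that part of the model learns more slowly:

import sonnet as snt
import tensorflow.compat.v1 as tf

x = tf.placeholder(tf.float32, [None, 8])
shared = snt.Linear(16)(x)

# Identity on the forward pass, gradient * 0.1 on the backward pass.
slow_branch = snt.scale_gradient(shared, 0.1)
logits = snt.Linear(2)(slow_branch)

loss = tf.reduce_sum(tf.square(logits))
# The gradient reaching `shared` (and the first Linear) is scaled by 0.1.
grad_wrt_shared, = tf.gradients(loss, [shared])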
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Relational Memory architecture. An implementation of the architecture described in "Relational Recurrent Neural Networks", Santoro et al., 2018. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from sonnet.python.modules import basic from sonnet.python.modules import layer_norm from sonnet.python.modules import rnn_core from sonnet.python.modules.nets import mlp import tensorflow.compat.v1 as tf class RelationalMemory(rnn_core.RNNCore): """Relational Memory Core.""" def __init__(self, mem_slots, head_size, num_heads=1, num_blocks=1, forget_bias=1.0, input_bias=0.0, gate_style='unit', attention_mlp_layers=2, key_size=None, name='relational_memory'): """Constructs a `RelationalMemory` object. Args: mem_slots: The total number of memory slots to use. head_size: The size of an attention head. num_heads: The number of attention heads to use. Defaults to 1. num_blocks: Number of times to compute attention per time step. Defaults to 1. forget_bias: Bias to use for the forget gate, assuming we are using some form of gating. Defaults to 1. input_bias: Bias to use for the input gate, assuming we are using some form of gating. Defaults to 0. gate_style: Whether to use per-element gating ('unit'), per-memory slot gating ('memory'), or no gating at all (None). Defaults to `unit`. attention_mlp_layers: Number of layers to use in the post-attention MLP. Defaults to 2. key_size: Size of vector to use for key & query vectors in the attention computation. Defaults to None, in which case we use `head_size`. name: Name of the module. Raises: ValueError: gate_style not one of [None, 'memory', 'unit']. ValueError: num_blocks is < 1. ValueError: attention_mlp_layers is < 1. """ super(RelationalMemory, self).__init__(name=name) self._mem_slots = mem_slots self._head_size = head_size self._num_heads = num_heads self._mem_size = self._head_size * self._num_heads if num_blocks < 1: raise ValueError('num_blocks must be >= 1. Got: {}.'.format(num_blocks)) self._num_blocks = num_blocks self._forget_bias = forget_bias self._input_bias = input_bias if gate_style not in ['unit', 'memory', None]: raise ValueError( 'gate_style must be one of [\'unit\', \'memory\', None]. Got: ' '{}.'.format(gate_style)) self._gate_style = gate_style if attention_mlp_layers < 1: raise ValueError('attention_mlp_layers must be >= 1. Got: {}.'.format( attention_mlp_layers)) self._attention_mlp_layers = attention_mlp_layers self._key_size = key_size if key_size else self._head_size def initial_state(self, batch_size, trainable=False): """Creates the initial memory. We should ensure each row of the memory is initialized to be unique, so initialize the matrix to be the identity. We then pad or truncate as necessary so that init_state is of size (batch_size, self._mem_slots, self._mem_size). 
Args: batch_size: The size of the batch. trainable: Whether the initial state is trainable. This is always True. Returns: init_state: A truncated or padded matrix of size (batch_size, self._mem_slots, self._mem_size). """ init_state = tf.eye(self._mem_slots, batch_shape=[batch_size]) # Pad the matrix with zeros. if self._mem_size > self._mem_slots: difference = self._mem_size - self._mem_slots pad = tf.zeros((batch_size, self._mem_slots, difference)) init_state = tf.concat([init_state, pad], -1) # Truncation. Take the first `self._mem_size` components. elif self._mem_size < self._mem_slots: init_state = init_state[:, :, :self._mem_size] return init_state def _multihead_attention(self, memory): """Perform multi-head attention from 'Attention is All You Need'. Implementation of the attention mechanism from https://arxiv.org/abs/1706.03762. Args: memory: Memory tensor to perform attention on. Returns: new_memory: New memory tensor. """ key_size = self._key_size value_size = self._head_size qkv_size = 2 * key_size + value_size total_size = qkv_size * self._num_heads # Denote as F. qkv = basic.BatchApply(basic.Linear(total_size))(memory) qkv = basic.BatchApply(layer_norm.LayerNorm())(qkv) mem_slots = memory.get_shape().as_list()[1] # Denoted as N. # [B, N, F] -> [B, N, H, F/H] qkv_reshape = basic.BatchReshape([mem_slots, self._num_heads, qkv_size])(qkv) # [B, N, H, F/H] -> [B, H, N, F/H] qkv_transpose = tf.transpose(qkv_reshape, [0, 2, 1, 3]) q, k, v = tf.split(qkv_transpose, [key_size, key_size, value_size], -1) q *= key_size ** -0.5 dot_product = tf.matmul(q, k, transpose_b=True) # [B, H, N, N] weights = tf.nn.softmax(dot_product) output = tf.matmul(weights, v) # [B, H, N, V] # [B, H, N, V] -> [B, N, H, V] output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, N, H, V] -> [B, N, H * V] new_memory = basic.BatchFlatten(preserve_dims=2)(output_transpose) return new_memory @property def state_size(self): return tf.TensorShape([self._mem_slots, self._mem_size]) @property def output_size(self): return tf.TensorShape(self._mem_slots * self._mem_size) def _calculate_gate_size(self): """Calculate the gate size from the gate_style. Returns: The per sample, per head parameter size of each gate. """ if self._gate_style == 'unit': return self._mem_size elif self._gate_style == 'memory': return 1 else: # self._gate_style == None return 0 def _create_gates(self, inputs, memory): """Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate. """ # We'll create the input and forget gates at once. Hence, calculate double # the gate size. num_gates = 2 * self._calculate_gate_size() memory = tf.tanh(memory) inputs = basic.BatchFlatten()(inputs) gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs) gate_inputs = tf.expand_dims(gate_inputs, axis=1) gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory) gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2) input_gate, forget_gate = gates input_gate = tf.sigmoid(input_gate + self._input_bias) forget_gate = tf.sigmoid(forget_gate + self._forget_bias) return input_gate, forget_gate def _attend_over_memory(self, memory): """Perform multiheaded attention over `memory`. Args: memory: Current relational memory. Returns: The attended-over memory. 
""" attention_mlp = basic.BatchApply( mlp.MLP([self._mem_size] * self._attention_mlp_layers)) for _ in range(self._num_blocks): attended_memory = self._multihead_attention(memory) # Add a skip connection to the multiheaded attention's input. memory = basic.BatchApply(layer_norm.LayerNorm())( memory + attended_memory) # Add a skip connection to the attention_mlp's input. memory = basic.BatchApply(layer_norm.LayerNorm())( attention_mlp(memory) + memory) return memory def _build(self, inputs, memory, treat_input_as_matrix=False): """Adds relational memory to the TensorFlow graph. Args: inputs: Tensor input. memory: Memory output from the previous time step. treat_input_as_matrix: Optional, whether to treat `input` as a sequence of matrices. Defaulta to False, in which case the input is flattened into a vector. Returns: output: This time step's output. next_memory: The next version of memory to use. """ if treat_input_as_matrix: inputs = basic.BatchFlatten(preserve_dims=2)(inputs) inputs_reshape = basic.BatchApply( basic.Linear(self._mem_size), n_dims=2)(inputs) else: inputs = basic.BatchFlatten()(inputs) inputs = basic.Linear(self._mem_size)(inputs) inputs_reshape = tf.expand_dims(inputs, 1) memory_plus_input = tf.concat([memory, inputs_reshape], axis=1) next_memory = self._attend_over_memory(memory_plus_input) n = inputs_reshape.get_shape().as_list()[1] next_memory = next_memory[:, :-n, :] if self._gate_style == 'unit' or self._gate_style == 'memory': self._input_gate, self._forget_gate = self._create_gates( inputs_reshape, memory) next_memory = self._input_gate * tf.tanh(next_memory) next_memory += self._forget_gate * memory output = basic.BatchFlatten()(next_memory) return output, next_memory @property def input_gate(self): """Returns the input gate Tensor.""" self._ensure_is_connected() return self._input_gate @property def forget_gate(self): """Returns the forget gate Tensor.""" self._ensure_is_connected() return self._forget_gate
sonnet-1
sonnet/python/modules/relational_memory.py
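A minimal graph-mode sketch of driving `RelationalMemory`, following the pattern used in `relational_memory_test.py` later in this dump (sizes are illustrative): build the core, fetch its initial memory, and unroll it for two steps. The output per step has size `mem_slots * head_size * num_heads`:

import numpy as np
from sonnet.python.modules import relational_memory
import tensorflow.compat.v1 as tf

batch_size, mem_slots, head_size, num_heads = 5, 4, 32, 2
core = relational_memory.RelationalMemory(mem_slots, head_size, num_heads)

inputs = tf.placeholder(tf.float32, [batch_size, 3, 3])
memory_0 = core.initial_state(batch_size)
output_1, memory_1 = core(inputs, memory_0)
output_2, memory_2 = core(inputs, memory_1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  out = sess.run(output_2, {inputs: np.zeros([batch_size, 3, 3], np.float32)})
  print(out.shape)  # (5, 256): batch_size x (mem_slots * head_size * num_heads)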
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Batch normalization module for Sonnet. This contains the module BatchNormV2, which performs batch normalization on its inputs. It has an optional post-normalization scale and offset, and it maintains moving averages of the statistics for use at test time. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from sonnet.python.modules import base from sonnet.python.modules import conv from sonnet.python.modules import util import tensorflow.compat.v1 as tf from tensorflow.contrib import framework as contrib_framework # pylint: disable=g-direct-tensorflow-import from tensorflow.python.layers import utils from tensorflow.python.training import moving_averages # pylint: enable=g-direct-tensorflow-import def create_beta_initializer(): """Returns a default initializer for the `beta` in batch norm.""" return tf.zeros_initializer() def create_gamma_initializer(): """Returns a default initializer for the `gamma` in batch norm.""" return tf.ones_initializer() def create_mean_initializer(): """Returns a default initializer for the `moving_mean` in batch norm.""" return tf.zeros_initializer() def create_variance_initializer(): """Returns a default initializer for the `moving_variance` in batch norm.""" return tf.ones_initializer() class BatchNormV2(base.AbstractModule): """Batch normalization module, including optional affine transformation. This module maintains exponential moving averages of the mean and variance, which can be optionally used to normalize at test time. At training time, batch statistics (mean, variance) are not shared between separate connections. The moving averages are shared between separate connections. At both training and test time, the optional affine transformation (`* gamma + beta`) is shared between separate connections. This is also the case for distributed replica training, where the batch statistics are not aggregated across replicas, but the moving averages are shared globally. When connecting the module to the graph, `is_training=True` means that - Update ops are created to update the moving averages with the current batch's statistics. - Features are normalized using the *current batch's statistics*. The `test_local_stats` setting is ignored. The moving averages are **not** used. whereas `is_training=False` means that - Update ops are not created. - Features are normalized using either: - The moving averages if `test_local_stats=False` (default). - The test batch statistics if `test_local_stats=True`. The moving averages are used by default at test time, but local batch statistics can be used by specifying a flag when connecting. One often wants to use local batch statistics at test time to track the progress while the model is trained as it would ensure that moving average updates do not affect the training curves. 
Once the training is finished, it's often advantageous to use moving average statistics, since it would make evaluation agnostic to the batch size, and might even lead to small improvements over the local batch statistics. The moving averages will be updated automatically by default, but not if `update_ops_collection` is provided: in that case they will only be updated when the ops in that collection are run. For example, to run the updates automatically: bn = BatchNormV2() train_net = bn(train_inputs, is_training=True) this does, however, have the effect of blocking the forwards pass of the network until the update ops have been run and may have a small performance penalty. For example, to run the updates manually: bn = BatchNormV2(update_ops_collection=tf.GraphKeys.UPDATE_OPS) train_net = bn(train_inputs, is_training=True) ... update_ops = tf.group(*tf.get_collection( tf.GraphKeys.UPDATE_OPS)) train_op = tf.group(train_op, update_ops) Then, whenever `train_op` is run so also are the moving average update ops. Some batch normalization caveats: - Batch normalization will remove the effect of adding a bias, so e.g. `use_bias=False` should be used for an immediately preceding snt.Linear module. - If your data batches aren't i.i.d. then batch normalization can allow your network to 'cheat' by using the batch statistics to peek at the rest of the batch. This can exhibit itself as a higher test score with `test_local_stats=True` than `test_local_stats=False`. """ GAMMA = "gamma" BETA = "beta" MOVING_MEAN = "moving_mean" MOVING_VARIANCE = "moving_variance" POSSIBLE_INITIALIZER_KEYS = {GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE} POSSIBLE_PARTITIONER_KEYS = {GAMMA, BETA} POSSIBLE_REGULARIZER_KEYS = {GAMMA, BETA} SUPPORTED_DATA_FORMATS = set.union({"NC"}, conv.SUPPORTED_1D_DATA_FORMATS, conv.SUPPORTED_2D_DATA_FORMATS, conv.SUPPORTED_3D_DATA_FORMATS) def __init__(self, data_format=None, offset=True, scale=False, decay_rate=0.999, eps=1e-3, initializers=None, partitioners=None, regularizers=None, update_ops_collection=None, fused=True, name="batch_norm"): """Constructs a BatchNormV2 module. Reduces over all input tensor dimensions apart from the channel dimension. This has the effect of treating pixels in 1D/2D/3D images as additional elements of the minibatch. Args: data_format: The data format. Can be "NC", "NWC", "NCW", "NHWC", "NCHW", "NDHWC", or "NCDHW". If not provided we assume the channel dimension is last. offset: Optional boolean to specify whether or not to apply a trained component-wise bias after the batch normalization and scaling. scale: Optional boolean to specify whether or not to apply a trained component-wise scale after the batch normalization. decay_rate: Decay rate of the exponential moving averages of the mean and variance. eps: Small number to avoid dividing by zero when diving by the standard deviation. initializers: Optional dict containing ops to initialize the weights of the affine transform (`gamma` and `beta`). partitioners: Optional dict containing partitioners to partition the weights of the affine transform (`gamma` and `beta`). regularizers: Optional dict containing regularizers for the weights of the affine transform ("gamma" and "beta"). As a default, no regularizers are used. A regularizer should be a function that takes a single `Tensor` as an input and returns a scalar `Tensor` output, e.g. the L1 and L2 regularizers in `tf.contrib.layers`. update_ops_collection: Optional name of TensorFlow variable collection to add the moving average update ops to. 
If not provided, we instead add the update ops as control dependencies of the output of the module. This may result in some slowdown, as the feed-forward of the network is now blocked. fused: Use nn.fused_batch_norm if True, nn.batch_normalization otherwise. name: Name of the module. Raises: KeyError: If `initializers` contains any keys other than `gamma`, `beta`, `moving_mean` or `moving_variance`. KeyError: If `partitioners` or `regularizers` contains any keys other than `gamma` or `beta`. TypeError: If any of the given initializers, partitioners or regularizers are not callable. ValueError: If `data_format` is invalid. """ super(BatchNormV2, self).__init__(name=name) if data_format not in self.SUPPORTED_DATA_FORMATS.union({None}): raise ValueError("Invalid data_format: %r" % (data_format,)) self._data_format = data_format self._offset = offset self._scale = scale self._decay_rate = decay_rate self._eps = eps self._update_ops_collection = update_ops_collection self._fused = fused self._initializers = util.check_initializers( initializers, self.POSSIBLE_INITIALIZER_KEYS) self._partitioners = util.check_partitioners( partitioners, self.POSSIBLE_PARTITIONER_KEYS) self._regularizers = util.check_regularizers( regularizers, self.POSSIBLE_REGULARIZER_KEYS) def _build_statistics(self, input_batch, use_batch_stats, stat_dtype): """Builds the statistics part of the graph when using moving variance. Args: input_batch: Input batch Tensor. use_batch_stats: Boolean to indicate if batch statistics should be calculated, otherwise moving averages are returned. stat_dtype: TensorFlow datatype to use for the moving mean and variance. Returns: Tuple of (mean, variance), each of the same datatype as `input_batch`. """ # Set up our moving statistics. When connecting in parallel, this is shared. if self.MOVING_MEAN not in self._initializers: self._initializers[self.MOVING_MEAN] = create_mean_initializer() self._moving_mean = tf.get_variable( "moving_mean", dtype=stat_dtype, shape=(self._num_channels,), collections=[ tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES, ], initializer=self._initializers[self.MOVING_MEAN], trainable=False) if self.MOVING_VARIANCE not in self._initializers: self._initializers[self.MOVING_VARIANCE] = create_variance_initializer() self._moving_variance = tf.get_variable( "moving_variance", dtype=stat_dtype, shape=(self._num_channels,), collections=[ tf.GraphKeys.MOVING_AVERAGE_VARIABLES, tf.GraphKeys.GLOBAL_VARIABLES, ], initializer=self._initializers[self.MOVING_VARIANCE], trainable=False) def build_batch_stats(): """Builds the batch statistics calculation ops.""" mean, variance = tf.nn.moments(input_batch, self._axis, keep_dims=True, name="normalize_moments") return mean, variance def build_moving_stats(): """Retrieves the moving statistics.""" # If necessary, cast the moving statistics to match the input type. # This is required by tf.nn.batch_normalization. input_dtype = input_batch.dtype if stat_dtype == input_dtype: return ( tf.identity(self._moving_mean), tf.identity(self._moving_variance), ) else: return ( tf.cast(self._moving_mean, input_dtype), tf.cast(self._moving_variance, input_dtype), ) mean, variance = contrib_framework.smart_cond( use_batch_stats, build_batch_stats, build_moving_stats, ) return mean, variance def _build_update_ops(self, mean, variance, is_training): """Builds the moving average update ops when using moving variance. Args: mean: The mean value to update with. variance: The variance value to update with. 
is_training: Boolean Tensor to indicate if we're currently in training mode. Returns: Tuple of `(update_mean_op, update_variance_op)` when `is_training` is or could be `True`. Returns `None` when `is_training=False`. """ def build_update_ops(): """Builds the exponential moving average update ops.""" update_mean_op = moving_averages.assign_moving_average( variable=self._moving_mean, value=tf.reshape(mean, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name="update_moving_mean").op update_variance_op = moving_averages.assign_moving_average( variable=self._moving_variance, value=tf.reshape(variance, (self._num_channels,)), decay=self._decay_rate, zero_debias=False, name="update_moving_variance").op return update_mean_op, update_variance_op def build_no_ops(): return tf.no_op(), tf.no_op() # Only make the ops if we know that `is_training=True`, or the value of # `is_training` is unknown. is_training_const = utils.constant_value(is_training) if is_training_const is None or is_training_const: update_mean_op, update_variance_op = contrib_framework.smart_cond( is_training, build_update_ops, build_no_ops, ) return update_mean_op, update_variance_op else: return None def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats): """Creates a fused batch normalization op.""" # Store the original shape of the mean and variance. mean_shape = mean.get_shape() variance_shape = variance.get_shape() # The fused batch norm expects the mean, variance, gamma and beta # tensors to have dimension 1, so we flatten them to remove the # extra dimensions. In addition, it expects the input_batch to have # dimension 4, so we reshape it accordingly. gamma_flatten = tf.reshape(self._gamma, shape=(self._num_channels,)) beta_flatten = tf.reshape(self._beta, shape=(self._num_channels,)) flatten_mean = tf.reshape(mean, shape=(self._num_channels,)) flatten_variance = tf.reshape(variance, shape=(self._num_channels,)) use_batch_stats = tf.convert_to_tensor(use_batch_stats) input_shape = input_batch.get_shape() output_shape = tf.shape(input_batch) flat_image_size = tf.cast(tf.reduce_prod(self._image_shape, keepdims=True), tf.int64) if len(self._data_format) == 4: fusable_data_format = self._data_format fusable_batch = input_batch elif self._channel_index == 1 and input_shape.rank > 2: fusable_data_format = "NCHW" fusable_shape = tf.concat( [[-1, self._num_channels, 1], flat_image_size], axis=0) fusable_batch = tf.reshape(input_batch, shape=fusable_shape) else: # The CPU implementation of FusedBatchNorm only supports NHWC tensor # format for now. 
fusable_data_format = "NHWC" fusable_shape = tf.concat( [[-1, 1], flat_image_size, [self._num_channels]], axis=0) fusable_batch = tf.reshape(input_batch, shape=fusable_shape) common_args = { "scale": gamma_flatten, "offset": beta_flatten, "epsilon": self._eps, "data_format": fusable_data_format, "name": "batch_norm" } def use_batch_stats_fused_batch_norm(): return tf.nn.fused_batch_norm( fusable_batch, mean=None, variance=None, is_training=True, **common_args) def moving_average_fused_batch_norm(): return tf.nn.fused_batch_norm( fusable_batch, mean=flatten_mean, variance=flatten_variance, is_training=False, **common_args) batch_norm_op, mean, variance = contrib_framework.smart_cond( use_batch_stats, use_batch_stats_fused_batch_norm, moving_average_fused_batch_norm) if len(self._data_format) != 4: batch_norm_op = tf.reshape(batch_norm_op, output_shape) mean = tf.reshape(mean, mean_shape) variance = tf.reshape(variance, variance_shape) return batch_norm_op, mean, variance def _batch_norm_op(self, input_batch, mean, variance, use_batch_stats, stat_dtype): """Creates a batch normalization op. It uses the tf.nn.batch_normalization op by default and the tf.nn.fused_batch_norm op to support fused batch normalization. Args: input_batch: A input Tensor of arbitrary dimension. mean: A mean tensor, of the same dtype as `input_batch`. variance: A variance tensor, of the same dtype as `input_batch`. use_batch_stats: A bool value that indicates whether the operation should use the batch statistics. stat_dtype: TensorFlow datatype used for the moving mean and variance. Returns: A batch normalization operation. The current mean tensor, of datatype `stat_dtype`. The current variance tensor, of datatype `stat_dtype`. """ if self._fused: # For the non-training case where not using batch stats, # pass in the moving statistic variables directly. # These will already be in the correct dtype, even for float16 input. batch_norm_op, mean, variance = self._fused_batch_norm_op( input_batch, self._moving_mean, self._moving_variance, use_batch_stats) else: if self._beta is None: beta = None else: beta = tf.reshape(self._beta, self._expanded_mean_shape) if self._gamma is None: gamma = None else: gamma = tf.reshape(self._gamma, self._expanded_mean_shape) batch_norm_op = tf.nn.batch_normalization( input_batch, tf.reshape(mean, self._expanded_mean_shape), tf.reshape(variance, self._expanded_mean_shape), beta, gamma, self._eps, name="batch_norm") # We'll echo the supplied mean and variance so that they can also be used # to update the moving statistics. Cast to matching type if necessary. if input_batch.dtype != stat_dtype: mean = tf.cast(mean, stat_dtype) variance = tf.cast(variance, stat_dtype) return batch_norm_op, mean, variance def _build_scale_offset(self, dtype): """Sets up optional scale and offset factors.""" # tf.nn.fused_batch_norm accepts float16 batch data, but not scale/offset. if self._fused and dtype == tf.float16: dtype = tf.float32 # The fused batch norm operation needs the beta, gamma variables, # so in this case we build them and set the trainable option according # to the values of _offset and _scale. 
self._beta = None if self._offset or self._fused: if self.BETA not in self._initializers: self._initializers[self.BETA] = create_beta_initializer() self._beta = tf.get_variable( self.BETA, dtype=dtype, shape=(self._num_channels,), initializer=self._initializers[self.BETA], partitioner=self._partitioners.get(self.BETA, None), regularizer=self._regularizers.get(self.BETA, None), trainable=self._offset) self._gamma = None if self._scale or self._fused: if self.GAMMA not in self._initializers: self._initializers[self.GAMMA] = create_gamma_initializer() self._gamma = tf.get_variable( self.GAMMA, dtype=dtype, shape=(self._num_channels,), initializer=self._initializers[self.GAMMA], partitioner=self._partitioners.get(self.GAMMA, None), regularizer=self._regularizers.get(self.GAMMA, None), trainable=self._scale) def _build(self, input_batch, is_training, test_local_stats=False): """Connects the BatchNormV2 module into the graph. Args: input_batch: A Tensor of the same dimension as `len(data_format)`. is_training: A boolean to indicate if the module should be connected in training mode, meaning the moving averages are updated. Can be a Tensor. test_local_stats: A boolean to indicate if local batch statistics should be used when `is_training=False`. If not, moving averages are used. By default `False`. Can be a Tensor. Returns: A tensor with the same shape as `input_batch`. Raises: base.IncompatibleShapeError: If `data_format` is not valid for the input shape. base.NotSupportedError: If `input_batch` has data type of `tf.bfloat16`. """ input_shape = input_batch.get_shape() input_shape_list = input_shape.as_list() input_shape_len = len(input_shape_list) if not self._data_format: if input_shape_len == 2: self._data_format = "NC" elif input_shape_len == 3: self._data_format = "NWC" elif input_shape_len == 4: self._data_format = "NHWC" elif input_shape_len == 5: self._data_format = "NDHWC" else: raise base.IncompatibleShapeError( "Input shape {} has too many or too few dimensions.".format( input_shape)) self._channel_index = self._data_format.index("C") # Use list to turn range into iterator in python3. self._axis = list(range(len(self._data_format))) del self._axis[self._channel_index] if len(self._data_format) != input_shape_len: raise base.IncompatibleShapeError( "Incorrect data format {} for input shape {}.".format( self._data_format, input_shape)) dtype = input_batch.dtype if self._fused and dtype == tf.bfloat16: raise base.NotSupportedError( "Fused batch norm does not support tf.bfloat16.") # Maintain moving averages at a minimum precision of tf.float32. stat_dtype = tf.float32 if dtype in [tf.float16, tf.bfloat16] else dtype self._num_channels = int(input_shape_list[self._channel_index]) if self._channel_index == 1: spatial_dimensions_slice = slice(2, input_shape_len) else: assert self._channel_index == (input_shape_len - 1) spatial_dimensions_slice = slice(1, -1) # If the spatial dimensions are not fully defined, then self._image_shape # has to be a Tensor instead of a list of python ints. 
if input_shape[spatial_dimensions_slice].is_fully_defined(): self._image_shape = [ int(x) for x in input_shape[spatial_dimensions_slice]] else: self._image_shape = tf.shape(input_batch)[spatial_dimensions_slice] self._expanded_mean_shape = [1] * input_shape_len self._expanded_mean_shape[self._channel_index] = self._num_channels use_batch_stats = is_training | test_local_stats mean, variance = self._build_statistics(input_batch, use_batch_stats, stat_dtype) # Sets up optional gamma and beta parameters self._build_scale_offset(dtype) # Sets up the batch normalization op. out, mean, variance = self._batch_norm_op(input_batch, mean, variance, use_batch_stats, stat_dtype) # Sets up the update op. update_ops = self._build_update_ops(mean, variance, is_training) # Put update ops in the update ops collection if given, otherwise add as # control dependencies of the output. if update_ops: if self._update_ops_collection: for update_op in update_ops: tf.add_to_collection(self._update_ops_collection, update_op) else: with tf.control_dependencies(update_ops): out = tf.identity(out) return out @property def initializers(self): return self._initializers @property def partitioners(self): return self._partitioners @property def regularizers(self): return self._regularizers @property def moving_mean(self): self._ensure_is_connected() return tf.reshape(self._moving_mean, self._expanded_mean_shape) @property def moving_variance(self): self._ensure_is_connected() return tf.reshape(self._moving_variance, self._expanded_mean_shape) @property def beta(self): self._ensure_is_connected() if self._beta is None: raise base.Error( "Batch normalization doesn't have an offset, so no beta") else: return tf.reshape(self._beta, self._expanded_mean_shape) @property def gamma(self): self._ensure_is_connected() if self._gamma is None: raise base.Error( "Batch normalization doesn't have a scale, so no gamma") else: return tf.reshape(self._gamma, self._expanded_mean_shape)
sonnet-1
sonnet/python/modules/batch_norm_v2.py
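A hedged sketch of connecting `BatchNormV2` in both modes, following the usage described in its class docstring (assuming the public alias `snt.BatchNormV2`; placeholders and shapes are illustrative). One instance is shared: the training connection normalizes with the current batch statistics and, because `update_ops_collection` is left as `None`, attaches the moving-average updates as control dependencies; the test connection normalizes with the accumulated moving averages:

import sonnet as snt
import tensorflow.compat.v1 as tf

train_images = tf.placeholder(tf.float32, [None, 32, 32, 3])
test_images = tf.placeholder(tf.float32, [None, 32, 32, 3])

bn = snt.BatchNormV2(offset=True, scale=True)

# Training: batch statistics are used and the moving averages are updated.
train_out = bn(train_images, is_training=True)

# Test: same beta/gamma, but normalization uses the moving averages.
test_out = bn(test_images, is_training=False)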
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for sonnet.python.modules.relational_memory.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import numpy as np from sonnet.python.modules import relational_memory import tensorflow.compat.v1 as tf class RelationalMemoryTest(parameterized.TestCase, tf.test.TestCase): def testStateSizeOutputSize(self): """Checks for correct `state_size` and `output_size` return values.""" mem_slots = 4 head_size = 32 mem = relational_memory.RelationalMemory(mem_slots, head_size) self.assertItemsEqual([mem._mem_slots, mem._mem_size], mem.state_size.as_list()) self.assertItemsEqual([mem._mem_slots * mem._mem_size], mem.output_size.as_list()) @parameterized.named_parameters( ("PreserveMatrixInput", True), ("DontPreserveMatrixInput", False) ) def testOutputStateShapes(self, treat_input_as_matrix): """Checks the shapes of RelationalMemory output and state.""" mem_slots = 4 head_size = 32 num_heads = 2 batch_size = 5 input_shape = (batch_size, 3, 3) mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads) inputs = tf.placeholder(tf.float32, input_shape) init_state = mem.initial_state(batch_size) out = mem(inputs, init_state, treat_input_as_matrix=treat_input_as_matrix) with self.test_session() as session: tf.global_variables_initializer().run() new_out, new_memory = session.run( out, feed_dict={inputs: np.zeros(input_shape)} ) self.assertAllEqual(init_state.get_shape().as_list(), new_memory.shape) self.assertAllEqual(new_out.shape, [batch_size, mem_slots * head_size * num_heads]) # Check different combinations of mem_slots and mem_size # (ie, head_size * num_heads) size to make sure init_state construction # works correctly. 
@parameterized.parameters(*zip( (2, 4, 8, 16), (2, 4, 8, 16), (1, 2, 3, 4) )) def testRecurrence(self, mem_slots, head_size, num_heads): """Checks if you can run the relational memory for 2 steps.""" batch_size = 5 num_blocks = 5 input_shape = [batch_size, 3, 1] mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads, num_blocks=num_blocks) inputs = tf.placeholder(tf.float32, input_shape) hidden_0 = mem.initial_state(batch_size) _, hidden_1 = mem(inputs, hidden_0) _, hidden_2 = mem(inputs, hidden_1) with self.test_session() as session: tf.global_variables_initializer().run() results = session.run( {"hidden_2": hidden_2, "hidden_1": hidden_1}, feed_dict={inputs: np.zeros(input_shape)} ) self.assertAllEqual(results["hidden_1"].shape, results["hidden_2"].shape) def testBadInputs(self): """Test that verifies errors are thrown for bad input arguments.""" mem_slots = 4 head_size = 32 with self.assertRaisesRegexp(ValueError, "num_blocks must be >= 1"): relational_memory.RelationalMemory(mem_slots, head_size, num_blocks=0) with self.assertRaisesRegexp(ValueError, "attention_mlp_layers must be >= 1"): relational_memory.RelationalMemory(mem_slots, head_size, attention_mlp_layers=0) with self.assertRaisesRegexp(ValueError, "gate_style must be one of"): relational_memory.RelationalMemory(mem_slots, head_size, gate_style="bad_gate") @parameterized.named_parameters( ("GateStyleUnit", "unit"), ("GateStyleMemory", "memory") ) def testGateShapes(self, gate_style): """Checks the shapes of RelationalMemory gates.""" mem_slots = 4 head_size = 32 num_heads = 4 batch_size = 4 input_shape = (batch_size, 3, 3) mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads, gate_style=gate_style) inputs = tf.placeholder(tf.float32, input_shape) init_state = mem.initial_state(batch_size) mem(inputs, init_state) gate_size = mem._calculate_gate_size() expected_size = [batch_size, num_heads, gate_size] self.assertEqual(mem.input_gate.get_shape().as_list(), expected_size) self.assertEqual(mem.forget_gate.get_shape().as_list(), expected_size) def testMemoryUpdating(self): """Checks if memory is updating correctly.""" mem_slots = 2 head_size = 32 num_heads = 4 batch_size = 5 input_shape = (batch_size, 3, 3) mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads, gate_style=None) inputs = tf.placeholder(tf.float32, input_shape) memory_0 = mem.initial_state(batch_size) _, memory_1 = mem(inputs, memory_0) with self.test_session() as session: tf.global_variables_initializer().run() results = session.run( {"memory_1": memory_1, "memory_0": memory_0}, feed_dict={inputs: np.zeros(input_shape)}) self.assertTrue(np.any(np.not_equal(results["memory_0"], results["memory_1"]))) @parameterized.named_parameters( ("GateStyleUnit", "unit"), ("GateStyleMemory", "memory") ) def testInputErasureWorking(self, gate_style): """Checks if gating is working by ignoring the input.""" mem_slots = 2 head_size = 32 num_heads = 2 batch_size = 5 input_shape = (batch_size, 3, 3) mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads, forget_bias=float("+inf"), input_bias=float("-inf"), gate_style=gate_style) inputs = tf.placeholder(tf.float32, input_shape) memory_0 = mem.initial_state(batch_size) _, memory_1 = mem(inputs, memory_0) with self.test_session() as session: tf.global_variables_initializer().run() results = session.run( {"memory_1": memory_1, "memory_0": memory_0}, feed_dict={inputs: np.ones(input_shape)}) self.assertAllEqual(results["memory_0"], results["memory_1"]) 
@parameterized.named_parameters( ("GateStyleUnit", "unit"), ("GateStyleMemory", "memory") ) def testDifferingKeyHeadSizes(self, gate_style): """Checks if arbitrary key sizes are still supported.""" mem_slots = 2 head_size = 32 num_heads = 2 key_size = 128 batch_size = 5 input_shape = (batch_size, 3, 3) mem = relational_memory.RelationalMemory(mem_slots, head_size, num_heads, gate_style=gate_style, key_size=key_size) self.assertNotEqual(key_size, mem._head_size) inputs = tf.placeholder(tf.float32, input_shape) memory_0 = mem.initial_state(batch_size) _, memory_1 = mem(inputs, memory_0) with self.test_session() as session: tf.global_variables_initializer().run() results = session.run( {"memory_1": memory_1, "memory_0": memory_0}, feed_dict={inputs: np.ones(input_shape)}) self.assertTrue(np.any(np.not_equal(results["memory_0"], results["memory_1"]))) if __name__ == "__main__": tf.test.main()
sonnet-1
sonnet/python/modules/relational_memory_test.py
# Copyright 2017 The Sonnet Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Sequential Module for TensorFlow snt. A Module that wraps a list of other modules and ops, connecting the output of each to the input of the next. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from sonnet.python.modules import base import tensorflow.compat.v1 as tf class Sequential(base.AbstractModule): """Builds a module out of a sequence of callables. Note that `Sequential` is limited in the range of possible architectures it can handle. This is a deliberate design decision; `Sequential` is only meant to be used for the simple case of fusing together modules/ops where the input of a particular module/op is the output of the previous one. Another restriction is that it is not possible to have extra arguments in the `_build` method that are passed to the constituents of the module - for example, if there is a `BatchNorm` module in `Sequential` and the user wishes to switch the `is_training` flag. If this is the desired use case, the recommended solution is to use `snt.Module` to wrap a custom function, as shown in the following example: https://github.com/deepmind/sonnet/blob/master/sonnet/examples/module_with_build_args.py """ def __init__(self, layers, name="sequential"): """Constructs a Sequential module. This feeds the output of each layer into the next and returns the output of the final layer. If a layer returns a tuple, it is assumed that this must be unpacked into the argument list of the next layer. If it is not a tuple, it is simply passed through to the next layer unchanged. Args: layers: Iterable of callables to stack together, which can be modules or ops. name: Name of the module. Raises: TypeError: If `layers` is None or contains any non-callable items. """ super(Sequential, self).__init__(name=name) # Store a copy of the iterable in a tuple to ensure users cannot modify the # iterable later, and protect against iterables which can only be read once. self._layers = tuple(layers) is_not_callable = [(i, mod) for i, mod in enumerate(self._layers) if not callable(mod)] if is_not_callable: raise TypeError("Items {} not callable with types: {}".format( ", ".join(str(i) for i, _ in is_not_callable), ", ".join(type(layer).__name__ for _, layer in is_not_callable))) def _build(self, *args): """Connects the Sequential module into the graph. Args: *args: A tuple of inputs, to be unpacked as the arguments to the first layer. Returns: The output value of the last layer. """ net = args if not self._layers: # If the sequential is passed a single arg, this will end up being # wrapped in an extra layer of tuple by *args. Normally we internally # handle this in the loop below, but if there are no layers we unpack here # in order to make Sequential([]) act like an identity, which seems right. 
if len(args) == 1: return args[0] else: return args for layer in self._layers: if isinstance(net, tuple): net = layer(*net) else: net = layer(net) return net @property def layers(self): return self._layers def get_variables(self, *args, **kwargs): """Provide a warning that get_variables on Sequential always returns ().""" tf.logging.warning( "Calling Sequential.get_variables, which will always return an empty " "tuple. get_variables() can only return variables created directly by " "a Module, or created by submodules directly created inside the " "Module. Sequential is constructed from already constructed submodules " "and so this will always be empty. See the documentation for more " "details, but tl;dr if you need to connect some modules sequentially " "and call get_variables on the result, writing a simple custom module " "is the simplest way. Another option is to call get_all_variables().") return super(Sequential, self).get_variables(*args, **kwargs)
sonnet-1
sonnet/python/modules/sequential.py
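A minimal sketch of `Sequential` (assuming the public aliases `snt.Sequential` and `snt.Linear`, as used by the tests earlier in this dump; the small MLP below is illustrative). Each callable, whether a Sonnet module or a raw TensorFlow op, consumes the output of the previous one:

import sonnet as snt
import tensorflow.compat.v1 as tf

mlp = snt.Sequential([
    snt.Linear(output_size=128),
    tf.nn.relu,
    snt.Linear(output_size=10),
])

images = tf.placeholder(tf.float32, [None, 784])
logits = mlp(images)  # Shape [None, 10]: the output of the final Linear.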