Dataset schema (one record per source file):
  python_code  string, 0 to 1.02M characters
  repo_name    string, 9 to 48 characters
  file_path    string, 5 to 114 characters
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Training file to run most of the experiments in the paper.

The default parameters correspond to the first set of experiments in
Section 4.2.

For the expansion ablation, run with different ll_thresh values as in the
paper. Note that n_y_active represents the number of *active* components at
the start, and should be set to 1, while n_y represents the maximum number of
components allowed, and should be set sufficiently high (e.g. n_y = 100).

For the MGR ablation, setting use_sup_replay = True switches to using SMGR,
and the gen_replay_type flag can switch between fixed and dynamic replay. The
generative snapshot period is set automatically in the train_curl.py file
based on these settings (i.e. the data_period variable), so the 0.1T runs can
be reproduced by dividing this value by 10.
"""

from absl import app
from absl import flags

from curl import training

flags.DEFINE_enum('dataset', 'mnist', ['mnist', 'omniglot'], 'Dataset.')
FLAGS = flags.FLAGS


def main(unused_argv):
  training.run_training(
      dataset=FLAGS.dataset,
      output_type='bernoulli',
      n_y=30,
      n_y_active=1,
      training_data_type='sequential',
      n_concurrent_classes=1,
      lr_init=1e-3,
      lr_factor=1.,
      lr_schedule=[1],
      blend_classes=False,
      train_supervised=False,
      n_steps=100000,
      report_interval=10000,
      knn_values=[10],
      random_seed=1,
      encoder_kwargs={
          'encoder_type': 'multi',
          'n_enc': [1200, 600, 300, 150],
          'enc_strides': [1],
      },
      decoder_kwargs={
          'decoder_type': 'single',
          'n_dec': [500, 500],
          'dec_up_strides': None,
      },
      n_z=32,
      dynamic_expansion=True,
      ll_thresh=-200.0,
      classify_with_samples=False,
      gen_replay_type='fixed',
      use_supervised_replay=False,
  )


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
curl/train_main.py
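The docstring above describes the expansion ablation as re-running with different ll_thresh values and a high n_y. A minimal sketch of such a sweep, reusing the same keyword arguments as main() above; the threshold grid here is illustrative, not the paper's exact values:

from curl import training

# Illustrative ll_thresh grid for the expansion ablation (assumed values).
for ll_thresh in (-100.0, -200.0, -500.0):
  training.run_training(
      dataset='mnist', output_type='bernoulli',
      n_y=100, n_y_active=1,  # max capacity set high, start with 1 component
      training_data_type='sequential', n_concurrent_classes=1,
      lr_init=1e-3, lr_factor=1., lr_schedule=[1],
      blend_classes=False, train_supervised=False,
      n_steps=100000, report_interval=10000, knn_values=[10], random_seed=1,
      encoder_kwargs={'encoder_type': 'multi',
                      'n_enc': [1200, 600, 300, 150],
                      'enc_strides': [1]},
      decoder_kwargs={'decoder_type': 'single',
                      'n_dec': [500, 500],
                      'dec_up_strides': None},
      n_z=32,
      dynamic_expansion=True,
      ll_thresh=ll_thresh,  # the ablated parameter
      classify_with_samples=False,
      gen_replay_type='fixed',
      use_supervised_replay=False,
  )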
################################################################################
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Script to train CURL."""

import collections
import functools
from absl import logging

import numpy as np
from sklearn import neighbors
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp

from curl import model
from curl import utils

tfc = tf.compat.v1

# pylint: disable=g-long-lambda

MainOps = collections.namedtuple('MainOps', [
    'elbo', 'll', 'log_p_x', 'kl_y', 'kl_z', 'elbo_supervised',
    'll_supervised', 'log_p_x_supervised', 'kl_y_supervised',
    'kl_z_supervised', 'cat_probs', 'confusion', 'purity', 'latents'
])

DatasetTuple = collections.namedtuple('DatasetTuple', [
    'train_data', 'train_iter_for_clf', 'train_data_for_clf',
    'valid_iter', 'valid_data', 'test_iter', 'test_data', 'ds_info'
])


def compute_purity(confusion):
  return np.sum(np.max(confusion, axis=0)).astype(float) / np.sum(confusion)


def process_dataset(iterator,
                    ops_to_run,
                    sess,
                    feed_dict=None,
                    aggregation_ops=np.stack,
                    processing_ops=None):
  """Process a dataset by computing ops and accumulating batch by batch.

  Args:
    iterator: iterator through the dataset.
    ops_to_run: dict, tf ops to run as part of dataset processing.
    sess: tf.Session to use.
    feed_dict: dict, required placeholders.
    aggregation_ops: fn or dict of fns, aggregation op to apply for each op.
    processing_ops: fn or dict of fns, extra processing op to apply for each
      op.

  Returns:
    Results accumulated over dataset.
  """

  if not isinstance(ops_to_run, dict):
    raise TypeError('ops_to_run must be specified as a dict')

  if not isinstance(aggregation_ops, dict):
    aggregation_ops = {k: aggregation_ops for k in ops_to_run}
  if not isinstance(processing_ops, dict):
    processing_ops = {k: processing_ops for k in ops_to_run}

  out_results = collections.OrderedDict()
  sess.run(iterator.initializer)
  while True:
    # Iterate over the whole dataset and append the results to a per-key list.
    try:
      outs = sess.run(ops_to_run, feed_dict=feed_dict)
      for key, value in outs.items():
        out_results.setdefault(key, []).append(value)

    except tf.errors.OutOfRangeError:  # end of dataset iterator
      break

  # Aggregate and process results.
  for key, value in out_results.items():
    if aggregation_ops[key]:
      out_results[key] = aggregation_ops[key](value)
    if processing_ops[key]:
      out_results[key] = processing_ops[key](out_results[key], axis=0)

  return out_results


def get_data_sources(dataset, dataset_kwargs, batch_size, test_batch_size,
                     training_data_type, n_concurrent_classes, image_key,
                     label_key):
  """Create and return data sources for training, validation, and testing.

  Args:
    dataset: str, name of dataset ('mnist', 'omniglot', etc).
    dataset_kwargs: dict, kwargs used in tf dataset constructors.
    batch_size: int, batch size used for training.
    test_batch_size: int, batch size used for evaluation.
    training_data_type: str, how training data is seen ('iid', or
      'sequential').
    n_concurrent_classes: int, # classes seen at a time (ignored for 'iid').
    image_key: str, name of image key in dataset.
    label_key: str, name of label key in dataset.

  Returns:
    A namedtuple containing all of the dataset iterators and batches.
  """

  # Load training data sources
  ds_train, ds_info = tfds.load(
      name=dataset,
      split=tfds.Split.TRAIN,
      with_info=True,
      as_dataset_kwargs={'shuffle_files': False},
      **dataset_kwargs)

  # Validate assumption that data is in [0, 255]
  assert ds_info.features[image_key].dtype == tf.uint8

  n_classes = ds_info.features[label_key].num_classes
  num_train_examples = ds_info.splits['train'].num_examples

  def preprocess_data(x):
    """Convert images from uint8 in [0, 255] to float in [0, 1]."""
    x[image_key] = tf.image.convert_image_dtype(x[image_key], tf.float32)
    return x

  if training_data_type == 'sequential':
    c = None  # The index of the class number, None for now and updated later
    if n_concurrent_classes == 1:
      filter_fn = lambda v: tf.equal(v[label_key], c)
    else:
      # Define the lowest and highest class number at each data period.
      assert n_classes % n_concurrent_classes == 0, (
          'Number of total classes must be divisible by '
          'number of concurrent classes')
      cmin = []
      cmax = []
      for i in range(int(n_classes / n_concurrent_classes)):
        for _ in range(n_concurrent_classes):
          cmin.append(i * n_concurrent_classes)
          cmax.append((i + 1) * n_concurrent_classes)

      filter_fn = lambda v: tf.logical_and(
          tf.greater_equal(v[label_key], cmin[c]), tf.less(
              v[label_key], cmax[c]))

    # Set up data sources/queues (one for each class).
    train_datasets = []
    train_iterators = []
    train_data = []

    full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
    full_ds = full_ds.map(preprocess_data)
    for c in range(n_classes):
      filtered_ds = full_ds.filter(filter_fn).batch(
          batch_size, drop_remainder=True)
      train_datasets.append(filtered_ds)
      train_iterators.append(train_datasets[-1].make_one_shot_iterator())
      train_data.append(train_iterators[-1].get_next())

  else:  # not sequential
    full_ds = ds_train.repeat().shuffle(num_train_examples, seed=0)
    full_ds = full_ds.map(preprocess_data)
    train_datasets = full_ds.batch(batch_size, drop_remainder=True)
    train_data = train_datasets.make_one_shot_iterator().get_next()

  # Set up data source to get full training set for classifier training
  full_ds = ds_train.repeat(1).shuffle(num_train_examples, seed=0)
  full_ds = full_ds.map(preprocess_data)
  train_datasets_for_classifier = full_ds.batch(
      test_batch_size, drop_remainder=True)
  train_iter_for_classifier = (
      train_datasets_for_classifier.make_initializable_iterator())
  train_data_for_classifier = train_iter_for_classifier.get_next()

  # Load validation dataset.
  try:
    valid_dataset = tfds.load(
        name=dataset, split=tfds.Split.VALIDATION, **dataset_kwargs)
    num_valid_examples = ds_info.splits[tfds.Split.VALIDATION].num_examples
    assert (num_valid_examples %
            test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
                                    num_valid_examples)
    valid_dataset = valid_dataset.repeat(1).batch(
        test_batch_size, drop_remainder=True)
    valid_dataset = valid_dataset.map(preprocess_data)
    valid_iter = valid_dataset.make_initializable_iterator()
    valid_data = valid_iter.get_next()
  except (KeyError, ValueError):
    logging.warning('No validation set!!')
    valid_iter = None
    valid_data = None

  # Load test dataset.
  test_dataset = tfds.load(
      name=dataset, split=tfds.Split.TEST, **dataset_kwargs)
  num_test_examples = ds_info.splits['test'].num_examples
  assert (num_test_examples %
          test_batch_size == 0), ('test_batch_size must be a divisor of %d' %
                                  num_test_examples)
  test_dataset = test_dataset.repeat(1).batch(
      test_batch_size, drop_remainder=True)
  test_dataset = test_dataset.map(preprocess_data)
  test_iter = test_dataset.make_initializable_iterator()
  test_data = test_iter.get_next()

  logging.info('Loaded %s data', dataset)

  return DatasetTuple(train_data, train_iter_for_classifier,
                      train_data_for_classifier, valid_iter, valid_data,
                      test_iter, test_data, ds_info)


def setup_training_and_eval_graphs(x, label, y, n_y, curl_model,
                                   classify_with_samples, is_training, name):
  """Set up the graph and return ops for training or evaluation.

  Args:
    x: tf placeholder for image.
    label: tf placeholder for ground truth label.
    y: tf placeholder for some self-supervised label/prediction.
    n_y: int, dimensionality of discrete latent variable y.
    curl_model: snt.AbstractModule representing the CURL model.
    classify_with_samples: bool, whether to *sample* latents for
      classification.
    is_training: bool, whether this graph is the training graph.
    name: str, graph name.

  Returns:
    A namedtuple with the required graph ops to perform training or evaluation.
  """
  # kl_y_supervised is -log q(y=y_true | x)
  (log_p_x, kl_y, kl_z, log_p_x_supervised, kl_y_supervised,
   kl_z_supervised) = curl_model.log_prob_elbo_components(x, y)

  ll = log_p_x - kl_y - kl_z
  elbo = -tf.reduce_mean(ll)

  # Supervised loss, either for SMGR, or adaptation to supervised benchmark.
  ll_supervised = log_p_x_supervised - kl_y_supervised - kl_z_supervised
  elbo_supervised = -tf.reduce_mean(ll_supervised)

  # Summaries
  kl_y = tf.reduce_mean(kl_y)
  kl_z = tf.reduce_mean(kl_z)
  log_p_x_supervised = tf.reduce_mean(log_p_x_supervised)
  kl_y_supervised = tf.reduce_mean(kl_y_supervised)
  kl_z_supervised = tf.reduce_mean(kl_z_supervised)

  # Evaluation.
  hiddens = curl_model.get_shared_rep(x, is_training=is_training)
  cat = curl_model.infer_cluster(hiddens)
  cat_probs = cat.probs

  confusion = tf.confusion_matrix(label, tf.argmax(cat_probs, axis=1),
                                  num_classes=n_y, name=name + '_confusion')
  purity = (tf.reduce_sum(tf.reduce_max(confusion, axis=0))
            / tf.reduce_sum(confusion))

  if classify_with_samples:
    latents = curl_model.infer_latent(
        hiddens=hiddens, y=tf.to_float(cat.sample())).sample()
  else:
    latents = curl_model.infer_latent(
        hiddens=hiddens, y=tf.to_float(cat.mode())).mean()

  return MainOps(elbo, ll, log_p_x, kl_y, kl_z, elbo_supervised, ll_supervised,
                 log_p_x_supervised, kl_y_supervised, kl_z_supervised,
                 cat_probs, confusion, purity, latents)


def get_generated_data(sess, gen_op, y_input, gen_buffer_size,
                       component_counts):
  """Get generated model data (in place of saving a model snapshot).

  Args:
    sess: tf.Session.
    gen_op: tf op representing a batch of generated data.
    y_input: tf placeholder for which mixture components to generate from.
    gen_buffer_size: int, number of data points to generate.
    component_counts: np.array, prior probabilities over components.

  Returns:
    A tuple of two numpy arrays
      The generated data
      The corresponding labels
  """
  batch_size, n_y = y_input.shape.as_list()

  # Sample based on the history of all components used.
  cluster_sample_probs = component_counts.astype(float)
  cluster_sample_probs = np.maximum(1e-12, cluster_sample_probs)
  cluster_sample_probs = cluster_sample_probs / np.sum(cluster_sample_probs)

  # Now generate the data based on the specified cluster prior.
  gen_buffer_images = []
  gen_buffer_labels = []
  for _ in range(gen_buffer_size):
    gen_label = np.random.choice(
        np.arange(n_y),
        size=(batch_size,),
        replace=True,
        p=cluster_sample_probs)
    y_gen_posterior_vals = np.zeros((batch_size, n_y))
    y_gen_posterior_vals[np.arange(batch_size), gen_label] = 1
    gen_image = sess.run(gen_op, feed_dict={y_input: y_gen_posterior_vals})
    gen_buffer_images.append(gen_image)
    gen_buffer_labels.append(gen_label)

  gen_buffer_images = np.vstack(gen_buffer_images)
  gen_buffer_labels = np.concatenate(gen_buffer_labels)

  return gen_buffer_images, gen_buffer_labels


def setup_dynamic_ops(n_y):
  """Set up ops to move / copy mixture component weights for dynamic expansion.

  Args:
    n_y: int, dimensionality of discrete latent variable y.

  Returns:
    A dict containing all of the ops required for dynamic updating.
  """
  # Set up graph ops to dynamically modify component params.
  graph = tf.get_default_graph()

  # 1) Ops to get and set latent encoder params (entire tensors)
  latent_enc_tensors = {}
  for k in range(n_y):
    latent_enc_tensors['latent_w_' + str(k)] = graph.get_tensor_by_name(
        'latent_encoder/mlp_latent_encoder_{}/w:0'.format(k))
    latent_enc_tensors['latent_b_' + str(k)] = graph.get_tensor_by_name(
        'latent_encoder/mlp_latent_encoder_{}/b:0'.format(k))

  latent_enc_assign_ops = {}
  latent_enc_phs = {}
  for key, tensor in latent_enc_tensors.items():
    latent_enc_phs[key] = tfc.placeholder(tensor.dtype, tensor.shape)
    latent_enc_assign_ops[key] = tf.assign(tensor, latent_enc_phs[key])

  # 2) Ops to get and set cluster encoder params (columns of a tensor)
  # We will be copying column ind_from to column ind_to.
  cluster_w = graph.get_tensor_by_name(
      'cluster_encoder/mlp_cluster_encoder_final/w:0')
  cluster_b = graph.get_tensor_by_name(
      'cluster_encoder/mlp_cluster_encoder_final/b:0')

  ind_from = tfc.placeholder(dtype=tf.int32)
  ind_to = tfc.placeholder(dtype=tf.int32)

  # Determine indices of cluster encoder weights and biases to be updated
  w_indices = tf.transpose(
      tf.stack([
          tf.range(cluster_w.shape[0], dtype=tf.int32),
          ind_to * tf.ones(shape=(cluster_w.shape[0],), dtype=tf.int32)
      ]))
  b_indices = ind_to
  # Determine updates themselves
  cluster_w_updates = tf.squeeze(
      tf.slice(cluster_w, begin=(0, ind_from), size=(cluster_w.shape[0], 1)))
  cluster_b_updates = cluster_b[ind_from]

  # Create update ops
  cluster_w_update_op = tf.scatter_nd_update(cluster_w, w_indices,
                                             cluster_w_updates)
  cluster_b_update_op = tf.scatter_update(cluster_b, b_indices,
                                          cluster_b_updates)

  # 3) Ops to get and set latent prior params (columns of a tensor)
  # We will be copying column ind_from to column ind_to.
  latent_prior_mu_w = graph.get_tensor_by_name(
      'latent_decoder/latent_prior_mu/w:0')
  latent_prior_sigma_w = graph.get_tensor_by_name(
      'latent_decoder/latent_prior_sigma/w:0')

  mu_indices = tf.transpose(
      tf.stack([
          ind_to * tf.ones(shape=(latent_prior_mu_w.shape[1],),
                           dtype=tf.int32),
          tf.range(latent_prior_mu_w.shape[1], dtype=tf.int32)
      ]))
  mu_updates = tf.squeeze(
      tf.slice(
          latent_prior_mu_w,
          begin=(ind_from, 0),
          size=(1, latent_prior_mu_w.shape[1])))
  mu_update_op = tf.scatter_nd_update(latent_prior_mu_w, mu_indices,
                                      mu_updates)
  sigma_indices = tf.transpose(
      tf.stack([
          ind_to * tf.ones(shape=(latent_prior_sigma_w.shape[1],),
                           dtype=tf.int32),
          tf.range(latent_prior_sigma_w.shape[1], dtype=tf.int32)
      ]))
  sigma_updates = tf.squeeze(
      tf.slice(
          latent_prior_sigma_w,
          begin=(ind_from, 0),
          size=(1, latent_prior_sigma_w.shape[1])))
  sigma_update_op = tf.scatter_nd_update(latent_prior_sigma_w, sigma_indices,
                                         sigma_updates)

  dynamic_ops = {
      'ind_from_ph': ind_from,
      'ind_to_ph': ind_to,
      'latent_enc_tensors': latent_enc_tensors,
      'latent_enc_assign_ops': latent_enc_assign_ops,
      'latent_enc_phs': latent_enc_phs,
      'cluster_w_update_op': cluster_w_update_op,
      'cluster_b_update_op': cluster_b_update_op,
      'mu_update_op': mu_update_op,
      'sigma_update_op': sigma_update_op
  }

  return dynamic_ops


def copy_component_params(ind_from, ind_to, sess, ind_from_ph, ind_to_ph,
                          latent_enc_tensors, latent_enc_assign_ops,
                          latent_enc_phs, cluster_w_update_op,
                          cluster_b_update_op, mu_update_op, sigma_update_op):
  """Copy parameters from component i to component j.

  Args:
    ind_from: int, component index to copy from.
    ind_to: int, component index to copy to.
    sess: tf.Session.
    ind_from_ph: tf placeholder for component to copy from.
    ind_to_ph: tf placeholder for component to copy to.
    latent_enc_tensors: dict, tensors in the latent posterior encoder.
    latent_enc_assign_ops: dict, assignment ops for latent posterior encoder.
    latent_enc_phs: dict, placeholders for assignment ops.
    cluster_w_update_op: op for updating weights of cluster encoder.
    cluster_b_update_op: op for updating biases of cluster encoder.
    mu_update_op: op for updating mu weights of latent prior.
    sigma_update_op: op for updating sigma weights of latent prior.
  """
  update_ops = []
  feed_dict = {}
  # Copy for latent encoder.
  new_w_val, new_b_val = sess.run([
      latent_enc_tensors['latent_w_' + str(ind_from)],
      latent_enc_tensors['latent_b_' + str(ind_from)]
  ])
  update_ops.extend([
      latent_enc_assign_ops['latent_w_' + str(ind_to)],
      latent_enc_assign_ops['latent_b_' + str(ind_to)]
  ])
  feed_dict.update({
      latent_enc_phs['latent_w_' + str(ind_to)]: new_w_val,
      latent_enc_phs['latent_b_' + str(ind_to)]: new_b_val
  })

  # Copy for cluster encoder softmax.
  update_ops.extend([cluster_w_update_op, cluster_b_update_op])
  feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})

  # Copy for latent prior.
  update_ops.extend([mu_update_op, sigma_update_op])
  feed_dict.update({ind_from_ph: ind_from, ind_to_ph: ind_to})
  sess.run(update_ops, feed_dict)


def run_training(dataset, training_data_type, n_concurrent_classes,
                 blend_classes, train_supervised, n_steps, random_seed,
                 lr_init, lr_factor, lr_schedule, output_type, n_y,
                 n_y_active, n_z, encoder_kwargs, decoder_kwargs,
                 dynamic_expansion, ll_thresh, classify_with_samples,
                 report_interval, knn_values, gen_replay_type,
                 use_supervised_replay):
  """Run training script.

  Args:
    dataset: str, name of the dataset.
    training_data_type: str, type of training run ('iid' or 'sequential').
    n_concurrent_classes: int, # of classes seen at a time (ignored for 'iid').
    blend_classes: bool, whether to blend in samples from the next class.
    train_supervised: bool, whether to use supervision during training.
    n_steps: int, number of total training steps.
    random_seed: int, seed for tf and numpy RNG.
    lr_init: float, initial learning rate.
    lr_factor: float, learning rate decay factor.
    lr_schedule: list, epochs at which the decay should be applied.
    output_type: str, output distribution (currently only 'bernoulli').
    n_y: int, maximum possible dimensionality of discrete latent variable y.
    n_y_active: int, starting dimensionality of discrete latent variable y.
    n_z: int, dimensionality of continuous latent variable z.
    encoder_kwargs: dict, parameters to specify encoder.
    decoder_kwargs: dict, parameters to specify decoder.
    dynamic_expansion: bool, whether to perform dynamic expansion.
    ll_thresh: float, log-likelihood threshold below which to keep poor
      samples.
    classify_with_samples: bool, whether to sample latents when classifying.
    report_interval: int, number of steps after which to evaluate and report.
    knn_values: list of ints, k values for different k-NN classifiers to run
      (values of 3, 5, and 10 were used in different parts of the paper).
    gen_replay_type: str, 'fixed', 'dynamic', or None.
    use_supervised_replay: bool, whether to use supervised replay (aka 'SMGR').
  """

  # Set tf random seed.
  tfc.set_random_seed(random_seed)
  np.set_printoptions(precision=2, suppress=True)

  # First set up the data source(s) and get dataset info.
  if dataset == 'mnist':
    batch_size = 100
    test_batch_size = 1000
    dataset_kwargs = {}
    image_key = 'image'
    label_key = 'label'
  elif dataset == 'omniglot':
    batch_size = 15
    test_batch_size = 1318
    dataset_kwargs = {}
    image_key = 'image'
    label_key = 'alphabet'
  else:
    raise NotImplementedError

  dataset_ops = get_data_sources(dataset, dataset_kwargs, batch_size,
                                 test_batch_size, training_data_type,
                                 n_concurrent_classes, image_key, label_key)
  train_data = dataset_ops.train_data
  train_data_for_clf = dataset_ops.train_data_for_clf
  valid_data = dataset_ops.valid_data
  test_data = dataset_ops.test_data

  output_shape = dataset_ops.ds_info.features[image_key].shape
  n_x = np.prod(output_shape)
  n_classes = dataset_ops.ds_info.features[label_key].num_classes
  num_train_examples = dataset_ops.ds_info.splits['train'].num_examples

  # Check that the number of classes is compatible with the training scenario
  assert n_classes % n_concurrent_classes == 0
  assert n_steps % (n_classes / n_concurrent_classes) == 0

  # Set specific params depending on the type of gen replay
  if gen_replay_type == 'fixed':
    data_period = int(n_steps / (n_classes / n_concurrent_classes))
    gen_every_n = 2  # Blend in a gen replay batch every 2 steps
    gen_refresh_period = data_period  # How often to refresh the batches of
    # generated data (equivalent to snapshotting a generative model)
    gen_refresh_on_expansion = False  # Don't refresh on dyn expansion
  elif gen_replay_type == 'dynamic':
    gen_every_n = 2  # Blend in a gen replay batch every 2 steps
    gen_refresh_period = 1e8  # Never refresh generated data periodically
    gen_refresh_on_expansion = True  # Refresh on dyn expansion instead
  elif gen_replay_type is None:
    gen_every_n = 0  # Don't use any gen replay batches
    gen_refresh_period = 1e8  # Never refresh generated data periodically
    gen_refresh_on_expansion = False  # Don't refresh on dyn expansion
  else:
    raise NotImplementedError

  max_gen_batches = 5000  # Max num of gen batches (proxy for storing a model)

  # Set dynamic expansion parameters
  exp_wait_steps = 100  # Steps to wait after expansion before eligible again
  exp_burn_in = 100  # Steps to wait at start of learning before eligible
  exp_buffer_size = 100  # Size of the buffer of poorly explained data
  num_buffer_train_steps = 10  # Num steps to train component on buffer

  # Define a global tf variable for the number of active components.
  n_y_active_np = n_y_active
  n_y_active = tfc.get_variable(
      initializer=tf.constant(n_y_active_np, dtype=tf.int32),
      trainable=False,
      name='n_y_active',
      dtype=tf.int32)

  logging.info('Starting CURL script on %s data.', dataset)

  # Set up placeholders for training.
  x_train_raw = tfc.placeholder(
      dtype=tf.float32, shape=(batch_size,) + output_shape)
  label_train = tfc.placeholder(dtype=tf.int32, shape=(batch_size,))

  def binarize_fn(x):
    """Binarize a Bernoulli by rounding the probabilities.

    Args:
      x: tf tensor, input image.

    Returns:
      A tf tensor with the binarized image
    """
    return tf.cast(tf.greater(x, 0.5 * tf.ones_like(x)), tf.float32)

  if dataset == 'mnist':
    x_train = binarize_fn(x_train_raw)
    x_valid = binarize_fn(valid_data[image_key]) if valid_data else None
    x_test = binarize_fn(test_data[image_key])
    x_train_for_clf = binarize_fn(train_data_for_clf[image_key])
  elif 'cifar' in dataset or dataset == 'omniglot':
    x_train = x_train_raw
    x_valid = valid_data[image_key] if valid_data else None
    x_test = test_data[image_key]
    x_train_for_clf = train_data_for_clf[image_key]
  else:
    raise ValueError('Unknown dataset {}'.format(dataset))

  label_valid = valid_data[label_key] if valid_data else None
  label_test = test_data[label_key]

  # Set up CURL modules.
  shared_encoder = model.SharedEncoder(name='shared_encoder', **encoder_kwargs)
  latent_encoder = functools.partial(model.latent_encoder_fn, n_y=n_y, n_z=n_z)
  latent_encoder = snt.Module(latent_encoder, name='latent_encoder')
  latent_decoder = functools.partial(model.latent_decoder_fn, n_z=n_z)
  latent_decoder = snt.Module(latent_decoder, name='latent_decoder')
  cluster_encoder = functools.partial(
      model.cluster_encoder_fn, n_y_active=n_y_active, n_y=n_y)
  cluster_encoder = snt.Module(cluster_encoder, name='cluster_encoder')
  data_decoder = functools.partial(
      model.data_decoder_fn,
      output_type=output_type,
      output_shape=output_shape,
      n_x=n_x,
      n_y=n_y,
      **decoder_kwargs)
  data_decoder = snt.Module(data_decoder, name='data_decoder')

  # Uniform prior over y.
  prior_train_probs = utils.construct_prior_probs(batch_size, n_y, n_y_active)
  prior_train = snt.Module(
      lambda: tfp.distributions.OneHotCategorical(probs=prior_train_probs),
      name='prior_unconditional_train')
  prior_test_probs = utils.construct_prior_probs(test_batch_size, n_y,
                                                 n_y_active)
  prior_test = snt.Module(
      lambda: tfp.distributions.OneHotCategorical(probs=prior_test_probs),
      name='prior_unconditional_test')

  model_train = model.Curl(
      prior_train,
      latent_decoder,
      data_decoder,
      shared_encoder,
      cluster_encoder,
      latent_encoder,
      n_y_active,
      is_training=True,
      name='curl_train')
  model_eval = model.Curl(
      prior_test,
      latent_decoder,
      data_decoder,
      shared_encoder,
      cluster_encoder,
      latent_encoder,
      n_y_active,
      is_training=False,
      name='curl_test')

  # Set up training graph
  y_train = label_train if train_supervised else None
  y_valid = label_valid if train_supervised else None
  y_test = label_test if train_supervised else None

  train_ops = setup_training_and_eval_graphs(
      x_train,
      label_train,
      y_train,
      n_y,
      model_train,
      classify_with_samples,
      is_training=True,
      name='train')

  hiddens_for_clf = model_eval.get_shared_rep(x_train_for_clf,
                                              is_training=False)
  cat_for_clf = model_eval.infer_cluster(hiddens_for_clf)

  if classify_with_samples:
    latents_for_clf = model_eval.infer_latent(
        hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.sample())).sample()
  else:
    latents_for_clf = model_eval.infer_latent(
        hiddens=hiddens_for_clf, y=tf.to_float(cat_for_clf.mode())).mean()

  # Set up validation graph
  if valid_data is not None:
    valid_ops = setup_training_and_eval_graphs(
        x_valid,
        label_valid,
        y_valid,
        n_y,
        model_eval,
        classify_with_samples,
        is_training=False,
        name='valid')

  # Set up test graph
  test_ops = setup_training_and_eval_graphs(
      x_test,
      label_test,
      y_test,
      n_y,
      model_eval,
      classify_with_samples,
      is_training=False,
      name='test')

  # Set up optimizer (with scheduler).
  global_step = tf.train.get_or_create_global_step()
  lr_schedule = [
      tf.cast(el * num_train_examples / batch_size, tf.int64)
      for el in lr_schedule
  ]
  num_schedule_steps = tf.reduce_sum(
      tf.cast(global_step >= lr_schedule, tf.float32))
  lr = float(lr_init) * float(lr_factor)**num_schedule_steps
  optimizer = tf.train.AdamOptimizer(learning_rate=lr)
  with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    train_step = optimizer.minimize(train_ops.elbo)
    train_step_supervised = optimizer.minimize(train_ops.elbo_supervised)

    # For dynamic expansion, we want to train only new-component-related params
    cat_params = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES,
        'cluster_encoder/mlp_cluster_encoder_final')
    component_params = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES,
        'latent_encoder/mlp_latent_encoder_*')
    prior_params = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, 'latent_decoder/latent_prior*')

    train_step_expansion = optimizer.minimize(
        train_ops.elbo_supervised,
        var_list=cat_params + component_params + prior_params)

  # Set up ops for generative replay
  if gen_every_n > 0:
    # How many generative batches will we use each period?
    gen_buffer_size = min(
        int(gen_refresh_period / gen_every_n), max_gen_batches)

    # Class each sample should be drawn from (default to uniform prior)
    y_gen = tfp.distributions.OneHotCategorical(
        probs=np.ones((batch_size, n_y)) / n_y,
        dtype=tf.float32,
        name='extra_train_classes').sample()

    gen_samples = model_train.sample(y=y_gen, mean=True)
    if dataset == 'mnist' or dataset == 'omniglot':
      gen_samples = binarize_fn(gen_samples)

  # Set up ops to dynamically modify parameters (for dynamic expansion)
  dynamic_ops = setup_dynamic_ops(n_y)

  logging.info('Created computation graph.')

  n_steps_per_class = n_steps / n_classes  # pylint: disable=invalid-name

  cumulative_component_counts = np.array([0] * n_y).astype(float)
  recent_component_counts = np.array([0] * n_y).astype(float)

  gen_buffer_ind = 0

  # Buffer of poorly explained data (if we're doing dynamic expansion).
  poor_data_buffer = []
  poor_data_labels = []
  all_full_poor_data_buffers = []
  all_full_poor_data_labels = []
  has_expanded = False
  steps_since_expansion = 0
  eligible_for_expansion = False  # Flag to ensure we wait a bit after expansion

  # Set up basic ops to run and quantities to log.
  ops_to_run = {
      'train_ELBO': train_ops.elbo,
      'train_log_p_x': train_ops.log_p_x,
      'train_kl_y': train_ops.kl_y,
      'train_kl_z': train_ops.kl_z,
      'train_ll': train_ops.ll,
      'train_batch_purity': train_ops.purity,
      'train_probs': train_ops.cat_probs,
      'n_y_active': n_y_active
  }
  if valid_data is not None:
    valid_ops_to_run = {
        'valid_ELBO': valid_ops.elbo,
        'valid_kl_y': valid_ops.kl_y,
        'valid_kl_z': valid_ops.kl_z,
        'valid_confusion': valid_ops.confusion
    }
  else:
    valid_ops_to_run = {}
  test_ops_to_run = {
      'test_ELBO': test_ops.elbo,
      'test_kl_y': test_ops.kl_y,
      'test_kl_z': test_ops.kl_z,
      'test_confusion': test_ops.confusion
  }
  to_log = ['train_batch_purity']
  to_log_eval = ['test_purity', 'test_ELBO', 'test_kl_y', 'test_kl_z']
  if valid_data is not None:
    to_log_eval += ['valid_ELBO', 'valid_purity']

  if train_supervised:
    # Track supervised losses, train on supervised loss.
    ops_to_run.update({
        'train_ELBO_supervised': train_ops.elbo_supervised,
        'train_log_p_x_supervised': train_ops.log_p_x_supervised,
        'train_kl_y_supervised': train_ops.kl_y_supervised,
        'train_kl_z_supervised': train_ops.kl_z_supervised,
        'train_ll_supervised': train_ops.ll_supervised
    })
    default_train_step = train_step_supervised
    to_log += [
        'train_ELBO_supervised', 'train_log_p_x_supervised',
        'train_kl_y_supervised', 'train_kl_z_supervised'
    ]
  else:
    # Track unsupervised losses, train on unsupervised loss.
    ops_to_run.update({
        'train_ELBO': train_ops.elbo,
        'train_kl_y': train_ops.kl_y,
        'train_kl_z': train_ops.kl_z,
        'train_ll': train_ops.ll
    })
    default_train_step = train_step
    to_log += ['train_ELBO', 'train_kl_y', 'train_kl_z']

  with tf.train.SingularMonitoredSession() as sess:

    for step in range(n_steps):
      feed_dict = {}

      # Use the default training loss, but vary it each step depending on the
      # training scenario (eg. for supervised gen replay, we alternate losses)
      ops_to_run['train_step'] = default_train_step

      ### 1) PERIODICALLY TAKE SNAPSHOTS FOR GENERATIVE REPLAY ###
      if (gen_refresh_period and step % gen_refresh_period == 0 and
          gen_every_n > 0):

        # First, increment cumulative count and reset recent probs count.
        cumulative_component_counts += recent_component_counts
        recent_component_counts = np.zeros(n_y)

        # Generate enough samples for the rest of the next period
        # (Functionally equivalent to storing and sampling from the model).
        gen_buffer_images, gen_buffer_labels = get_generated_data(
            sess=sess,
            gen_op=gen_samples,
            y_input=y_gen,
            gen_buffer_size=gen_buffer_size,
            component_counts=cumulative_component_counts)

      ### 2) DECIDE WHICH DATA SOURCE TO USE (GENERATIVE OR REAL DATA) ###
      periodic_refresh_started = (
          gen_refresh_period and step >= gen_refresh_period)
      refresh_on_expansion_started = (gen_refresh_on_expansion and has_expanded)
      if ((periodic_refresh_started or refresh_on_expansion_started) and
          gen_every_n > 0 and step % gen_every_n == 1):
        # Use generated data for the training batch
        used_real_data = False

        s = gen_buffer_ind * batch_size
        e = (gen_buffer_ind + 1) * batch_size

        gen_data_array = {
            'image': gen_buffer_images[s:e],
            'label': gen_buffer_labels[s:e]
        }
        gen_buffer_ind = (gen_buffer_ind + 1) % gen_buffer_size

        # Feed it as x_train because it's already reshaped and binarized.
        feed_dict.update({
            x_train: gen_data_array['image'],
            label_train: gen_data_array['label']
        })

        if use_supervised_replay:
          # Convert label to one-hot before feeding in.
          gen_label_onehot = np.eye(n_y)[gen_data_array['label']]
          feed_dict.update({model_train.y_label: gen_label_onehot})
          ops_to_run['train_step'] = train_step_supervised

      else:
        # Else use the standard training data sources.
        used_real_data = True

        # Select appropriate data source for iid or sequential setup.
        if training_data_type == 'sequential':
          current_data_period = int(
              min(step / n_steps_per_class, len(train_data) - 1))

          # If training supervised, set n_y_active directly based on how many
          # classes have been seen
          if train_supervised:
            assert not dynamic_expansion
            n_y_active_np = n_concurrent_classes * (
                current_data_period // n_concurrent_classes + 1)
            n_y_active.load(n_y_active_np, sess)

          train_data_array = sess.run(train_data[current_data_period])

          # If we are blending classes, figure out where we are in the data
          # period and add some fraction of other samples.
          if blend_classes:
            # If in the first quarter, blend in examples from the previous
            # class.
            if (step % n_steps_per_class < n_steps_per_class / 4 and
                current_data_period > 0):
              other_train_data_array = sess.run(
                  train_data[current_data_period - 1])

              num_other = int(
                  (n_steps_per_class / 2 - 2 * (step % n_steps_per_class)) *
                  batch_size / n_steps_per_class)
              other_inds = np.random.permutation(batch_size)[:num_other]

              train_data_array[image_key][:num_other] = other_train_data_array[
                  image_key][other_inds]
              train_data_array[label_key][:num_other] = other_train_data_array[
                  label_key][other_inds]

            # If in the last quarter, blend in examples from the next class.
            elif (step % n_steps_per_class > 3 * n_steps_per_class / 4 and
                  current_data_period < n_classes - 1):
              other_train_data_array = sess.run(
                  train_data[current_data_period + 1])

              num_other = int(
                  (2 * (step % n_steps_per_class) -
                   3 * n_steps_per_class / 2) * batch_size / n_steps_per_class)
              other_inds = np.random.permutation(batch_size)[:num_other]

              train_data_array[image_key][:num_other] = other_train_data_array[
                  image_key][other_inds]
              train_data_array[label_key][:num_other] = other_train_data_array[
                  label_key][other_inds]

            # Otherwise, just use the current class.

        else:
          train_data_array = sess.run(train_data)

        feed_dict.update({
            x_train_raw: train_data_array[image_key],
            label_train: train_data_array[label_key]
        })

      ### 3) PERFORM A GRADIENT STEP ###
      results = sess.run(ops_to_run, feed_dict=feed_dict)
      del results['train_step']

      ### 4) COMPUTE ADDITIONAL DIAGNOSTIC OPS ON VALIDATION/TEST SETS. ###
      if (step + 1) % report_interval == 0:
        if valid_data is not None:
          logging.info('Evaluating on validation and test set!')
          proc_ops = {
              k: (np.sum if 'confusion' in k else np.mean)
              for k in valid_ops_to_run
          }
          results.update(
              process_dataset(
                  dataset_ops.valid_iter,
                  valid_ops_to_run,
                  sess,
                  feed_dict=feed_dict,
                  processing_ops=proc_ops))
          results['valid_purity'] = compute_purity(results['valid_confusion'])
        else:
          logging.info('Evaluating on test set!')
        proc_ops = {
            k: (np.sum if 'confusion' in k else np.mean)
            for k in test_ops_to_run
        }
        results.update(process_dataset(dataset_ops.test_iter,
                                       test_ops_to_run,
                                       sess,
                                       feed_dict=feed_dict,
                                       processing_ops=proc_ops))
        results['test_purity'] = compute_purity(results['test_confusion'])

        curr_to_log = to_log + to_log_eval
      else:
        curr_to_log = list(to_log)  # copy to prevent in-place modifications

      ### 5) DYNAMIC EXPANSION ###
      if dynamic_expansion and used_real_data:
        # If we're doing dynamic expansion and below max capacity then add
        # poorly defined data points to a buffer.

        # First check whether the model is eligible for expansion (the model
        # becomes ineligible for a fixed time after each expansion, and when
        # it has hit max capacity).
        if (steps_since_expansion >= exp_wait_steps and step >= exp_burn_in and
            n_y_active_np < n_y):
          eligible_for_expansion = True

        steps_since_expansion += 1

        if eligible_for_expansion:
          # Add poorly explained data samples to a buffer.
          poor_inds = results['train_ll'] < ll_thresh
          poor_data_buffer.extend(feed_dict[x_train_raw][poor_inds])
          poor_data_labels.extend(feed_dict[label_train][poor_inds])

          n_poor_data = len(poor_data_buffer)

          # If buffer is big enough, then add a new component and train just
          # the new component with several steps of gradient descent.
          # (We just feed in a onehot cluster vector to indicate which
          # component).
          if n_poor_data >= exp_buffer_size:
            # Dump the buffers so we can log them.
            all_full_poor_data_buffers.append(poor_data_buffer)
            all_full_poor_data_labels.append(poor_data_labels)

            # Take a new generative snapshot if specified.
            if gen_refresh_on_expansion and gen_every_n > 0:
              # Increment cumulative count and reset recent probs count.
              cumulative_component_counts += recent_component_counts
              recent_component_counts = np.zeros(n_y)

              gen_buffer_images, gen_buffer_labels = get_generated_data(
                  sess=sess,
                  gen_op=gen_samples,
                  y_input=y_gen,
                  gen_buffer_size=gen_buffer_size,
                  component_counts=cumulative_component_counts)

            # Cull to a multiple of batch_size (keep the later data samples).
            n_poor_batches = int(n_poor_data / batch_size)
            poor_data_buffer = poor_data_buffer[-(n_poor_batches *
                                                  batch_size):]
            poor_data_labels = poor_data_labels[-(n_poor_batches *
                                                  batch_size):]

            # Find most probable component (on poor batch).
            poor_cprobs = []
            for bs in range(n_poor_batches):
              poor_cprobs.append(
                  sess.run(
                      train_ops.cat_probs,
                      feed_dict={
                          x_train_raw:
                              poor_data_buffer[bs * batch_size:(bs + 1) *
                                               batch_size]
                      }))
            best_cluster = np.argmax(np.sum(np.vstack(poor_cprobs), axis=0))

            # Initialize parameters of the new component from most prob
            # existing.
            new_cluster = n_y_active_np
            copy_component_params(best_cluster, new_cluster, sess,
                                  **dynamic_ops)

            # Increment mixture component count n_y_active.
            n_y_active_np += 1
            n_y_active.load(n_y_active_np, sess)

            # Perform a number of steps of gradient descent on the data
            # buffer, training only the new component (supervised loss).
            for _ in range(num_buffer_train_steps):
              for bs in range(n_poor_batches):
                x_batch = poor_data_buffer[bs * batch_size:(bs + 1) *
                                           batch_size]
                label_batch = [new_cluster] * batch_size
                label_onehot_batch = np.eye(n_y)[label_batch]
                _ = sess.run(
                    train_step_expansion,
                    feed_dict={
                        x_train_raw: x_batch,
                        model_train.y_label: label_onehot_batch
                    })

            # Empty the buffer.
            poor_data_buffer = []
            poor_data_labels = []

            # Reset the threshold flag so we have a burn in before the next
            # component.
            eligible_for_expansion = False
            has_expanded = True
            steps_since_expansion = 0

      # Accumulate counts.
      if used_real_data:
        train_cat_probs_vals = results['train_probs']
        recent_component_counts += np.sum(
            train_cat_probs_vals, axis=0).astype(float)

      ### 6) LOGGING AND EVALUATION ###
      cleanup_for_print = lambda x: ', {}: %.{}f'.format(
          x.capitalize().replace('_', ' '), 3)
      log_str = 'Iteration %d'
      log_str += ''.join([cleanup_for_print(el) for el in curr_to_log])
      log_str += ' n_active: %d'
      logging.info(
          log_str,
          *([step] + [results[el] for el in curr_to_log] + [n_y_active_np]))

      # Periodically perform evaluation
      if (step + 1) % report_interval == 0:

        # Report test purity and related measures
        logging.info(
            'Iteration %d, Test purity: %.3f, Test ELBO: %.3f, Test '
            'KLy: %.3f, Test KLz: %.3f', step, results['test_purity'],
            results['test_ELBO'], results['test_kl_y'],
            results['test_kl_z'])
        # Flush data only once in a while to allow buffering of data for more
        # efficient writes.
        results['all_full_poor_data_buffers'] = all_full_poor_data_buffers
        results['all_full_poor_data_labels'] = all_full_poor_data_labels
        logging.info('Also training a classifier in latent space')

        # Perform knn classification from latents, to evaluate
        # discriminability.

        # Get and encode training and test datasets.
        clf_train_vals = process_dataset(
            dataset_ops.train_iter_for_clf, {
                'latents': latents_for_clf,
                'labels': train_data_for_clf[label_key]
            },
            sess,
            feed_dict,
            aggregation_ops=np.concatenate)
        clf_test_vals = process_dataset(
            dataset_ops.test_iter, {
                'latents': test_ops.latents,
                'labels': test_data[label_key]
            },
            sess,
            aggregation_ops=np.concatenate)

        # Perform knn classification.
        knn_models = []
        for nval in knn_values:
          # Fit training dataset.
          clf = neighbors.KNeighborsClassifier(n_neighbors=nval)
          clf.fit(clf_train_vals['latents'], clf_train_vals['labels'])
          knn_models.append(clf)

          results['train_' + str(nval) + 'nn_acc'] = clf.score(
              clf_train_vals['latents'], clf_train_vals['labels'])

          # Get test performance.
          results['test_' + str(nval) + 'nn_acc'] = clf.score(
              clf_test_vals['latents'], clf_test_vals['labels'])

          logging.info(
              'Iteration %d %d-NN classifier accuracies, Training: '
              '%.3f, Test: %.3f', step, nval,
              results['train_' + str(nval) + 'nn_acc'],
              results['test_' + str(nval) + 'nn_acc'])
deepmind-research-master
curl/training.py
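As a quick illustration of the cluster-purity metric implemented by compute_purity in training.py above: each cluster (column of the confusion matrix) contributes its single largest class count. A minimal worked example with made-up counts:

import numpy as np
from curl import training

# Toy confusion matrix: rows are true classes, columns are predicted clusters
# (counts are illustrative only).
confusion = np.array([[8, 1],
                      [2, 9]])
# Column maxima are 8 and 9, so purity = (8 + 9) / 20 = 0.85.
print(training.compute_purity(confusion))  # 0.85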
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""WideResNet and PreActResNet implementations in PyTorch."""

from typing import Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

CIFAR10_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR10_STD = (0.2471, 0.2435, 0.2616)
CIFAR100_MEAN = (0.5071, 0.4865, 0.4409)
CIFAR100_STD = (0.2673, 0.2564, 0.2762)


class _Swish(torch.autograd.Function):
  """Custom implementation of swish."""

  @staticmethod
  def forward(ctx, i):
    result = i * torch.sigmoid(i)
    ctx.save_for_backward(i)
    return result

  @staticmethod
  def backward(ctx, grad_output):
    i = ctx.saved_variables[0]
    sigmoid_i = torch.sigmoid(i)
    return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class Swish(nn.Module):
  """Module using custom implementation."""

  def forward(self, input_tensor):
    return _Swish.apply(input_tensor)


class _Block(nn.Module):
  """WideResNet Block."""

  def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
    super().__init__()
    self.batchnorm_0 = nn.BatchNorm2d(in_planes)
    self.relu_0 = activation_fn()
    # We manually pad to obtain the same effect as `SAME` (necessary when
    # `stride` is different than 1).
    self.conv_0 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                            stride=stride, padding=0, bias=False)
    self.batchnorm_1 = nn.BatchNorm2d(out_planes)
    self.relu_1 = activation_fn()
    self.conv_1 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                            padding=1, bias=False)
    self.has_shortcut = in_planes != out_planes
    if self.has_shortcut:
      self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                stride=stride, padding=0, bias=False)
    else:
      self.shortcut = None
    self._stride = stride

  def forward(self, x):
    if self.has_shortcut:
      x = self.relu_0(self.batchnorm_0(x))
    else:
      out = self.relu_0(self.batchnorm_0(x))
    v = x if self.has_shortcut else out
    if self._stride == 1:
      v = F.pad(v, (1, 1, 1, 1))
    elif self._stride == 2:
      v = F.pad(v, (0, 1, 0, 1))
    else:
      raise ValueError('Unsupported `stride`.')
    out = self.conv_0(v)
    out = self.relu_1(self.batchnorm_1(out))
    out = self.conv_1(out)
    out = torch.add(self.shortcut(x) if self.has_shortcut else x, out)
    return out


class _BlockGroup(nn.Module):
  """WideResNet block group."""

  def __init__(self, num_blocks, in_planes, out_planes, stride,
               activation_fn=nn.ReLU):
    super().__init__()
    block = []
    for i in range(num_blocks):
      block.append(
          _Block(i == 0 and in_planes or out_planes,
                 out_planes,
                 i == 0 and stride or 1,
                 activation_fn=activation_fn))
    self.block = nn.Sequential(*block)

  def forward(self, x):
    return self.block(x)


class WideResNet(nn.Module):
  """WideResNet."""

  def __init__(self,
               num_classes: int = 10,
               depth: int = 28,
               width: int = 10,
               activation_fn: nn.Module = nn.ReLU,
               mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
               std: Union[Tuple[float, ...], float] = CIFAR10_STD,
               padding: int = 0,
               num_input_channels: int = 3):
    super().__init__()
    self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
    self.std = torch.tensor(std).view(num_input_channels, 1, 1)
    self.mean_cuda = None
    self.std_cuda = None
    self.padding = padding
    num_channels = [16, 16 * width, 32 * width, 64 * width]
    assert (depth - 4) % 6 == 0
    num_blocks = (depth - 4) // 6
    self.init_conv = nn.Conv2d(num_input_channels, num_channels[0],
                               kernel_size=3, stride=1, padding=1, bias=False)
    self.layer = nn.Sequential(
        _BlockGroup(num_blocks, num_channels[0], num_channels[1], 1,
                    activation_fn=activation_fn),
        _BlockGroup(num_blocks, num_channels[1], num_channels[2], 2,
                    activation_fn=activation_fn),
        _BlockGroup(num_blocks, num_channels[2], num_channels[3], 2,
                    activation_fn=activation_fn))
    self.batchnorm = nn.BatchNorm2d(num_channels[3])
    self.relu = activation_fn()
    self.logits = nn.Linear(num_channels[3], num_classes)
    self.num_channels = num_channels[3]

  def forward(self, x):
    if self.padding > 0:
      x = F.pad(x, (self.padding,) * 4)
    if x.is_cuda:
      if self.mean_cuda is None:
        self.mean_cuda = self.mean.cuda()
        self.std_cuda = self.std.cuda()
      out = (x - self.mean_cuda) / self.std_cuda
    else:
      out = (x - self.mean) / self.std
    out = self.init_conv(out)
    out = self.layer(out)
    out = self.relu(self.batchnorm(out))
    out = F.avg_pool2d(out, 8)
    out = out.view(-1, self.num_channels)
    return self.logits(out)


class _PreActBlock(nn.Module):
  """Pre-activation ResNet Block."""

  def __init__(self, in_planes, out_planes, stride, activation_fn=nn.ReLU):
    super().__init__()
    self._stride = stride
    self.batchnorm_0 = nn.BatchNorm2d(in_planes)
    self.relu_0 = activation_fn()
    # We manually pad to obtain the same effect as `SAME` (necessary when
    # `stride` is different than 1).
    self.conv_2d_1 = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                               stride=stride, padding=0, bias=False)
    self.batchnorm_1 = nn.BatchNorm2d(out_planes)
    self.relu_1 = activation_fn()
    self.conv_2d_2 = nn.Conv2d(out_planes, out_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
    self.has_shortcut = stride != 1 or in_planes != out_planes
    if self.has_shortcut:
      self.shortcut = nn.Conv2d(in_planes, out_planes, kernel_size=3,
                                stride=stride, padding=0, bias=False)

  def _pad(self, x):
    if self._stride == 1:
      x = F.pad(x, (1, 1, 1, 1))
    elif self._stride == 2:
      x = F.pad(x, (0, 1, 0, 1))
    else:
      raise ValueError('Unsupported `stride`.')
    return x

  def forward(self, x):
    out = self.relu_0(self.batchnorm_0(x))
    shortcut = self.shortcut(self._pad(x)) if self.has_shortcut else x
    out = self.conv_2d_1(self._pad(out))
    out = self.conv_2d_2(self.relu_1(self.batchnorm_1(out)))
    return out + shortcut


class PreActResNet(nn.Module):
  """Pre-activation ResNet."""

  def __init__(self,
               num_classes: int = 10,
               depth: int = 18,
               width: int = 0,  # Used to make the constructor consistent.
               activation_fn: nn.Module = nn.ReLU,
               mean: Union[Tuple[float, ...], float] = CIFAR10_MEAN,
               std: Union[Tuple[float, ...], float] = CIFAR10_STD,
               padding: int = 0,
               num_input_channels: int = 3):
    super().__init__()
    if width != 0:
      raise ValueError('Unsupported `width`.')
    self.mean = torch.tensor(mean).view(num_input_channels, 1, 1)
    self.std = torch.tensor(std).view(num_input_channels, 1, 1)
    self.mean_cuda = None
    self.std_cuda = None
    self.padding = padding
    self.conv_2d = nn.Conv2d(num_input_channels, 64, kernel_size=3, stride=1,
                             padding=1, bias=False)
    if depth == 18:
      num_blocks = (2, 2, 2, 2)
    elif depth == 34:
      num_blocks = (3, 4, 6, 3)
    else:
      raise ValueError('Unsupported `depth`.')
    self.layer_0 = self._make_layer(64, 64, num_blocks[0], 1, activation_fn)
    self.layer_1 = self._make_layer(64, 128, num_blocks[1], 2, activation_fn)
    self.layer_2 = self._make_layer(128, 256, num_blocks[2], 2, activation_fn)
    self.layer_3 = self._make_layer(256, 512, num_blocks[3], 2, activation_fn)
    self.batchnorm = nn.BatchNorm2d(512)
    self.relu = activation_fn()
    self.logits = nn.Linear(512, num_classes)

  def _make_layer(self, in_planes, out_planes, num_blocks, stride,
                  activation_fn):
    layers = []
    for i, stride in enumerate([stride] + [1] * (num_blocks - 1)):
      layers.append(
          _PreActBlock(i == 0 and in_planes or out_planes,
                       out_planes,
                       stride,
                       activation_fn))
    return nn.Sequential(*layers)

  def forward(self, x):
    if self.padding > 0:
      x = F.pad(x, (self.padding,) * 4)
    if x.is_cuda:
      if self.mean_cuda is None:
        self.mean_cuda = self.mean.cuda()
        self.std_cuda = self.std.cuda()
      out = (x - self.mean_cuda) / self.std_cuda
    else:
      out = (x - self.mean) / self.std
    out = self.conv_2d(out)
    out = self.layer_0(out)
    out = self.layer_1(out)
    out = self.layer_2(out)
    out = self.layer_3(out)
    out = self.relu(self.batchnorm(out))
    out = F.avg_pool2d(out, 4)
    out = out.view(out.size(0), -1)
    return self.logits(out)
deepmind-research-master
adversarial_robustness/pytorch/model_zoo.py
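A minimal smoke test of the PyTorch WideResNet defined above, assuming CIFAR-10-shaped inputs; the depth/width values are the module's own defaults:

import torch
from adversarial_robustness.pytorch import model_zoo

model = model_zoo.WideResNet(num_classes=10, depth=28, width=10,
                             activation_fn=model_zoo.Swish)
model.eval()
with torch.no_grad():
  # Forward a batch of two dummy 32x32 RGB images.
  logits = model(torch.zeros(2, 3, 32, 32))
print(logits.shape)  # torch.Size([2, 10])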
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Evaluates a PyTorch checkpoint on CIFAR-10/100 or MNIST."""

from absl import app
from absl import flags

import torch
from torch.utils import data
from torchvision import datasets
from torchvision import transforms
import tqdm

from adversarial_robustness.pytorch import model_zoo

_CKPT = flags.DEFINE_string(
    'ckpt', None, 'Path to checkpoint.')

_DATASET = flags.DEFINE_enum(
    'dataset', 'cifar10', ['cifar10', 'cifar100', 'mnist'],
    'Dataset on which the checkpoint is evaluated.')

_WIDTH = flags.DEFINE_integer(
    'width', 16, 'Width of WideResNet (if set to zero uses a PreActResNet).')

_DEPTH = flags.DEFINE_integer(
    'depth', 70, 'Depth of WideResNet or PreActResNet.')

_USE_CUDA = flags.DEFINE_boolean(
    'use_cuda', True, 'Whether to use CUDA.')

_BATCH_SIZE = flags.DEFINE_integer(
    'batch_size', 100, 'Batch size.')

_NUM_BATCHES = flags.DEFINE_integer(
    'num_batches', 0,
    'Number of batches to evaluate (zero means the whole dataset).')


def main(unused_argv):
  print(f'Loading "{_CKPT.value}"')

  # Create model and dataset.
  if _WIDTH.value == 0:
    print(f'Using a PreActResNet with depth {_DEPTH.value}.')
    model_ctor = model_zoo.PreActResNet
  else:
    print(f'Using a WideResNet with depth {_DEPTH.value} and width '
          f'{_WIDTH.value}.')
    model_ctor = model_zoo.WideResNet
  if _DATASET.value == 'mnist':
    model = model_ctor(
        num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=.5, std=.5, padding=2,
        num_input_channels=1)
    dataset_fn = datasets.MNIST
  elif _DATASET.value == 'cifar10':
    model = model_ctor(
        num_classes=10, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR10_MEAN,
        std=model_zoo.CIFAR10_STD)
    dataset_fn = datasets.CIFAR10
  else:
    assert _DATASET.value == 'cifar100'
    model = model_ctor(
        num_classes=100, depth=_DEPTH.value, width=_WIDTH.value,
        activation_fn=model_zoo.Swish, mean=model_zoo.CIFAR100_MEAN,
        std=model_zoo.CIFAR100_STD)
    dataset_fn = datasets.CIFAR100

  # Load model.
  if _CKPT.value != 'dummy':
    params = torch.load(_CKPT.value)
    model.load_state_dict(params)
  if _USE_CUDA.value:
    model.cuda()
  model.eval()
  print('Successfully loaded.')

  # Load dataset.
  transform_chain = transforms.Compose([transforms.ToTensor()])
  ds = dataset_fn(root='/tmp/data', train=False, transform=transform_chain,
                  download=True)
  test_loader = data.DataLoader(ds, batch_size=_BATCH_SIZE.value,
                                shuffle=False, num_workers=0)

  # Evaluation.
  correct = 0
  total = 0
  batch_count = 0
  # Progress-bar total; when --num_batches is zero we evaluate the whole
  # 10,000-image test set.
  total_batches = (10_000 - 1) // _BATCH_SIZE.value + 1
  if _NUM_BATCHES.value > 0:
    total_batches = min(total_batches, _NUM_BATCHES.value)
  with torch.no_grad():
    for images, labels in tqdm.tqdm(test_loader, total=total_batches):
      if _USE_CUDA.value:
        # Keep inputs on the same device as the model.
        images, labels = images.cuda(), labels.cuda()
      outputs = model(images)
      _, predicted = torch.max(outputs.data, 1)
      total += labels.size(0)
      correct += (predicted == labels).sum().item()
      batch_count += 1
      if _NUM_BATCHES.value > 0 and batch_count >= _NUM_BATCHES.value:
        break
  print(f'Accuracy on the {total} test images: {100 * correct / total:.2f}%')


if __name__ == '__main__':
  flags.mark_flag_as_required('ckpt')
  app.run(main)
deepmind-research-master
adversarial_robustness/pytorch/eval.py
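For reference, a plausible invocation of the evaluation script above, assuming it is run as a module from the repository root (the checkpoint path is a placeholder; per the code, passing --ckpt=dummy skips weight loading entirely, which is handy for a dry run):

# Hypothetical command line; only the flags defined in the script are used.
#   python -m adversarial_robustness.pytorch.eval \
#       --ckpt=/path/to/checkpoint.pt \
#       --dataset=cifar10 --depth=28 --width=10 --num_batches=10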
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""WideResNet implementation in JAX using Haiku."""

from typing import Any, Dict, Optional

import chex
import haiku as hk
import jax
import jax.numpy as jnp


class _WideResNetBlock(hk.Module):
  """Block of a WideResNet."""

  def __init__(self,
               num_filters,
               stride=1,
               projection_shortcut=False,
               activation=jax.nn.relu,
               norm_args=None,
               name=None):
    super().__init__(name=name)
    num_bottleneck_layers = 1
    self._activation = activation
    if norm_args is None:
      norm_args = {
          'create_offset': False,
          'create_scale': True,
          'decay_rate': .99,
      }
    self._bn_modules = []
    self._conv_modules = []
    for i in range(num_bottleneck_layers + 1):
      s = stride if i == 0 else 1
      self._bn_modules.append(hk.BatchNorm(
          name='batchnorm_{}'.format(i),
          **norm_args))
      self._conv_modules.append(hk.Conv2D(
          output_channels=num_filters,
          padding='SAME',
          kernel_shape=(3, 3),
          stride=s,
          with_bias=False,
          name='conv_{}'.format(i)))  # pytype: disable=not-callable
    if projection_shortcut:
      self._shortcut = hk.Conv2D(
          output_channels=num_filters,
          kernel_shape=(1, 1),
          stride=stride,
          with_bias=False,
          name='shortcut')  # pytype: disable=not-callable
    else:
      self._shortcut = None

  def __call__(self, inputs, **norm_kwargs):
    x = inputs
    orig_x = inputs
    for i, (bn, conv) in enumerate(zip(self._bn_modules, self._conv_modules)):
      x = bn(x, **norm_kwargs)
      x = self._activation(x)
      if self._shortcut is not None and i == 0:
        orig_x = x
      x = conv(x)
    if self._shortcut is not None:
      shortcut_x = self._shortcut(orig_x)
      x += shortcut_x
    else:
      x += orig_x
    return x


class WideResNet(hk.Module):
  """WideResNet designed for CIFAR-10."""

  def __init__(self,
               num_classes: int = 10,
               depth: int = 28,
               width: int = 10,
               activation: str = 'relu',
               norm_args: Optional[Dict[str, Any]] = None,
               name: Optional[str] = None):
    super(WideResNet, self).__init__(name=name)
    if (depth - 4) % 6 != 0:
      raise ValueError('depth should be 6n+4.')
    self._activation = getattr(jax.nn, activation)
    if norm_args is None:
      norm_args = {
          'create_offset': True,
          'create_scale': True,
          'decay_rate': .99,
      }
    self._conv = hk.Conv2D(
        output_channels=16,
        kernel_shape=(3, 3),
        stride=1,
        with_bias=False,
        name='init_conv')  # pytype: disable=not-callable
    self._bn = hk.BatchNorm(
        name='batchnorm',
        **norm_args)
    self._linear = hk.Linear(
        num_classes,
        w_init=jnp.zeros,
        name='logits')

    blocks_per_layer = (depth - 4) // 6
    filter_sizes = [width * n for n in [16, 32, 64]]

    self._blocks = []
    for layer_num, filter_size in enumerate(filter_sizes):
      blocks_of_layer = []
      for i in range(blocks_per_layer):
        stride = 2 if (layer_num != 0 and i == 0) else 1
        projection_shortcut = (i == 0)
        blocks_of_layer.append(_WideResNetBlock(
            num_filters=filter_size,
            stride=stride,
            projection_shortcut=projection_shortcut,
            activation=self._activation,
            norm_args=norm_args,
            name='resnet_lay_{}_block_{}'.format(layer_num, i)))
      self._blocks.append(blocks_of_layer)

  def __call__(self, inputs: chex.Array, **norm_kwargs) -> chex.Array:
    net = inputs
    net = self._conv(net)

    # Blocks.
    for blocks_of_layer in self._blocks:
      for block in blocks_of_layer:
        net = block(net, **norm_kwargs)
    net = self._bn(net, **norm_kwargs)
    net = self._activation(net)

    net = jnp.mean(net, axis=[1, 2])
    return self._linear(net)
deepmind-research-master
adversarial_robustness/jax/model_zoo.py
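Haiku modules must be called inside a transform; a minimal sketch of wrapping the JAX WideResNet above with hk.transform_with_state (stateful because hk.BatchNorm tracks moving statistics). The is_training keyword is forwarded to each hk.BatchNorm via **norm_kwargs; shapes and hyperparameters here are illustrative:

import haiku as hk
import jax
import jax.numpy as jnp

from adversarial_robustness.jax import model_zoo

def forward(x, is_training):
  net = model_zoo.WideResNet(num_classes=10, depth=28, width=10)
  return net(x, is_training=is_training)

model = hk.transform_with_state(forward)
x = jnp.zeros((2, 32, 32, 3))  # NHWC dummy batch
params, state = model.init(jax.random.PRNGKey(0), x, is_training=True)
logits, state = model.apply(params, state, None, x, is_training=True)
print(logits.shape)  # (2, 10)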
# Copyright 2021 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Quick script to test that experiment can import and run."""

from absl import app
import jax
import jax.numpy as jnp
from jaxline import utils as jl_utils

from adversarial_robustness.jax import experiment


@jl_utils.disable_pmap_jit
def test_experiment(unused_argv):
  """Tests the main experiment."""
  config = experiment.get_config()
  exp_config = config.experiment_kwargs.config
  exp_config.dry_run = True
  exp_config.emulated_workers = 0
  exp_config.training.batch_size = 2
  exp_config.evaluation.batch_size = 2
  exp_config.model.kwargs.depth = 10
  exp_config.model.kwargs.width = 1

  xp = experiment.Experiment('train', exp_config, jax.random.PRNGKey(0))
  bcast = jax.pmap(lambda x: x)
  global_step = bcast(jnp.zeros(jax.local_device_count()))
  rng = bcast(jnp.stack([jax.random.PRNGKey(0)] * jax.local_device_count()))
  print('Taking a single experiment step for test purposes!')
  result = xp.step(global_step, rng)
  print(f'Step successfully taken, resulting metrics are {result}')


if __name__ == '__main__':
  app.run(test_experiment)
deepmind-research-master
adversarial_robustness/jax/experiment_test.py
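The smoke test above can be run directly as a module (illustrative invocation); the jl_utils.disable_pmap_jit decorator disables pmap/jit compilation so the single training step is easier to debug:

python -m adversarial_robustness.jax.experiment_test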
# Copyright 2021 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Datasets.""" from typing import Sequence import chex import jax import jax.numpy as jnp import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds _CIFAR10_MEAN = (0.4914, 0.4822, 0.4465) _CIFAR10_STD = (0.2471, 0.2435, 0.2616) _CIFAR100_MEAN = (0.5071, 0.4865, 0.4409) _CIFAR100_STD = (0.2673, 0.2564, 0.2762) _DATA_URL = 'https://storage.googleapis.com/dm-adversarial-robustness/' _ALLOWED_FILES = ('cifar10_ddpm.npz',) _WEBPAGE = ('https://github.com/deepmind/deepmind-research/tree/master/' 'adversarial_robustness') def cifar10_preprocess(mode: str = 'train'): """Preprocessing functions for CIFAR-10.""" def _preprocess_fn_train(example): """Preprocessing of CIFAR-10 images for training.""" image = example['image'] image = tf.image.convert_image_dtype(image, dtype=tf.float32) image = _random_jitter(image, pad=4, crop=32) image = tf.image.random_flip_left_right(image) label = tf.cast(example['label'], tf.int32) return {'image': image, 'label': label} def _preprocess_fn_test(example): """Preprocessing of CIFAR-10 images for testing.""" image = example['image'] image = tf.image.convert_image_dtype(image, dtype=tf.float32) label = tf.cast(example['label'], tf.int32) return {'image': image, 'label': label} return _preprocess_fn_train if mode == 'train' else _preprocess_fn_test def cifar10_normalize(image: chex.Array) -> chex.Array: means = jnp.array(_CIFAR10_MEAN, dtype=image.dtype) stds = jnp.array(_CIFAR10_STD, dtype=image.dtype) return (image - means) / stds def mnist_normalize(image: chex.Array) -> chex.Array: image = jnp.pad(image, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant', constant_values=0) return (image - .5) * 2. def cifar100_normalize(image: chex.Array) -> chex.Array: means = jnp.array(_CIFAR100_MEAN, dtype=image.dtype) stds = jnp.array(_CIFAR100_STD, dtype=image.dtype) return (image - means) / stds def load_cifar10(batch_sizes: Sequence[int], subset: str = 'train', is_training: bool = True, drop_remainder: bool = True, repeat: int = 1) -> tf.data.Dataset: """Loads CIFAR-10.""" if subset == 'train': ds = tfds.load(name='cifar10', split=tfds.Split.TRAIN) # In Gowal et al. (https://arxiv.org/abs/2010.03593) and Rebuffi et al. # (https://arxiv.org/abs/2103.01946), we also keep a separate validation # subset for early stopping and would run: ds = ds.skip(1_024). 
elif subset == 'test': ds = tfds.load(name='cifar10', split=tfds.Split.TEST) else: raise ValueError('Unknown subset: "{}"'.format(subset)) ds = ds.cache() if is_training: ds = ds.repeat() ds = ds.shuffle(buffer_size=50_000, seed=0) ds = _repeat_batch(batch_sizes, ds, repeat=repeat) ds = ds.map(cifar10_preprocess('train' if is_training else 'test'), num_parallel_calls=tf.data.AUTOTUNE) for batch_size in reversed(batch_sizes): ds = ds.batch(batch_size, drop_remainder=drop_remainder) return ds.prefetch(tf.data.AUTOTUNE) def load_extra(batch_sizes: Sequence[int], path_npz: str, is_training: bool = True, drop_remainder: bool = True) -> tf.data.Dataset: """Loads extra data from a given path.""" if not tf.io.gfile.exists(path_npz): if path_npz in _ALLOWED_FILES: path_npz = tf.keras.utils.get_file(path_npz, _DATA_URL + path_npz) else: raise ValueError(f'Extra data not found ({path_npz}). See {_WEBPAGE} for ' 'more details.') with tf.io.gfile.GFile(path_npz, 'rb') as fp: npzfile = np.load(fp) data = {'image': npzfile['image'], 'label': npzfile['label']} with tf.device('/device:cpu:0'): # Prevent allocation to happen on GPU. ds = tf.data.Dataset.from_tensor_slices(data) ds = ds.cache() if is_training: ds = ds.repeat() ds = ds.shuffle(buffer_size=50_000, seed=jax.host_id()) ds = ds.map(cifar10_preprocess('train' if is_training else 'test'), num_parallel_calls=tf.data.AUTOTUNE) for batch_size in reversed(batch_sizes): ds = ds.batch(batch_size, drop_remainder=drop_remainder) return ds.prefetch(tf.data.AUTOTUNE) def load_dummy_data(batch_sizes: Sequence[int], is_training: bool = True, **unused_kwargs) -> tf.data.Dataset: """Loads fictive data (use this function when testing).""" ds = tf.data.Dataset.from_tensor_slices({ 'image': np.zeros((1, 32, 32, 3), np.float32), 'label': np.zeros((1,), np.int32), }) ds = ds.repeat() if not is_training: total_batch_size = np.prod(batch_sizes) ds = ds.take(total_batch_size) ds = ds.map(cifar10_preprocess('train' if is_training else 'test'), num_parallel_calls=tf.data.AUTOTUNE) for batch_size in reversed(batch_sizes): ds = ds.batch(batch_size, drop_remainder=True) return ds.prefetch(tf.data.AUTOTUNE) def _random_jitter(image: tf.Tensor, pad: int, crop: int) -> tf.Tensor: shape = image.shape.as_list() image = tf.pad(image, [[pad, pad], [pad, pad], [0, 0]]) image = tf.image.random_crop(image, size=[crop, crop, shape[2]]) return image def _repeat_batch(batch_sizes: Sequence[int], ds: tf.data.Dataset, repeat: int = 1) -> tf.data.Dataset: """Tiles the inner most batch dimension.""" if repeat <= 1: return ds if batch_sizes[-1] % repeat != 0: raise ValueError(f'The last element of `batch_sizes` ({batch_sizes}) must ' f'be divisible by `repeat` ({repeat}).') # Perform regular batching with reduced number of elements. for i, batch_size in enumerate(reversed(batch_sizes)): ds = ds.batch(batch_size // repeat if i == 0 else batch_size, drop_remainder=True) # Repeat batch. fn = lambda x: tf.repeat(x, repeats=repeat, axis=len(batch_sizes) - 1) def repeat_inner_batch(example): return jax.tree_map(fn, example) ds = ds.map(repeat_inner_batch, num_parallel_calls=tf.data.AUTOTUNE) # Unbatch. for _ in batch_sizes: ds = ds.unbatch() return ds
deepmind-research-master
adversarial_robustness/jax/datasets.py
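A small sketch of how the loaders above compose with the normalization helper (it assumes TFDS can fetch CIFAR-10; the per-device batch size of 32 is illustrative). The nested batch_sizes list yields batches shaped [num_local_devices, 32, ...], matching the pmap layout used by the experiment.

import jax
import jax.numpy as jnp
import tensorflow_datasets as tfds

from adversarial_robustness.jax import datasets

ds = datasets.load_cifar10([jax.local_device_count(), 32],
                           subset='train', is_training=True)
batch = next(iter(tfds.as_numpy(ds)))
images = datasets.cifar10_normalize(jnp.asarray(batch['image']))
# images.shape == (jax.local_device_count(), 32, 32, 32, 3)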
# Copyright 2021 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """JAXline experiment to perform robust adversarial training.""" import functools import os from typing import Callable, Optional, Tuple from absl import flags from absl import logging import chex import haiku as hk import jax import jax.numpy as jnp from jaxline import base_config from jaxline import experiment from jaxline import utils as jl_utils from ml_collections import config_dict import numpy as np import optax import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds from adversarial_robustness.jax import attacks from adversarial_robustness.jax import datasets from adversarial_robustness.jax import model_zoo from adversarial_robustness.jax import utils FLAGS = flags.FLAGS def get_config(): """Return config object for training.""" config = base_config.get_base_config() # Batch size, training steps and data. num_classes = 10 num_epochs = 400 # Gowal et al. (2020) and Rebuffi et al. (2021) use 1024 as batch size. # Reducing this batch size may require further adjustments to the batch # normalization decay or the learning rate. If you have to use a batch size # of 256, reduce the number of emulated workers to 1 (it should match the # results of using a batch size of 1024 with 4 workers). train_batch_size = 1024 def steps_from_epochs(n): return max(int(n * 50_000 / train_batch_size), 1) num_steps = steps_from_epochs(num_epochs) test_batch_size = train_batch_size # Specify the path to the downloaded data. You can download data from # https://github.com/deepmind/deepmind-research/tree/master/adversarial_robustness. # If the path is set to "cifar10_ddpm.npz" and is not found in the current # directory, the corresponding data will be downloaded. extra_npz = 'cifar10_ddpm.npz' # Can be `None`. # Learning rate. learning_rate = .1 * max(train_batch_size / 256, 1.) learning_rate_warmup = steps_from_epochs(10) use_cosine_schedule = True if use_cosine_schedule: learning_rate_fn = utils.get_cosine_schedule(learning_rate, num_steps, learning_rate_warmup) else: learning_rate_fn = utils.get_step_schedule(learning_rate, num_steps, learning_rate_warmup) # Model definition. model_ctor = model_zoo.WideResNet model_kwargs = dict( num_classes=num_classes, depth=28, width=10, activation='swish') # Attack used during training (can be None). epsilon = 8 / 255 train_attack = attacks.UntargetedAttack( attacks.PGD( attacks.Adam(optax.piecewise_constant_schedule( init_value=.1, boundaries_and_scales={5: .1})), num_steps=10, initialize_fn=attacks.linf_initialize_fn(epsilon), project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))), loss_fn=attacks.untargeted_kl_divergence) # Attack used during evaluation (can be None). 
eval_attack = attacks.UntargetedAttack( attacks.PGD( attacks.Adam(learning_rate_fn=optax.piecewise_constant_schedule( init_value=.1, boundaries_and_scales={20: .1, 30: .01})), num_steps=40, initialize_fn=attacks.linf_initialize_fn(epsilon), project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))), loss_fn=attacks.untargeted_margin) config.experiment_kwargs = config_dict.ConfigDict(dict(config=dict( epsilon=epsilon, num_classes=num_classes, # Results from various publications use 4 worker machines, which results # in slight differences when using less worker machines. To compensate for # such discrepancies, we emulate these additional workers. Set to zero, # when using more than 4 workers. emulated_workers=4, dry_run=False, save_final_checkpoint_as_npy=True, model=dict( constructor=model_ctor, kwargs=model_kwargs), training=dict( batch_size=train_batch_size, learning_rate=learning_rate_fn, weight_decay=5e-4, swa_decay=.995, use_cutmix=False, supervised_batch_ratio=.3 if extra_npz is not None else 1., extra_data_path=extra_npz, extra_label_smoothing=.1, attack=train_attack), evaluation=dict( # If `interval` is positive, synchronously evaluate at regular # intervals. Setting it to zero will not evaluate while training, # unless `--jaxline_mode` is set to `train_eval_multithreaded`, which # asynchronously evaluates checkpoints. interval=steps_from_epochs(40), batch_size=test_batch_size, attack=eval_attack), ))) config.checkpoint_dir = '/tmp/jaxline/robust' config.train_checkpoint_all_hosts = False config.training_steps = num_steps config.interval_type = 'steps' config.log_train_data_interval = steps_from_epochs(.5) config.log_tensors_interval = steps_from_epochs(.5) config.save_checkpoint_interval = steps_from_epochs(40) config.eval_specific_checkpoint_dir = '' return config class Experiment(experiment.AbstractExperiment): """CIFAR-10 experiment.""" CHECKPOINT_ATTRS = { '_params': 'params', '_avg_params': 'avg_params', '_opt_state': 'opt_state', '_state': 'state', } def __init__(self, mode, config, init_rng): super().__init__(mode=mode) self.config = config self._params = None # Network weights. self._avg_params = None # Averaged network weights. self._state = None # Network state (e.g., batch statistics). self._opt_state = None # Optimizer state. # Build model. self.model = hk.transform_with_state(self._get_model()) if mode == 'train': self._initialize_training(init_rng) if self.config.evaluation.interval > 0: self._last_evaluation_scalars = {} self._initialize_evaluation() elif mode == 'eval': self._initialize_evaluation() elif mode == 'train_eval_multithreaded': self._initialize_training(init_rng) self._initialize_evaluation() else: raise ValueError(f'Unknown mode: "{mode}"') # _ _ # | |_ _ __ __ _(_)_ __ # | __| '__/ _` | | '_ \ # | |_| | | (_| | | | | | # \__|_| \__,_|_|_| |_| # def step(self, global_step, rng, *unused_args, **unused_kwargs): # Get next inputs. supervised_inputs = next(self.supervised_train_input) if self.extra_train_input is None: extra_inputs = None else: extra_inputs = next(self.extra_train_input) # Perform step. (self._params, self._avg_params, self._state, self._opt_state, scalars) = self.train_fn( params=self._params, avg_params=self._avg_params, state=self._state, opt_state=self._opt_state, global_step=global_step, supervised_inputs=supervised_inputs, extra_inputs=extra_inputs, rng=rng) scalars = jl_utils.get_first(scalars) # Save final checkpoint. 
if self.config.save_final_checkpoint_as_npy and not self.config.dry_run: global_step_value = jl_utils.get_first(global_step) if global_step_value == FLAGS.config.get('training_steps', 1) - 1: f_np = lambda x: np.array(jax.device_get(jl_utils.get_first(x))) np_params = jax.tree_map(f_np, self._avg_params or self._params) np_state = jax.tree_map(f_np, self._state) path_npy = os.path.join(FLAGS.config.checkpoint_dir, 'checkpoint.npy') with tf.io.gfile.GFile(path_npy, 'wb') as fp: np.save(fp, (np_params, np_state)) logging.info('Saved final checkpoint at %s', path_npy) # Run synchronous evaluation. if self.config.evaluation.interval <= 0: return scalars global_step_value = jl_utils.get_first(global_step) if (global_step_value % self.config.evaluation.interval != 0 and global_step_value != FLAGS.config.get('training_steps', 1) - 1): return _merge_eval_scalars(scalars, self._last_evaluation_scalars) logging.info('Running synchronous evaluation...') eval_scalars = self.evaluate(global_step, rng) f_list = lambda x: x.tolist() if isinstance(x, jnp.ndarray) else x self._last_evaluation_scalars = jax.tree_map(f_list, eval_scalars) logging.info('(eval) global_step: %d, %s', global_step_value, self._last_evaluation_scalars) return _merge_eval_scalars(scalars, self._last_evaluation_scalars) def _train_fn(self, params, avg_params, state, opt_state, global_step, supervised_inputs, extra_inputs, rng): scalars = {} images, labels, target_probs = self.concatenate(supervised_inputs, extra_inputs) # Apply CutMix. if self.config.training.use_cutmix: aug_rng, rng = jax.random.split(rng) images, target_probs = utils.cutmix(aug_rng, images, target_probs, split=self._repeat_batch) # Perform adversarial attack. if self.config.training.attack is None: adv_images = None grad_fn = jax.grad(self._cross_entropy_loss_fn, has_aux=True) else: attack = self.config.training.attack attack_rng, rng = jax.random.split(rng) def logits_fn(x): x = self.normalize_fn(x) return self.model.apply(params, state, rng, x, is_training=False, test_local_stats=True)[0] if attack.expects_labels(): if self.config.training.use_cutmix: raise ValueError('Use `untargeted_kl_divergence` when using CutMix.') target_labels = labels else: assert attack.expects_probabilities() if self.config.training.use_cutmix: # When using CutMix, regress the attack away from mixed labels. target_labels = target_probs else: target_labels = jax.nn.softmax(logits_fn(images)) adv_images = attack(logits_fn, attack_rng, images, target_labels) grad_fn = jax.grad(self._trades_loss_fn, has_aux=True) # Compute loss and gradients. scaled_grads, (state, loss_scalars) = grad_fn( params, state, images, adv_images, labels, target_probs, rng) grads = jax.lax.psum(scaled_grads, axis_name='i') scalars.update(loss_scalars) updates, opt_state = self.optimizer.update(grads, opt_state, params) params = optax.apply_updates(params, updates) # Stochastic weight averaging. 
if self.config.training.swa_decay > 0: avg_params = utils.ema_update(global_step, avg_params, params, decay_rate=self.config.training.swa_decay) learning_rate = self.config.training.learning_rate(global_step) scalars['learning_rate'] = learning_rate scalars = jax.lax.pmean(scalars, axis_name='i') return params, avg_params, state, opt_state, scalars def _cross_entropy_loss_fn(self, params, state, images, adv_images, labels, target_probs, rng): scalars = {} images = self.normalize_fn(images) logits, state = self.model.apply( params, state, rng, images, is_training=True) loss = jnp.mean(utils.cross_entropy(logits, target_probs)) loss += self.config.training.weight_decay * utils.weight_decay(params) if not self.config.training.use_cutmix: scalars['top_1_acc'] = utils.accuracy(logits, labels) scalars['train_loss'] = loss scaled_loss = loss / jax.device_count() return scaled_loss, (state, scalars) def _trades_loss_fn(self, params, state, images, adv_images, labels, target_probs, rng, beta=6.): """Calculates TRADES loss (https://arxiv.org/pdf/1901.08573).""" scalars = {} def apply_fn(x, **norm_kwargs): x = self.normalize_fn(x) return self.model.apply(params, state, rng, x, **norm_kwargs) # Clean images. clean_logits, _ = apply_fn(images, is_training=False, test_local_stats=True) if not self.config.training.use_cutmix: scalars['top_1_acc'] = utils.accuracy(clean_logits, labels) # Adversarial images. Update BN stats with adversarial images. adv_logits, state = apply_fn(adv_images, is_training=True) if not self.config.training.use_cutmix: scalars['top_1_adv_acc'] = utils.accuracy(adv_logits, labels) # Compute loss. clean_loss = jnp.mean(utils.cross_entropy(clean_logits, target_probs)) adv_loss = jnp.mean(utils.kl_divergence(adv_logits, clean_logits)) reg_loss = self.config.training.weight_decay * utils.weight_decay(params) loss = clean_loss + beta * adv_loss + reg_loss scalars['train_loss'] = loss scaled_loss = loss / jax.device_count() return scaled_loss, (state, scalars) # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # def evaluate(self, global_step, rng, *unused_args, **unused_kwargs): scalars = self.eval_epoch(self._params, self._state, rng) if self._avg_params: avg_scalars = self.eval_epoch(self._avg_params or self._params, self._state, rng) for k, v in avg_scalars.items(): scalars[k + '_swa'] = v return scalars def eval_epoch(self, params, state, rng): host_id = jax.host_id() num_samples = 0 batch_axis = 1 summed_scalars = None # Converting to numpy here allows us to reset the generator. eval_input = tfds.as_numpy(self.eval_input) for all_inputs in eval_input: # The inputs are send to multiple workers. inputs = jax.tree_map(lambda x: x[host_id], all_inputs) num_samples += jax.device_count() * inputs['image'].shape[batch_axis] scalars = jl_utils.get_first(self.eval_fn(params, state, inputs, rng)) # Accumulate the sum of scalars for each step. scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars) if summed_scalars is None: summed_scalars = scalars else: summed_scalars = jax.tree_multimap(jnp.add, summed_scalars, scalars) mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars) return mean_scalars def _eval_fn(self, params, state, inputs, rng): images = inputs['image'] labels = inputs['label'] attack_rng, rng = jax.random.split(rng) def logits_fn(x): x = self.normalize_fn(x) return self.model.apply(params, state, rng, x, is_training=False, test_local_stats=False)[0] # Clean accuracy. 
logits = logits_fn(images) predicted_label = jnp.argmax(logits, axis=-1) correct = jnp.equal(predicted_label, labels).astype(jnp.float32) scalars = {'top_1_acc': correct} # Adversarial accuracy. if self.config.evaluation.attack is not None: attack = self.config.evaluation.attack assert attack.expects_labels() adv_images = attack(logits_fn, attack_rng, images, labels) adv_logits = logits_fn(adv_images) predicted_label = jnp.argmax(adv_logits, axis=-1) correct = jnp.equal(predicted_label, labels).astype(jnp.float32) scalars['top_1_adv_acc'] = correct # Returned values will be summed and finally divided by num_samples. return jax.lax.psum(scalars, axis_name='i') def _initialize_training(self, rng): # Initialize inputs. if self.config.emulated_workers > 0: per_device_workers, ragged = divmod(self.config.emulated_workers, jax.host_count()) if ragged: raise ValueError('Number of emulated workers must be divisible by the ' 'number of physical workers `jax.host_count()`.') self._repeat_batch = per_device_workers else: self._repeat_batch = 1 self.supervised_train_input = jl_utils.py_prefetch( self._supervised_train_dataset) if self.config.training.extra_data_path is None: self.extra_train_input = None else: self.extra_train_input = jl_utils.py_prefetch( self._extra_train_dataset) self.normalize_fn = datasets.cifar10_normalize # Optimizer. self.optimizer = utils.sgd_momentum(self.config.training.learning_rate, momentum=.9, nesterov=True) # Initialize parameters. if self._params is None: logging.info('Initializing parameters randomly rather than restoring ' 'from checkpoint.') # Create inputs to initialize the network state. images, _, _ = jax.pmap(self.concatenate)( next(self.supervised_train_input), next(self.extra_train_input) if self.extra_train_input is not None else None) images = jax.pmap(self.normalize_fn)(images) # Initialize weights and biases. init_net = jax.pmap( lambda *a: self.model.init(*a, is_training=True), axis_name='i') init_rng = jl_utils.bcast_local_devices(rng) self._params, self._state = init_net(init_rng, images) # Setup weight averaging. if self.config.training.swa_decay > 0: self._avg_params = self._params else: self._avg_params = None # Initialize optimizer state. init_opt = jax.pmap(self.optimizer.init, axis_name='i') self._opt_state = init_opt(self._params) # Initialize step function. 
self.train_fn = jax.pmap(self._train_fn, axis_name='i', donate_argnums=(0, 1, 2, 3)) def _initialize_evaluation(self): load_fn = (datasets.load_dummy_data if self.config.dry_run else datasets.load_cifar10) self.eval_input = _dataset( functools.partial(load_fn, subset='test'), is_training=False, total_batch_size=self.config.evaluation.batch_size) self.normalize_fn = datasets.cifar10_normalize self.eval_fn = jax.pmap(self._eval_fn, axis_name='i') def _supervised_train_dataset(self) -> tfds.typing.Tree[np.ndarray]: """Creates the training dataset.""" load_fn = (datasets.load_dummy_data if self.config.dry_run else datasets.load_cifar10) load_fn = functools.partial(load_fn, subset='train', repeat=self._repeat_batch) ds = _dataset(load_fn, is_training=True, repeat=self._repeat_batch, total_batch_size=self.config.training.batch_size, ratio=self.config.training.supervised_batch_ratio) return tfds.as_numpy(ds) def _extra_train_dataset(self) -> tfds.typing.Tree[np.ndarray]: """Creates the training dataset.""" load_fn = (datasets.load_dummy_data if self.config.dry_run else datasets.load_extra) load_fn = functools.partial( load_fn, path_npz=self.config.training.extra_data_path) ds = _dataset( load_fn, is_training=True, repeat=self._repeat_batch, total_batch_size=self.config.training.batch_size, one_minus_ratio=self.config.training.supervised_batch_ratio) return tfds.as_numpy(ds) def _get_model(self) -> Callable[..., chex.Array]: config = self.config.model def forward_fn(inputs, **norm_kwargs): model_instance = config.constructor(**config.kwargs.to_dict()) return model_instance(inputs, **norm_kwargs) return forward_fn def concatenate( self, supervised_inputs: chex.ArrayTree, extra_inputs: chex.ArrayTree ) -> Tuple[chex.Array, chex.Array, chex.Array]: """Concatenate inputs.""" num_classes = self.config.num_classes supervised_images = supervised_inputs['image'] supervised_labels = supervised_inputs['label'] if extra_inputs is None: images = supervised_images labels = supervised_labels target_probs = hk.one_hot(labels, num_classes) else: extra_images = extra_inputs['image'] images = jnp.concatenate([supervised_images, extra_images], axis=0) extra_labels = extra_inputs['label'] labels = jnp.concatenate([supervised_labels, extra_labels], axis=0) supervised_one_hot_labels = hk.one_hot(supervised_labels, num_classes) extra_one_hot_labels = hk.one_hot(extra_labels, num_classes) if self.config.training.extra_label_smoothing > 0: pos = 1. - self.config.training.extra_label_smoothing neg = self.config.training.extra_label_smoothing / num_classes extra_one_hot_labels = pos * extra_one_hot_labels + neg target_probs = jnp.concatenate( [supervised_one_hot_labels, extra_one_hot_labels], axis=0) return images, labels, target_probs def _dataset(load_fn, is_training: bool, total_batch_size: int, ratio: Optional[float] = None, one_minus_ratio: Optional[float] = None, repeat: int = 1) -> tf.data.Dataset: """Creates a dataset.""" num_devices = jax.device_count() per_device_batch_size, ragged = divmod(total_batch_size, num_devices) if ragged: raise ValueError( f'Global batch size {total_batch_size} must be divisible by the ' f'total number of devices {num_devices}') if repeat > 1: if per_device_batch_size % repeat: raise ValueError( f'Per device batch size {per_device_batch_size} must be divisible ' f'by the number of repeated batches {repeat}') per_device_batch_size //= repeat if ratio is None and one_minus_ratio is None: pass # Use full batch size. 
elif one_minus_ratio is None: per_device_batch_size = max( 1, min(round(per_device_batch_size * ratio), per_device_batch_size - 1)) elif ratio is None: batch_size = max(1, min(round(per_device_batch_size * one_minus_ratio), per_device_batch_size - 1)) per_device_batch_size = per_device_batch_size - batch_size else: raise ValueError('Only one of `ratio` or `one_minus_ratio` must be ' 'specified') if repeat > 1: per_device_batch_size *= repeat # When testing, we need to batch data across all devices (not just local # devices). num_local_devices = jax.local_device_count() if is_training: batch_sizes = [num_local_devices, per_device_batch_size] else: num_hosts = jax.host_count() assert num_hosts * num_local_devices == num_devices batch_sizes = [num_hosts, num_local_devices, per_device_batch_size] return load_fn(batch_sizes, is_training=is_training) def _merge_eval_scalars(a, b): if b is None: return a for k, v in b.items(): a['eval_' + k] = v return a
deepmind-research-master
adversarial_robustness/jax/experiment.py
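A worked check (sizes are hypothetical) of the ratio arithmetic in `_dataset` above. When extra data is enabled, `supervised_batch_ratio` is passed as `ratio` to the supervised pipeline and as `one_minus_ratio` to the extra pipeline, so the two draws always sum to one per-device batch:

total_batch_size, num_devices, ratio = 1024, 8, .3
per_device = total_batch_size // num_devices                         # 128
supervised = max(1, min(round(per_device * ratio), per_device - 1))  # 38
extra = per_device - supervised                                      # 90
assert supervised + extra == per_device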
# Copyright 2021 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions.""" import re from typing import Optional, Sequence, Tuple import chex import einops import haiku as hk import jax import jax.numpy as jnp import optax def get_cosine_schedule( max_learning_rate: float, total_steps: int, warmup_steps: int = 0) -> optax.Schedule: """Builds a cosine decay schedule with initial warm-up.""" if total_steps < warmup_steps: return optax.linear_schedule(init_value=0., end_value=max_learning_rate, transition_steps=warmup_steps) return optax.join_schedules([ optax.linear_schedule(init_value=0., end_value=max_learning_rate, transition_steps=warmup_steps), optax.cosine_decay_schedule(init_value=max_learning_rate, decay_steps=total_steps - warmup_steps), ], [warmup_steps]) def get_step_schedule( max_learning_rate: float, total_steps: int, warmup_steps: int = 0) -> optax.Schedule: """Builds a step schedule with initial warm-up.""" if total_steps < warmup_steps: return optax.linear_schedule(init_value=0., end_value=max_learning_rate, transition_steps=warmup_steps) return optax.join_schedules([ optax.linear_schedule(init_value=0., end_value=max_learning_rate, transition_steps=warmup_steps), optax.piecewise_constant_schedule( init_value=max_learning_rate, boundaries_and_scales={total_steps * 2 // 3: .1}), ], [warmup_steps]) def sgd_momentum(learning_rate_fn: optax.Schedule, momentum: float = 0., nesterov: bool = False) -> optax.GradientTransformation: return optax.chain( optax.trace(decay=momentum, nesterov=nesterov), optax.scale_by_schedule(learning_rate_fn), optax.scale(-1.)) def cross_entropy(logits: chex.Array, labels: chex.Array) -> chex.Array: return -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1) def kl_divergence(q_logits: chex.Array, p_logits: chex.Array) -> chex.Array: """Compute the KL divergence.""" p_probs = jax.nn.softmax(p_logits) return cross_entropy(q_logits, p_probs) - cross_entropy(p_logits, p_probs) def accuracy(logits: chex.Array, labels: chex.Array) -> chex.Array: predicted_label = jnp.argmax(logits, axis=-1) correct = jnp.equal(predicted_label, labels).astype(jnp.float32) return jnp.sum(correct, axis=0) / logits.shape[0] def weight_decay(params: hk.Params, regex_match: Optional[Sequence[str]] = None, regex_ignore: Optional[Sequence[str]] = None) -> chex.Array: """Computes the L2 regularization loss.""" if regex_match is None: regex_match = ('.*w$', '.*b$') if regex_ignore is None: regex_ignore = ('.*batchnorm.*',) l2_norm = 0. 
for mod_name, mod_params in params.items(): for param_name, param in mod_params.items(): name = '/'.join([mod_name, param_name]) if (regex_match and all(not re.match(regex, name) for regex in regex_match)): continue if (regex_ignore and any(re.match(regex, name) for regex in regex_ignore)): continue l2_norm += jnp.sum(jnp.square(param)) return .5 * l2_norm def ema_update(step: chex.Array, avg_params: chex.ArrayTree, new_params: chex.ArrayTree, decay_rate: float = 0.99, warmup_steps: int = 0, dynamic_decay: bool = True) -> chex.ArrayTree: """Applies an exponential moving average.""" factor = (step >= warmup_steps).astype(jnp.float32) if dynamic_decay: # Uses TF-style EMA. delta = step - warmup_steps decay = jnp.minimum(decay_rate, (1. + delta) / (10. + delta)) else: decay = decay_rate decay *= factor def _weighted_average(p1, p2): d = decay.astype(p1.dtype) return (1 - d) * p1 + d * p2 return jax.tree_multimap(_weighted_average, new_params, avg_params) def cutmix(rng: chex.PRNGKey, images: chex.Array, labels: chex.Array, alpha: float = 1., beta: float = 1., split: int = 1) -> Tuple[chex.Array, chex.Array]: """Composing two images by inserting a patch into another image.""" batch_size, height, width, _ = images.shape split_batch_size = batch_size // split if split > 1 else batch_size # Masking bounding box. box_rng, lam_rng, rng = jax.random.split(rng, num=3) lam = jax.random.beta(lam_rng, a=alpha, b=beta, shape=()) cut_rat = jnp.sqrt(1. - lam) cut_w = jnp.array(width * cut_rat, dtype=jnp.int32) cut_h = jnp.array(height * cut_rat, dtype=jnp.int32) box_coords = _random_box(box_rng, height, width, cut_h, cut_w) # Adjust lambda. lam = 1. - (box_coords[2] * box_coords[3] / (height * width)) idx = jax.random.permutation(rng, split_batch_size) def _cutmix(x, y): images_a = x images_b = x[idx, :, :, :] y = lam * y + (1. - lam) * y[idx, :] x = _compose_two_images(images_a, images_b, box_coords) return x, y if split <= 1: return _cutmix(images, labels) # Apply CutMix separately on each sub-batch. This reverses the effect of # `repeat` in datasets. images = einops.rearrange(images, '(b1 b2) ... -> b1 b2 ...', b2=split) labels = einops.rearrange(labels, '(b1 b2) ... -> b1 b2 ...', b2=split) images, labels = jax.vmap(_cutmix, in_axes=1, out_axes=1)(images, labels) images = einops.rearrange(images, 'b1 b2 ... -> (b1 b2) ...', b2=split) labels = einops.rearrange(labels, 'b1 b2 ... -> (b1 b2) ...', b2=split) return images, labels def _random_box(rng: chex.PRNGKey, height: chex.Numeric, width: chex.Numeric, cut_h: chex.Array, cut_w: chex.Array) -> chex.Array: """Sample a random box of shape [cut_h, cut_w].""" height_rng, width_rng = jax.random.split(rng) i = jax.random.randint( height_rng, shape=(), minval=0, maxval=height, dtype=jnp.int32) j = jax.random.randint( width_rng, shape=(), minval=0, maxval=width, dtype=jnp.int32) bby1 = jnp.clip(i - cut_h // 2, 0, height) bbx1 = jnp.clip(j - cut_w // 2, 0, width) h = jnp.clip(i + cut_h // 2, 0, height) - bby1 w = jnp.clip(j + cut_w // 2, 0, width) - bbx1 return jnp.array([bby1, bbx1, h, w]) def _compose_two_images(images: chex.Array, image_permutation: chex.Array, bbox: chex.Array) -> chex.Array: """Inserting the second minibatch into the first at the target locations.""" def _single_compose_two_images(image1, image2): height, width, _ = image1.shape mask = _window_mask(bbox, (height, width)) return image1 * (1. 
- mask) + image2 * mask return jax.vmap(_single_compose_two_images)(images, image_permutation) def _window_mask(destination_box: chex.Array, size: Tuple[int, int]) -> jnp.ndarray: """Mask a part of the image.""" height_offset, width_offset, h, w = destination_box h_range = jnp.reshape(jnp.arange(size[0]), [size[0], 1, 1]) w_range = jnp.reshape(jnp.arange(size[1]), [1, size[1], 1]) return jnp.logical_and( jnp.logical_and(height_offset <= h_range, h_range < height_offset + h), jnp.logical_and(width_offset <= w_range, w_range < width_offset + w)).astype(jnp.float32)
deepmind-research-master
adversarial_robustness/jax/utils.py
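Two of the helpers above in isolation (a minimal sketch; step counts and decay values are arbitrary):

import jax.numpy as jnp

from adversarial_robustness.jax import utils

# Linear warm-up to the peak rate over 100 steps, then cosine decay.
schedule = utils.get_cosine_schedule(max_learning_rate=.1, total_steps=1000,
                                     warmup_steps=100)
print(float(schedule(0)), float(schedule(100)), float(schedule(999)))

# EMA of parameters; with dynamic_decay=True the effective decay ramps up
# TF-style before saturating at decay_rate.
avg = {'w': jnp.zeros(3)}
new = {'w': jnp.ones(3)}
avg = utils.ema_update(jnp.asarray(50), avg, new, decay_rate=.995)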
# Copyright 2021 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs a JAXline experiment to perform robust adversarial training.""" import functools from absl import app from absl import flags from jaxline import platform import tensorflow.compat.v2 as tf from adversarial_robustness.jax import experiment if __name__ == '__main__': flags.mark_flag_as_required('config') try: tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU. except tf.errors.NotFoundError: pass app.run(functools.partial(platform.main, experiment.Experiment))
deepmind-research-master
adversarial_robustness/jax/train.py
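Training is launched through the standard JAXline entry point; an illustrative command (the required --config flag points at the experiment file defining get_config(), shown earlier):

python -m adversarial_robustness.jax.train --config=adversarial_robustness/jax/experiment.py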
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Evaluates a JAX checkpoint on CIFAR-10/100 or MNIST."""

import functools

from absl import app
from absl import flags
import haiku as hk
import numpy as np
import optax
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tqdm

from adversarial_robustness.jax import attacks
from adversarial_robustness.jax import datasets
from adversarial_robustness.jax import model_zoo

_CKPT = flags.DEFINE_string(
    'ckpt', None, 'Path to checkpoint.')
_DATASET = flags.DEFINE_enum(
    'dataset', 'cifar10', ['cifar10', 'cifar100', 'mnist'],
    'Dataset on which the checkpoint is evaluated.')
_WIDTH = flags.DEFINE_integer(
    'width', 16, 'Width of WideResNet.')
_DEPTH = flags.DEFINE_integer(
    'depth', 70, 'Depth of WideResNet.')
_BATCH_SIZE = flags.DEFINE_integer(
    'batch_size', 100, 'Batch size.')
_NUM_BATCHES = flags.DEFINE_integer(
    'num_batches', 0,
    'Number of batches to evaluate (zero means the whole dataset).')


def main(unused_argv):
  print(f'Loading "{_CKPT.value}"')
  print(f'Using a WideResNet with depth {_DEPTH.value} and width '
        f'{_WIDTH.value}.')

  # Create dataset.
  if _DATASET.value == 'mnist':
    _, data_test = tf.keras.datasets.mnist.load_data()
    normalize_fn = datasets.mnist_normalize
  elif _DATASET.value == 'cifar10':
    _, data_test = tf.keras.datasets.cifar10.load_data()
    normalize_fn = datasets.cifar10_normalize
  else:
    assert _DATASET.value == 'cifar100'
    _, data_test = tf.keras.datasets.cifar100.load_data()
    normalize_fn = datasets.cifar100_normalize

  # Create model. The output layer must match the evaluated dataset, as a
  # CIFAR-100 checkpoint carries a 100-way classifier.
  num_classes = 100 if _DATASET.value == 'cifar100' else 10

  @hk.transform_with_state
  def model_fn(x, is_training=False):
    model = model_zoo.WideResNet(
        num_classes=num_classes, depth=_DEPTH.value, width=_WIDTH.value,
        activation='swish')
    return model(normalize_fn(x), is_training=is_training)

  # Build dataset.
  images, labels = data_test
  images = images.astype(np.float32) / 255.
  if images.ndim == 3:  # MNIST images come without a channel axis.
    images = images[..., None]
  # Keras returns labels of shape [N, 1] for CIFAR and [N] for MNIST;
  # reshape(-1) handles both.
  samples = (images, labels.reshape(-1).astype(np.int64))
  data = tf.data.Dataset.from_tensor_slices(samples).batch(_BATCH_SIZE.value)
  test_loader = tfds.as_numpy(data)

  # Load model parameters.
  rng_seq = hk.PRNGSequence(0)
  if _CKPT.value == 'dummy':
    for images, _ in test_loader:
      break
    params, state = model_fn.init(next(rng_seq), images, is_training=True)
    # Reset iterator.
    test_loader = tfds.as_numpy(data)
  else:
    params, state = np.load(_CKPT.value, allow_pickle=True)

  # Create adversarial attack. We run a PGD-40 attack with margin loss.
  epsilon = 8 / 255
  eval_attack = attacks.UntargetedAttack(
      attacks.PGD(
          attacks.Adam(learning_rate_fn=optax.piecewise_constant_schedule(
              init_value=.1,
              boundaries_and_scales={20: .1, 30: .01})),
          num_steps=40,
          initialize_fn=attacks.linf_initialize_fn(epsilon),
          project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
      loss_fn=attacks.untargeted_margin)

  def logits_fn(x, rng):
    return model_fn.apply(params, state, rng, x)[0]

  # Evaluation.
  correct = 0
  adv_correct = 0
  total = 0
  batch_count = 0
  # Zero means "evaluate the whole dataset" (see the flag definition above),
  # so only cap the progress-bar total when a positive value is given.
  total_batches = (10_000 - 1) // _BATCH_SIZE.value + 1
  if _NUM_BATCHES.value > 0:
    total_batches = min(total_batches, _NUM_BATCHES.value)
  for images, labels in tqdm.tqdm(test_loader, total=total_batches):
    rng = next(rng_seq)
    loop_logits_fn = functools.partial(logits_fn, rng=rng)

    # Clean examples.
    outputs = loop_logits_fn(images)
    correct += (np.argmax(outputs, 1) == labels).sum().item()

    # Adversarial examples.
    adv_images = eval_attack(loop_logits_fn, next(rng_seq), images, labels)
    outputs = loop_logits_fn(adv_images)
    predicted = np.argmax(outputs, 1)
    adv_correct += (predicted == labels).sum().item()

    total += labels.shape[0]
    batch_count += 1
    if _NUM_BATCHES.value > 0 and batch_count >= _NUM_BATCHES.value:
      break
  print(f'Accuracy on the {total} test images: {100 * correct / total:.2f}%')
  print(f'Robust accuracy: {100 * adv_correct / total:.2f}%')


if __name__ == '__main__':
  flags.mark_flag_as_required('ckpt')
  try:
    tf.config.set_visible_devices([], 'GPU')  # Prevent TF from using the GPU.
  except tf.errors.NotFoundError:
    pass
  app.run(main)
deepmind-research-master
adversarial_robustness/jax/eval.py
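An illustrative invocation of the evaluator above. Passing --ckpt=dummy (handled explicitly in main) exercises the whole pipeline with freshly initialized weights, which is useful for a dry run:

python -m adversarial_robustness.jax.eval --ckpt=dummy --dataset=cifar10 --depth=28 --width=10 --num_batches=2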
# Copyright 2021 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Adversarial attacks. This file contains all the code necessary to create untargeted adversarial attacks in JAX (within an l-infinity ball). For example, to create an untargeted FGSM attack (with a single step), one can do the following: ``` import attacks epsilon = 8/255 # Perturbation radius for inputs between 0 and 1. fgsm_attack = attacks.UntargetedAttack( attacks.PGD( attacks.IteratedFGSM(epsilon), num_steps=1, initialize_fn=attacks.linf_initialize_fn(epsilon), project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))), loss_fn=attacks.untargeted_cross_entropy) ``` Just as elegantly, one can specify an adversarial attack on KL-divergence to a target distribution (using 10 steps with Adam and a piecewise constant step schedule): ``` kl_attack_with_adam = attacks.UntargetedAttack( attacks.PGD( attacks.Adam(optax.piecewise_constant_schedule( init_value=.1, boundaries_and_scales={5: .1})), num_steps=10, initialize_fn=attacks.linf_initialize_fn(epsilon), project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))), loss_fn=attacks.untargeted_kl_divergence) ``` The attack instances can be used later on to build adversarial examples: ``` my_model = ... # Model. We assume that 'my_model(.)' returns logits. clean_images, image_labels = ... # Batch of images and associated labels. rng = jax.random.PRNGKey(0) # A random generator state. adversarial_images = fgsm_attack(my_model, rng, clean_images, image_labels) ``` See `experiment.py` or `eval.py` for more examples. This file contains the following components: * Losses: * untargeted_cross_entropy: minimizes the likelihood of the label class. * untargeted_kl_divergence: maximizes the KL-divergence of the predictions with a target distribution. * untargeted_margin: maximizes the margin loss (distance from the highest non-true logits to the label class logit) * Step optimizers: * SGD: Stochastic Gradient Descent. * IteratedFGSM: Also called BIM (see https://arxiv.org/pdf/1607.02533). * Adam: See https://arxiv.org/pdf/1412.6980. * Initialization and projection functions: * linf_initialize_fn: Initialize function for l-infinity attacks. * linf_project_fn: Projection function for l-infinity attacks. * Projected Gradient Descent (PGD): * PGD: Runs Projected Gradient Descent using the specified optimizer, initialization and projection functions for a given number of steps. * Untargeted attack: * UntargetedAttack: Combines PGD and a specific loss function to find adversarial examples. 
""" import functools import inspect from typing import Callable, Optional, Tuple, Union import chex import haiku as hk import jax import jax.numpy as jnp import optax ModelFn = Callable[[chex.Array], chex.Array] LossFn = Callable[[chex.Array], chex.Array] ClassificationLossFn = Callable[[chex.Array, chex.Array], chex.Array] OptimizeFn = Callable[[LossFn, chex.PRNGKey, chex.Array], chex.Array] NormalizeFn = Callable[[chex.Array], chex.Array] InitializeFn = Callable[[chex.PRNGKey, chex.Array], chex.Array] ProjectFn = Callable[[chex.Array, chex.Array], chex.Array] def untargeted_cross_entropy(logits: chex.Array, labels: chex.Array) -> chex.Array: """Maximize the cross-entropy of the true class (make it less likely).""" num_classes = logits.shape[-1] log_probs = jax.nn.log_softmax(logits) return jnp.sum( hk.one_hot(labels, num_classes).astype(logits.dtype) * log_probs, axis=-1) def untargeted_kl_divergence(logits: chex.Array, label_probs: chex.Array) -> chex.Array: """Maximize the KL divergence between logits and label distribution.""" # We are explicitly maximizing the cross-entropy, as this is equivalent to # maximizing the KL divergence (when `label_probs` does not depend # on the values that produce `logits`). log_probs = jax.nn.log_softmax(logits) return jnp.sum(label_probs * log_probs, axis=-1) def untargeted_margin(logits: chex.Array, labels: chex.Array) -> chex.Array: """Make the highest non-correct logits higher than the true class logits.""" batch_size = logits.shape[0] num_classes = logits.shape[-1] label_logits = logits[jnp.arange(batch_size), labels] logit_mask = hk.one_hot(labels, num_classes).astype(logits.dtype) highest_logits = jnp.max(logits - 1e8 * logit_mask, axis=-1) return label_logits - highest_logits class UntargetedAttack: """Performs an untargeted attack.""" def __init__(self, optimize_fn: OptimizeFn, loss_fn: ClassificationLossFn = untargeted_cross_entropy): """Creates an untargeted attack. Args: optimize_fn: An `Optimizer` instance or any callable that takes a loss function and an initial input and outputs a new input that minimizes the loss function. loss_fn: `loss_fn` is a surrogate loss. Its goal should be make the true class less likely than any other class. Typical options for `loss_fn` are `untargeted_cross_entropy` or `untargeted_margin`. """ self._optimize_fn = optimize_fn self._loss_fn = loss_fn def __call__(self, logits_fn: ModelFn, rng: chex.PRNGKey, inputs: chex.Array, labels: chex.Array) -> chex.Array: """Returns adversarial inputs.""" def _loss_fn(x): return self._loss_fn(logits_fn(x), labels) return self._optimize_fn(_loss_fn, rng, inputs) # Convenience functions to detect the type of inputs required by the loss. 
def expects_labels(self): return 'labels' in inspect.getfullargspec(self._loss_fn).args def expects_probabilities(self): return 'label_probs' in inspect.getfullargspec(self._loss_fn).args class StepOptimizer: """Makes a single gradient step that minimizes a loss function.""" def __init__(self, gradient_transformation: optax.GradientTransformation): self._gradient_transformation = gradient_transformation def init(self, loss_fn: LossFn, x: chex.Array) -> optax.OptState: self._loss_fn = loss_fn return self._gradient_transformation.init(x) def minimize( self, x: chex.Array, state: optax.OptState) -> Tuple[chex.Array, chex.Array, optax.OptState]: """Performs a single minimization step.""" g, loss = gradients_fn(self._loss_fn, x) if g is None: raise ValueError('loss_fn does not depend on input.') updates, state = self._gradient_transformation.update(g, state, x) return optax.apply_updates(x, updates), loss, state class SGD(StepOptimizer): """Vanilla gradient descent optimizer.""" def __init__(self, learning_rate_fn: Union[float, int, optax.Schedule], normalize_fn: Optional[NormalizeFn] = None): # Accept schedules, as well as scalar values. if isinstance(learning_rate_fn, (float, int)): lr = float(learning_rate_fn) learning_rate_fn = lambda _: lr # Normalization. def update_fn(updates, state, params=None): del params updates = jax.tree_map(normalize_fn or (lambda x: x), updates) return updates, state gradient_transformation = optax.chain( optax.GradientTransformation(lambda _: optax.EmptyState(), update_fn), optax.scale_by_schedule(learning_rate_fn), optax.scale(-1.)) super(SGD, self).__init__(gradient_transformation) class IteratedFGSM(SGD): """L-infinity normalized steps.""" def __init__(self, learning_rate_fn: Union[float, int, optax.Schedule]): super(IteratedFGSM, self).__init__(learning_rate_fn, jnp.sign) class Adam(StepOptimizer): """The Adam optimizer defined in https://arxiv.org/abs/1412.6980.""" def __init__( self, learning_rate_fn: Union[float, int, optax.Schedule], normalize_fn: Optional[NormalizeFn] = None, beta1: float = .9, beta2: float = .999, epsilon: float = 1e-9): # Accept schedules, as well as scalar values. if isinstance(learning_rate_fn, (float, int)): lr = float(learning_rate_fn) learning_rate_fn = lambda _: lr # Normalization. 
    def update_fn(updates, state, params=None):
      del params
      updates = jax.tree_map(normalize_fn or (lambda x: x), updates)
      return updates, state
    gradient_transformation = optax.chain(
        optax.GradientTransformation(lambda _: optax.EmptyState(), update_fn),
        optax.scale_by_adam(b1=beta1, b2=beta2, eps=epsilon),
        optax.scale_by_schedule(learning_rate_fn),
        optax.scale(-1.))
    super(Adam, self).__init__(gradient_transformation)


class PGD:
  """Runs Projected Gradient Descent (see https://arxiv.org/pdf/1706.06083)."""

  def __init__(self,
               optimizer: StepOptimizer,
               num_steps: int,
               initialize_fn: Optional[InitializeFn] = None,
               project_fn: Optional[ProjectFn] = None):
    self._optimizer = optimizer
    if initialize_fn is None:
      initialize_fn = lambda rng, x: x
    self._initialize_fn = initialize_fn
    if project_fn is None:
      project_fn = lambda x, origin_x: x
    self._project_fn = project_fn
    self._num_steps = num_steps

  def __call__(self,
               loss_fn: LossFn,
               rng: chex.PRNGKey,
               x: chex.Array) -> chex.Array:
    def _optimize(rng, x):
      """Runs the projected optimization loop and returns the final iterate."""
      def body_fn(_, inputs):
        opt_state, current_x = inputs
        current_x, _, opt_state = self._optimizer.minimize(current_x,
                                                           opt_state)
        current_x = self._project_fn(current_x, x)
        return opt_state, current_x
      opt_state = self._optimizer.init(loss_fn, x)
      current_x = self._project_fn(self._initialize_fn(rng, x), x)
      _, current_x = jax.lax.fori_loop(0, self._num_steps, body_fn,
                                       (opt_state, current_x))
      return current_x
    return jax.lax.stop_gradient(_optimize(rng, x))


def linf_project_fn(epsilon: float, bounds: Tuple[float, float]) -> ProjectFn:
  def project_fn(x, origin_x):
    dx = jnp.clip(x - origin_x, -epsilon, epsilon)
    return jnp.clip(origin_x + dx, bounds[0], bounds[1])
  return project_fn


def linf_initialize_fn(epsilon: float) -> InitializeFn:
  def initialize_fn(rng, x):
    return x + jax.random.uniform(rng, x.shape, minval=-epsilon,
                                  maxval=epsilon).astype(x.dtype)
  return initialize_fn


def gradients_fn(loss_fn: LossFn,
                 x: chex.Array) -> Tuple[chex.Array, chex.Array]:
  """Returns the analytical gradient as computed by `jax.grad`."""

  @functools.partial(jax.grad, has_aux=True)
  def grad_reduced_loss_fn(x):
    loss = loss_fn(x)
    return jnp.sum(loss), loss

  return grad_reduced_loss_fn(x)
deepmind-research-master
adversarial_robustness/jax/attacks.py
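A self-contained toy check of the building blocks above (the linear "model" and all shapes are illustrative): assemble a one-step FGSM-style attack and verify the perturbation stays inside the l-infinity ball.

import jax
import jax.numpy as jnp

from adversarial_robustness.jax import attacks

epsilon = 8 / 255
attack = attacks.UntargetedAttack(
    attacks.PGD(
        attacks.IteratedFGSM(epsilon),  # one sign(grad) step of size epsilon
        num_steps=1,
        initialize_fn=attacks.linf_initialize_fn(epsilon),
        project_fn=attacks.linf_project_fn(epsilon, bounds=(0., 1.))),
    loss_fn=attacks.untargeted_cross_entropy)

w = jax.random.normal(jax.random.PRNGKey(1), (32 * 32 * 3, 10))
logits_fn = lambda x: x.reshape((x.shape[0], -1)) @ w  # stand-in model
x = jnp.full((2, 32, 32, 3), .5)
labels = jnp.array([0, 1])
adv_x = attack(logits_fn, jax.random.PRNGKey(0), x, labels)
assert float(jnp.max(jnp.abs(adv_x - x))) <= epsilon + 1e-6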
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training and evaluation loops for an experiment.""" import time from typing import Any, Mapping, Text, Type, Union from absl import app from absl import flags from absl import logging import jax import numpy as np from byol import byol_experiment from byol import eval_experiment from byol.configs import byol as byol_config from byol.configs import eval as eval_config flags.DEFINE_string('experiment_mode', 'pretrain', 'The experiment, pretrain or linear-eval') flags.DEFINE_string('worker_mode', 'train', 'The mode, train or eval') flags.DEFINE_string('worker_tpu_driver', '', 'The tpu driver to use') flags.DEFINE_integer('pretrain_epochs', 1000, 'Number of pre-training epochs') flags.DEFINE_integer('batch_size', 4096, 'Total batch size') flags.DEFINE_string('checkpoint_root', '/tmp/byol', 'The directory to save checkpoints to.') flags.DEFINE_integer('log_tensors_interval', 60, 'Log tensors every n seconds.') FLAGS = flags.FLAGS Experiment = Union[ Type[byol_experiment.ByolExperiment], Type[eval_experiment.EvalExperiment]] def train_loop(experiment_class: Experiment, config: Mapping[Text, Any]): """The main training loop. This loop periodically saves a checkpoint to be evaluated in the eval_loop. Args: experiment_class: the constructor for the experiment (either byol_experiment or eval_experiment). config: the experiment config. """ experiment = experiment_class(**config) rng = jax.random.PRNGKey(0) step = 0 host_id = jax.host_id() last_logging = time.time() if config['checkpointing_config']['use_checkpointing']: checkpoint_data = experiment.load_checkpoint() if checkpoint_data is None: step = 0 else: step, rng = checkpoint_data local_device_count = jax.local_device_count() while step < config['max_steps']: step_rng, rng = tuple(jax.random.split(rng)) # Broadcast the random seeds across the devices step_rng_device = jax.random.split(step_rng, num=jax.device_count()) step_rng_device = step_rng_device[ host_id * local_device_count:(host_id + 1) * local_device_count] step_device = np.broadcast_to(step, [local_device_count]) # Perform a training step and get scalars to log. scalars = experiment.step(global_step=step_device, rng=step_rng_device) # Checkpointing and logging. if config['checkpointing_config']['use_checkpointing']: experiment.save_checkpoint(step, rng) current_time = time.time() if current_time - last_logging > FLAGS.log_tensors_interval: logging.info('Step %d: %s', step, scalars) last_logging = current_time step += 1 logging.info('Saving final checkpoint') logging.info('Step %d: %s', step, scalars) experiment.save_checkpoint(step, rng) def eval_loop(experiment_class: Experiment, config: Mapping[Text, Any]): """The main evaluation loop. This loop periodically loads a checkpoint and evaluates its performance on the test set, by calling experiment.evaluate. Args: experiment_class: the constructor for the experiment (either byol_experiment or eval_experiment). config: the experiment config. 
""" experiment = experiment_class(**config) last_evaluated_step = -1 while True: checkpoint_data = experiment.load_checkpoint() if checkpoint_data is None: logging.info('No checkpoint found. Waiting for 10s.') time.sleep(10) continue step, _ = checkpoint_data if step <= last_evaluated_step: logging.info('Checkpoint at step %d already evaluated, waiting.', step) time.sleep(10) continue host_id = jax.host_id() local_device_count = jax.local_device_count() step_device = np.broadcast_to(step, [local_device_count]) scalars = experiment.evaluate(global_step=step_device) if host_id == 0: # Only perform logging in one host. logging.info('Evaluation at step %d: %s', step, scalars) last_evaluated_step = step if last_evaluated_step >= config['max_steps']: return def main(_): if FLAGS.worker_tpu_driver: jax.config.update('jax_xla_backend', 'tpu_driver') jax.config.update('jax_backend_target', FLAGS.worker_tpu_driver) logging.info('Backend: %s %r', FLAGS.worker_tpu_driver, jax.devices()) if FLAGS.experiment_mode == 'pretrain': experiment_class = byol_experiment.ByolExperiment config = byol_config.get_config(FLAGS.pretrain_epochs, FLAGS.batch_size) elif FLAGS.experiment_mode == 'linear-eval': experiment_class = eval_experiment.EvalExperiment config = eval_config.get_config(f'{FLAGS.checkpoint_root}/pretrain.pkl', FLAGS.batch_size) else: raise ValueError(f'Unknown experiment mode: {FLAGS.experiment_mode}') config['checkpointing_config']['checkpoint_dir'] = FLAGS.checkpoint_root if FLAGS.worker_mode == 'train': train_loop(experiment_class, config) elif FLAGS.worker_mode == 'eval': eval_loop(experiment_class, config) if __name__ == '__main__': app.run(main)
deepmind-research-master
byol/main_loop.py
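Illustrative launch commands for the loop above, using only flags defined in this file. The linear-eval config reads {checkpoint_root}/pretrain.pkl, so pre-training must have checkpointed there first:

python -m byol.main_loop --experiment_mode=pretrain --worker_mode=train --checkpoint_root=/tmp/byol --pretrain_epochs=1000 --batch_size=4096
python -m byol.main_loop --experiment_mode=linear-eval --worker_mode=train --checkpoint_root=/tmp/byol --batch_size=1024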
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BYOL pre-training implementation. Use this experiment to pre-train a self-supervised representation. """ import functools from typing import Any, Generator, Mapping, NamedTuple, Text, Tuple, Union from absl import logging from acme.jax import utils as acme_utils import haiku as hk import jax import jax.numpy as jnp import numpy as np import optax from byol.utils import augmentations from byol.utils import checkpointing from byol.utils import dataset from byol.utils import helpers from byol.utils import networks from byol.utils import optimizers from byol.utils import schedules # Type declarations. LogsDict = Mapping[Text, jnp.ndarray] class _ByolExperimentState(NamedTuple): """Byol's model and optimization parameters and state.""" online_params: hk.Params target_params: hk.Params online_state: hk.State target_state: hk.State opt_state: optimizers.LarsState class ByolExperiment: """Byol's training and evaluation component definition.""" def __init__( self, random_seed: int, num_classes: int, batch_size: int, max_steps: int, enable_double_transpose: bool, base_target_ema: float, network_config: Mapping[Text, Any], optimizer_config: Mapping[Text, Any], lr_schedule_config: Mapping[Text, Any], evaluation_config: Mapping[Text, Any], checkpointing_config: Mapping[Text, Any]): """Constructs the experiment. Args: random_seed: the random seed to use when initializing network weights. num_classes: the number of classes; used for the online evaluation. batch_size: the total batch size; should be a multiple of the number of available accelerators. max_steps: the number of training steps; used for the lr/target network ema schedules. enable_double_transpose: see dataset.py; only has effect on TPU. base_target_ema: the initial value for the ema decay rate of the target network. network_config: the configuration for the network. optimizer_config: the configuration for the optimizer. lr_schedule_config: the configuration for the learning rate schedule. evaluation_config: the evaluation configuration. checkpointing_config: the configuration for checkpointing. """ self._random_seed = random_seed self._enable_double_transpose = enable_double_transpose self._num_classes = num_classes self._lr_schedule_config = lr_schedule_config self._batch_size = batch_size self._max_steps = max_steps self._base_target_ema = base_target_ema self._optimizer_config = optimizer_config self._evaluation_config = evaluation_config # Checkpointed experiment state. self._byol_state = None # Input pipelines. 
self._train_input = None self._eval_input = None # build the transformed ops forward_fn = functools.partial(self._forward, **network_config) self.forward = hk.without_apply_rng(hk.transform_with_state(forward_fn)) # training can handle multiple devices, thus the pmap self.update_pmap = jax.pmap(self._update_fn, axis_name='i') # evaluation can only handle single device self.eval_batch_jit = jax.jit(self._eval_batch) self._checkpointer = checkpointing.Checkpointer(**checkpointing_config) def _forward( self, inputs: dataset.Batch, projector_hidden_size: int, projector_output_size: int, predictor_hidden_size: int, encoder_class: Text, encoder_config: Mapping[Text, Any], bn_config: Mapping[Text, Any], is_training: bool, ) -> Mapping[Text, jnp.ndarray]: """Forward application of byol's architecture. Args: inputs: A batch of data, i.e. a dictionary, with either two keys, (`images` and `labels`) or three keys (`view1`, `view2`, `labels`). projector_hidden_size: hidden size of the projector MLP. projector_output_size: output size of the projector and predictor MLPs. predictor_hidden_size: hidden size of the predictor MLP. encoder_class: type of the encoder (should match a class in utils/networks). encoder_config: passed to the encoder constructor. bn_config: passed to the hk.BatchNorm constructors. is_training: Training or evaluating the model? When True, inputs must contain keys `view1` and `view2`. When False, inputs must contain key `images`. Returns: All outputs of the model, i.e. a dictionary with projection, prediction and logits keys, for either the two views, or the image. """ encoder = getattr(networks, encoder_class) net = encoder( num_classes=None, # Don't build the final linear layer bn_config=bn_config, **encoder_config) projector = networks.MLP( name='projector', hidden_size=projector_hidden_size, output_size=projector_output_size, bn_config=bn_config) predictor = networks.MLP( name='predictor', hidden_size=predictor_hidden_size, output_size=projector_output_size, bn_config=bn_config) classifier = hk.Linear( output_size=self._num_classes, name='classifier') def apply_once_fn(images: jnp.ndarray, suffix: Text = ''): images = dataset.normalize_images(images) embedding = net(images, is_training=is_training) proj_out = projector(embedding, is_training) pred_out = predictor(proj_out, is_training) # Note the stop_gradient: label information is not leaked into the # main network. classif_out = classifier(jax.lax.stop_gradient(embedding)) outputs = {} outputs['projection' + suffix] = proj_out outputs['prediction' + suffix] = pred_out outputs['logits' + suffix] = classif_out return outputs if is_training: outputs_view1 = apply_once_fn(inputs['view1'], '_view1') outputs_view2 = apply_once_fn(inputs['view2'], '_view2') return {**outputs_view1, **outputs_view2} else: return apply_once_fn(inputs['images'], '') def _optimizer(self, learning_rate: float) -> optax.GradientTransformation: """Build optimizer from config.""" return optimizers.lars( learning_rate, weight_decay_filter=optimizers.exclude_bias_and_norm, lars_adaptation_filter=optimizers.exclude_bias_and_norm, **self._optimizer_config) def loss_fn( self, online_params: hk.Params, target_params: hk.Params, online_state: hk.State, target_state: hk.Params, rng: jnp.ndarray, inputs: dataset.Batch, ) -> Tuple[jnp.ndarray, Tuple[Mapping[Text, hk.State], LogsDict]]: """Compute BYOL's loss function. Args: online_params: parameters of the online network (the loss is later differentiated with respect to the online parameters). 
target_params: parameters of the target network. online_state: internal state of online network. target_state: internal state of target network. rng: random number generator state. inputs: inputs, containing two batches of crops from the same images, view1 and view2 and labels Returns: BYOL's loss, a mapping containing the online and target networks updated states after processing inputs, and various logs. """ if self._should_transpose_images(): inputs = dataset.transpose_images(inputs) inputs = augmentations.postprocess(inputs, rng) labels = inputs['labels'] online_network_out, online_state = self.forward.apply( params=online_params, state=online_state, inputs=inputs, is_training=True) target_network_out, target_state = self.forward.apply( params=target_params, state=target_state, inputs=inputs, is_training=True) # Representation loss # The stop_gradient is not necessary as we explicitly take the gradient with # respect to online parameters only in `optax.apply_updates`. We leave it to # indicate that gradients are not backpropagated through the target network. repr_loss = helpers.regression_loss( online_network_out['prediction_view1'], jax.lax.stop_gradient(target_network_out['projection_view2'])) repr_loss = repr_loss + helpers.regression_loss( online_network_out['prediction_view2'], jax.lax.stop_gradient(target_network_out['projection_view1'])) repr_loss = jnp.mean(repr_loss) # Classification loss (with gradient flows stopped from flowing into the # ResNet). This is used to provide an evaluation of the representation # quality during training. classif_loss = helpers.softmax_cross_entropy( logits=online_network_out['logits_view1'], labels=jax.nn.one_hot(labels, self._num_classes)) top1_correct = helpers.topk_accuracy( online_network_out['logits_view1'], inputs['labels'], topk=1, ) top5_correct = helpers.topk_accuracy( online_network_out['logits_view1'], inputs['labels'], topk=5, ) top1_acc = jnp.mean(top1_correct) top5_acc = jnp.mean(top5_correct) classif_loss = jnp.mean(classif_loss) loss = repr_loss + classif_loss logs = dict( loss=loss, repr_loss=repr_loss, classif_loss=classif_loss, top1_accuracy=top1_acc, top5_accuracy=top5_acc, ) return loss, (dict(online_state=online_state, target_state=target_state), logs) def _should_transpose_images(self): """Should we transpose images (saves host-to-device time on TPUs).""" return (self._enable_double_transpose and jax.local_devices()[0].platform == 'tpu') def _update_fn( self, byol_state: _ByolExperimentState, global_step: jnp.ndarray, rng: jnp.ndarray, inputs: dataset.Batch, ) -> Tuple[_ByolExperimentState, LogsDict]: """Update online and target parameters. Args: byol_state: current BYOL state. global_step: current training step. rng: current random number generator inputs: inputs, containing two batches of crops from the same images, view1 and view2 and labels Returns: Tuple containing the updated Byol state after processing the inputs, and various logs. 
""" online_params = byol_state.online_params target_params = byol_state.target_params online_state = byol_state.online_state target_state = byol_state.target_state opt_state = byol_state.opt_state # update online network grad_fn = jax.grad(self.loss_fn, argnums=0, has_aux=True) grads, (net_states, logs) = grad_fn(online_params, target_params, online_state, target_state, rng, inputs) # cross-device grad and logs reductions grads = jax.tree_map(lambda v: jax.lax.pmean(v, axis_name='i'), grads) logs = jax.tree_multimap(lambda x: jax.lax.pmean(x, axis_name='i'), logs) learning_rate = schedules.learning_schedule( global_step, batch_size=self._batch_size, total_steps=self._max_steps, **self._lr_schedule_config) updates, opt_state = self._optimizer(learning_rate).update( grads, opt_state, online_params) online_params = optax.apply_updates(online_params, updates) # update target network tau = schedules.target_ema( global_step, base_ema=self._base_target_ema, max_steps=self._max_steps) target_params = jax.tree_multimap(lambda x, y: x + (1 - tau) * (y - x), target_params, online_params) logs['tau'] = tau logs['learning_rate'] = learning_rate return _ByolExperimentState( online_params=online_params, target_params=target_params, online_state=net_states['online_state'], target_state=net_states['target_state'], opt_state=opt_state), logs def _make_initial_state( self, rng: jnp.ndarray, dummy_input: dataset.Batch, ) -> _ByolExperimentState: """BYOL's _ByolExperimentState initialization. Args: rng: random number generator used to initialize parameters. If working in a multi device setup, this need to be a ShardedArray. dummy_input: a dummy image, used to compute intermediate outputs shapes. Returns: Initial Byol state. """ rng_online, rng_target = jax.random.split(rng) if self._should_transpose_images(): dummy_input = dataset.transpose_images(dummy_input) # Online and target parameters are initialized using different rngs, # in our experiments we did not notice a significant different with using # the same rng for both. online_params, online_state = self.forward.init( rng_online, dummy_input, is_training=True, ) target_params, target_state = self.forward.init( rng_target, dummy_input, is_training=True, ) opt_state = self._optimizer(0).init(online_params) return _ByolExperimentState( online_params=online_params, target_params=target_params, opt_state=opt_state, online_state=online_state, target_state=target_state, ) def step(self, *, global_step: jnp.ndarray, rng: jnp.ndarray) -> Mapping[Text, np.ndarray]: """Performs a single training step.""" if self._train_input is None: self._initialize_train() inputs = next(self._train_input) self._byol_state, scalars = self.update_pmap( self._byol_state, global_step=global_step, rng=rng, inputs=inputs, ) return helpers.get_first(scalars) def save_checkpoint(self, step: int, rng: jnp.ndarray): self._checkpointer.maybe_save_checkpoint( self._byol_state, step=step, rng=rng, is_final=step >= self._max_steps) def load_checkpoint(self) -> Union[Tuple[int, jnp.ndarray], None]: checkpoint_data = self._checkpointer.maybe_load_checkpoint() if checkpoint_data is None: return None self._byol_state, step, rng = checkpoint_data return step, rng def _initialize_train(self): """Initialize train. This includes initializing the input pipeline and Byol's state. 
""" self._train_input = acme_utils.prefetch(self._build_train_input()) # Check we haven't already restored params if self._byol_state is None: logging.info( 'Initializing parameters rather than restoring from checkpoint.') # initialize Byol and setup optimizer state inputs = next(self._train_input) init_byol = jax.pmap(self._make_initial_state, axis_name='i') # Init uses the same RNG key on all hosts+devices to ensure everyone # computes the same initial state and parameters. init_rng = jax.random.PRNGKey(self._random_seed) init_rng = helpers.bcast_local_devices(init_rng) self._byol_state = init_byol(rng=init_rng, dummy_input=inputs) def _build_train_input(self) -> Generator[dataset.Batch, None, None]: """Loads the (infinitely looping) dataset iterator.""" num_devices = jax.device_count() global_batch_size = self._batch_size per_device_batch_size, ragged = divmod(global_batch_size, num_devices) if ragged: raise ValueError( f'Global batch size {global_batch_size} must be divisible by ' f'num devices {num_devices}') return dataset.load( dataset.Split.TRAIN_AND_VALID, preprocess_mode=dataset.PreprocessMode.PRETRAIN, transpose=self._should_transpose_images(), batch_dims=[jax.local_device_count(), per_device_batch_size]) def _eval_batch( self, params: hk.Params, state: hk.State, batch: dataset.Batch, ) -> Mapping[Text, jnp.ndarray]: """Evaluates a batch. Args: params: Parameters of the model to evaluate. Typically Byol's online parameters. state: State of the model to evaluate. Typically Byol's online state. batch: Batch of data to evaluate (must contain keys images and labels). Returns: Unreduced evaluation loss and top1 accuracy on the batch. """ if self._should_transpose_images(): batch = dataset.transpose_images(batch) outputs, _ = self.forward.apply(params, state, batch, is_training=False) logits = outputs['logits'] labels = hk.one_hot(batch['labels'], self._num_classes) loss = helpers.softmax_cross_entropy(logits, labels, reduction=None) top1_correct = helpers.topk_accuracy(logits, batch['labels'], topk=1) top5_correct = helpers.topk_accuracy(logits, batch['labels'], topk=5) # NOTE: Returned values will be summed and finally divided by num_samples. return { 'eval_loss': loss, 'top1_accuracy': top1_correct, 'top5_accuracy': top5_correct, } def _eval_epoch(self, subset: Text, batch_size: int): """Evaluates an epoch.""" num_samples = 0. summed_scalars = None params = helpers.get_first(self._byol_state.online_params) state = helpers.get_first(self._byol_state.online_state) split = dataset.Split.from_string(subset) dataset_iterator = dataset.load( split, preprocess_mode=dataset.PreprocessMode.EVAL, transpose=self._should_transpose_images(), batch_dims=[batch_size]) for inputs in dataset_iterator: num_samples += inputs['labels'].shape[0] scalars = self.eval_batch_jit(params, state, inputs) # Accumulate the sum of scalars for each step. scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars) if summed_scalars is None: summed_scalars = scalars else: summed_scalars = jax.tree_multimap(jnp.add, summed_scalars, scalars) mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars) return mean_scalars def evaluate(self, global_step, **unused_args): """Thin wrapper around _eval_epoch.""" global_step = np.array(helpers.get_first(global_step)) scalars = jax.device_get(self._eval_epoch(**self._evaluation_config)) logging.info('[Step %d] Eval scalars: %s', global_step, scalars) return scalars
deepmind-research-master
byol/byol_experiment.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for BYOL's main training loop.""" from absl import flags from absl.testing import absltest import tensorflow_datasets as tfds from byol import byol_experiment from byol import eval_experiment from byol import main_loop from byol.configs import byol as byol_config from byol.configs import eval as eval_config FLAGS = flags.FLAGS class MainLoopTest(absltest.TestCase): def test_pretrain(self): config = byol_config.get_config(num_epochs=40, batch_size=4) temp_dir = self.create_tempdir().full_path # Override some config fields to make test lighter. config['network_config']['encoder_class'] = 'TinyResNet' config['network_config']['projector_hidden_size'] = 256 config['network_config']['predictor_hidden_size'] = 256 config['checkpointing_config']['checkpoint_dir'] = temp_dir config['evaluation_config']['batch_size'] = 16 config['max_steps'] = 16 with tfds.testing.mock_data(num_examples=64): experiment_class = byol_experiment.ByolExperiment main_loop.train_loop(experiment_class, config) main_loop.eval_loop(experiment_class, config) def test_linear_eval(self): config = eval_config.get_config(checkpoint_to_evaluate=None, batch_size=4) temp_dir = self.create_tempdir().full_path # Override some config fields to make test lighter. config['network_config']['encoder_class'] = 'TinyResNet' config['allow_train_from_scratch'] = True config['checkpointing_config']['checkpoint_dir'] = temp_dir config['evaluation_config']['batch_size'] = 16 config['max_steps'] = 16 with tfds.testing.mock_data(num_examples=64): experiment_class = eval_experiment.EvalExperiment main_loop.train_loop(experiment_class, config) main_loop.eval_loop(experiment_class, config) def test_pipeline(self): b_config = byol_config.get_config(num_epochs=40, batch_size=4) temp_dir = self.create_tempdir().full_path # Override some config fields to make test lighter. b_config['network_config']['encoder_class'] = 'TinyResNet' b_config['network_config']['projector_hidden_size'] = 256 b_config['network_config']['predictor_hidden_size'] = 256 b_config['checkpointing_config']['checkpoint_dir'] = temp_dir b_config['evaluation_config']['batch_size'] = 16 b_config['max_steps'] = 16 with tfds.testing.mock_data(num_examples=64): main_loop.train_loop(byol_experiment.ByolExperiment, b_config) e_config = eval_config.get_config( checkpoint_to_evaluate=f'{temp_dir}/pretrain.pkl', batch_size=4) # Override some config fields to make test lighter. e_config['network_config']['encoder_class'] = 'TinyResNet' e_config['allow_train_from_scratch'] = True e_config['checkpointing_config']['checkpoint_dir'] = temp_dir e_config['evaluation_config']['batch_size'] = 16 e_config['max_steps'] = 16 with tfds.testing.mock_data(num_examples=64): main_loop.train_loop(eval_experiment.EvalExperiment, e_config) if __name__ == '__main__': absltest.main()
deepmind-research-master
byol/main_loop_test.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Linear evaluation or fine-tuning pipeline. Use this experiment to evaluate a checkpoint from byol_experiment. """ import functools from typing import Any, Generator, Mapping, NamedTuple, Optional, Text, Tuple, Union from absl import logging from acme.jax import utils as acme_utils import haiku as hk import jax import jax.numpy as jnp import numpy as np import optax from byol.utils import checkpointing from byol.utils import dataset from byol.utils import helpers from byol.utils import networks from byol.utils import schedules # Type declarations. OptState = Tuple[optax.TraceState, optax.ScaleByScheduleState, optax.ScaleState] LogsDict = Mapping[Text, jnp.ndarray] class _EvalExperimentState(NamedTuple): backbone_params: hk.Params classif_params: hk.Params backbone_state: hk.State backbone_opt_state: Union[None, OptState] classif_opt_state: OptState class EvalExperiment: """Linear evaluation experiment.""" def __init__( self, random_seed: int, num_classes: int, batch_size: int, max_steps: int, enable_double_transpose: bool, checkpoint_to_evaluate: Optional[Text], allow_train_from_scratch: bool, freeze_backbone: bool, network_config: Mapping[Text, Any], optimizer_config: Mapping[Text, Any], lr_schedule_config: Mapping[Text, Any], evaluation_config: Mapping[Text, Any], checkpointing_config: Mapping[Text, Any]): """Constructs the experiment. Args: random_seed: the random seed to use when initializing network weights. num_classes: the number of classes; used for the online evaluation. batch_size: the total batch size; should be a multiple of the number of available accelerators. max_steps: the number of training steps; used for the lr/target network ema schedules. enable_double_transpose: see dataset.py; only has effect on TPU. checkpoint_to_evaluate: the path to the checkpoint to evaluate. allow_train_from_scratch: whether to allow training without specifying a checkpoint to evaluate (training from scratch). freeze_backbone: whether the backbone resnet should remain frozen (linear evaluation) or be trainable (fine-tuning). network_config: the configuration for the network. optimizer_config: the configuration for the optimizer. lr_schedule_config: the configuration for the learning rate schedule. evaluation_config: the evaluation configuration. checkpointing_config: the configuration for checkpointing. """ self._random_seed = random_seed self._enable_double_transpose = enable_double_transpose self._num_classes = num_classes self._lr_schedule_config = lr_schedule_config self._batch_size = batch_size self._max_steps = max_steps self._checkpoint_to_evaluate = checkpoint_to_evaluate self._allow_train_from_scratch = allow_train_from_scratch self._freeze_backbone = freeze_backbone self._optimizer_config = optimizer_config self._evaluation_config = evaluation_config # Checkpointed experiment state. self._experiment_state = None # Input pipelines. 
    self._train_input = None
    self._eval_input = None

    backbone_fn = functools.partial(self._backbone_fn, **network_config)
    self.forward_backbone = hk.without_apply_rng(
        hk.transform_with_state(backbone_fn))
    self.forward_classif = hk.without_apply_rng(hk.transform(self._classif_fn))
    self.update_pmap = jax.pmap(self._update_func, axis_name='i')
    self.eval_batch_jit = jax.jit(self._eval_batch)

    self._is_backbone_training = not self._freeze_backbone

    self._checkpointer = checkpointing.Checkpointer(**checkpointing_config)

  def _should_transpose_images(self):
    """Should we transpose images (saves host-to-device time on TPUs)."""
    return (self._enable_double_transpose and
            jax.local_devices()[0].platform == 'tpu')

  def _backbone_fn(
      self,
      inputs: dataset.Batch,
      encoder_class: Text,
      encoder_config: Mapping[Text, Any],
      bn_decay_rate: float,
      is_training: bool,
  ) -> jnp.ndarray:
    """Forward of the encoder (backbone)."""
    bn_config = {'decay_rate': bn_decay_rate}
    encoder = getattr(networks, encoder_class)
    model = encoder(
        None,
        bn_config=bn_config,
        **encoder_config)

    if self._should_transpose_images():
      inputs = dataset.transpose_images(inputs)
    images = dataset.normalize_images(inputs['images'])
    return model(images, is_training=is_training)

  def _classif_fn(
      self,
      embeddings: jnp.ndarray,
  ) -> jnp.ndarray:
    classifier = hk.Linear(output_size=self._num_classes)
    return classifier(embeddings)

  #  _             _
  # | |_ _ __ __ _(_)_ __
  # | __| '__/ _` | | '_ \
  # | |_| | | (_| | | | | |
  #  \__|_| \__,_|_|_| |_|
  #

  def step(self, *,
           global_step: jnp.ndarray,
           rng: jnp.ndarray) -> Mapping[Text, np.ndarray]:
    """Performs a single training step."""
    if self._train_input is None:
      self._initialize_train(rng)

    inputs = next(self._train_input)

    self._experiment_state, scalars = self.update_pmap(
        self._experiment_state, global_step, inputs)

    scalars = helpers.get_first(scalars)
    return scalars

  def save_checkpoint(self, step: int, rng: jnp.ndarray):
    self._checkpointer.maybe_save_checkpoint(
        self._experiment_state, step=step, rng=rng,
        is_final=step >= self._max_steps)

  def load_checkpoint(self) -> Union[Tuple[int, jnp.ndarray], None]:
    checkpoint_data = self._checkpointer.maybe_load_checkpoint()
    if checkpoint_data is None:
      return None
    self._experiment_state, step, rng = checkpoint_data
    return step, rng

  def _initialize_train(self, rng):
    """Initializes the training input pipeline and the experiment state.

    Args:
      rng: random number generator used to initialize parameters. If working
        in a multi-device setup, this needs to be a ShardedArray.

    Raises:
      RuntimeError: invalid or empty checkpoint.
""" self._train_input = acme_utils.prefetch(self._build_train_input()) # Check we haven't already restored params if self._experiment_state is None: inputs = next(self._train_input) if self._checkpoint_to_evaluate is not None: # Load params from checkpoint checkpoint_data = checkpointing.load_checkpoint( self._checkpoint_to_evaluate) if checkpoint_data is None: raise RuntimeError('Invalid checkpoint.') backbone_params = checkpoint_data['experiment_state'].online_params backbone_state = checkpoint_data['experiment_state'].online_state backbone_params = helpers.bcast_local_devices(backbone_params) backbone_state = helpers.bcast_local_devices(backbone_state) else: if not self._allow_train_from_scratch: raise ValueError( 'No checkpoint specified, but `allow_train_from_scratch` ' 'set to False') # Initialize with random parameters logging.info( 'No checkpoint specified, initializing the networks from scratch ' '(dry run mode)') backbone_params, backbone_state = jax.pmap( functools.partial(self.forward_backbone.init, is_training=True), axis_name='i')(rng=rng, inputs=inputs) init_experiment = jax.pmap(self._make_initial_state, axis_name='i') # Init uses the same RNG key on all hosts+devices to ensure everyone # computes the same initial state and parameters. init_rng = jax.random.PRNGKey(self._random_seed) init_rng = helpers.bcast_local_devices(init_rng) self._experiment_state = init_experiment( rng=init_rng, dummy_input=inputs, backbone_params=backbone_params, backbone_state=backbone_state) # Clear the backbone optimizer's state when the backbone is frozen. if self._freeze_backbone: self._experiment_state = _EvalExperimentState( backbone_params=self._experiment_state.backbone_params, classif_params=self._experiment_state.classif_params, backbone_state=self._experiment_state.backbone_state, backbone_opt_state=None, classif_opt_state=self._experiment_state.classif_opt_state, ) def _make_initial_state( self, rng: jnp.ndarray, dummy_input: dataset.Batch, backbone_params: hk.Params, backbone_state: hk.Params, ) -> _EvalExperimentState: """_EvalExperimentState initialization.""" # Initialize the backbone params # Always create the batchnorm weights (is_training=True), they will be # overwritten when loading the checkpoint. 
embeddings, _ = self.forward_backbone.apply( backbone_params, backbone_state, dummy_input, is_training=True) backbone_opt_state = self._optimizer(0.).init(backbone_params) # Initialize the classifier params and optimizer_state classif_params = self.forward_classif.init(rng, embeddings) classif_opt_state = self._optimizer(0.).init(classif_params) return _EvalExperimentState( backbone_params=backbone_params, classif_params=classif_params, backbone_state=backbone_state, backbone_opt_state=backbone_opt_state, classif_opt_state=classif_opt_state, ) def _build_train_input(self) -> Generator[dataset.Batch, None, None]: """See base class.""" num_devices = jax.device_count() global_batch_size = self._batch_size per_device_batch_size, ragged = divmod(global_batch_size, num_devices) if ragged: raise ValueError( f'Global batch size {global_batch_size} must be divisible by ' f'num devices {num_devices}') return dataset.load( dataset.Split.TRAIN_AND_VALID, preprocess_mode=dataset.PreprocessMode.LINEAR_TRAIN, transpose=self._should_transpose_images(), batch_dims=[jax.local_device_count(), per_device_batch_size]) def _optimizer(self, learning_rate: float): """Build optimizer from config.""" return optax.sgd(learning_rate, **self._optimizer_config) def _loss_fn( self, backbone_params: hk.Params, classif_params: hk.Params, backbone_state: hk.State, inputs: dataset.Batch, ) -> Tuple[jnp.ndarray, Tuple[jnp.ndarray, hk.State]]: """Compute the classification loss function. Args: backbone_params: parameters of the encoder network. classif_params: parameters of the linear classifier. backbone_state: internal state of encoder network. inputs: inputs, containing `images` and `labels`. Returns: The classification loss and various logs. """ embeddings, backbone_state = self.forward_backbone.apply( backbone_params, backbone_state, inputs, is_training=not self._freeze_backbone) logits = self.forward_classif.apply(classif_params, embeddings) labels = hk.one_hot(inputs['labels'], self._num_classes) loss = helpers.softmax_cross_entropy(logits, labels, reduction='mean') scaled_loss = loss / jax.device_count() return scaled_loss, (loss, backbone_state) def _update_func( self, experiment_state: _EvalExperimentState, global_step: jnp.ndarray, inputs: dataset.Batch, ) -> Tuple[_EvalExperimentState, LogsDict]: """Applies an update to parameters and returns new state.""" # This function computes the gradient of the first output of loss_fn and # passes through the other arguments unchanged. # Gradient of the first output of _loss_fn wrt the backbone (arg 0) and the # classifier parameters (arg 1). The auxiliary outputs are returned as-is. grad_loss_fn = jax.grad(self._loss_fn, has_aux=True, argnums=(0, 1)) grads, aux_outputs = grad_loss_fn( experiment_state.backbone_params, experiment_state.classif_params, experiment_state.backbone_state, inputs, ) backbone_grads, classifier_grads = grads train_loss, new_backbone_state = aux_outputs classifier_grads = jax.lax.psum(classifier_grads, axis_name='i') # Compute the decayed learning rate learning_rate = schedules.learning_schedule( global_step, batch_size=self._batch_size, total_steps=self._max_steps, **self._lr_schedule_config) # Compute and apply updates via our optimizer. 
classif_updates, new_classif_opt_state = \ self._optimizer(learning_rate).update( classifier_grads, experiment_state.classif_opt_state) new_classif_params = optax.apply_updates(experiment_state.classif_params, classif_updates) if self._freeze_backbone: del backbone_grads, new_backbone_state # Unused # The backbone is not updated. new_backbone_params = experiment_state.backbone_params new_backbone_opt_state = None new_backbone_state = experiment_state.backbone_state else: backbone_grads = jax.lax.psum(backbone_grads, axis_name='i') # Compute and apply updates via our optimizer. backbone_updates, new_backbone_opt_state = \ self._optimizer(learning_rate).update( backbone_grads, experiment_state.backbone_opt_state) new_backbone_params = optax.apply_updates( experiment_state.backbone_params, backbone_updates) experiment_state = _EvalExperimentState( new_backbone_params, new_classif_params, new_backbone_state, new_backbone_opt_state, new_classif_opt_state, ) # Scalars to log (note: we log the mean across all hosts/devices). scalars = {'train_loss': train_loss} scalars = jax.lax.pmean(scalars, axis_name='i') return experiment_state, scalars # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # def evaluate(self, global_step, **unused_args): """See base class.""" global_step = np.array(helpers.get_first(global_step)) scalars = jax.device_get(self._eval_epoch(**self._evaluation_config)) logging.info('[Step %d] Eval scalars: %s', global_step, scalars) return scalars def _eval_batch( self, backbone_params: hk.Params, classif_params: hk.Params, backbone_state: hk.State, inputs: dataset.Batch, ) -> LogsDict: """Evaluates a batch.""" embeddings, backbone_state = self.forward_backbone.apply( backbone_params, backbone_state, inputs, is_training=False) logits = self.forward_classif.apply(classif_params, embeddings) labels = hk.one_hot(inputs['labels'], self._num_classes) loss = helpers.softmax_cross_entropy(logits, labels, reduction=None) top1_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=1) top5_correct = helpers.topk_accuracy(logits, inputs['labels'], topk=5) # NOTE: Returned values will be summed and finally divided by num_samples. return { 'eval_loss': loss, 'top1_accuracy': top1_correct, 'top5_accuracy': top5_correct } def _eval_epoch(self, subset: Text, batch_size: int): """Evaluates an epoch.""" num_samples = 0. summed_scalars = None backbone_params = helpers.get_first(self._experiment_state.backbone_params) classif_params = helpers.get_first(self._experiment_state.classif_params) backbone_state = helpers.get_first(self._experiment_state.backbone_state) split = dataset.Split.from_string(subset) dataset_iterator = dataset.load( split, preprocess_mode=dataset.PreprocessMode.EVAL, transpose=self._should_transpose_images(), batch_dims=[batch_size]) for inputs in dataset_iterator: num_samples += inputs['labels'].shape[0] scalars = self.eval_batch_jit( backbone_params, classif_params, backbone_state, inputs, ) # Accumulate the sum of scalars for each step. scalars = jax.tree_map(lambda x: jnp.sum(x, axis=0), scalars) if summed_scalars is None: summed_scalars = scalars else: summed_scalars = jax.tree_multimap(jnp.add, summed_scalars, scalars) mean_scalars = jax.tree_map(lambda x: x / num_samples, summed_scalars) return mean_scalars
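
# Sanity sketch (illustrative, not drawn from the original file): scaling the
# loss by 1 / jax.device_count() in _loss_fn before psum-ing the gradients in
# _update_func yields the cross-device *mean* gradient, e.g. for 8 devices:
_per_device = np.arange(8.0)  # Hypothetical per-device gradient values.
assert np.isclose(np.sum(_per_device / 8.0), np.mean(_per_device))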
deepmind-research-master
byol/eval_experiment.py
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of LARS Optimizer with optax."""

from typing import Any, Callable, List, NamedTuple, Optional, Tuple

import jax
import jax.numpy as jnp
import optax
import tree as nest

# A filter function takes a path and a value as input and outputs True for
# variables on which to apply the update and False for variables on which
# not to apply it.
FilterFn = Callable[[Tuple[Any], jnp.ndarray], jnp.ndarray]


def exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:
  """Filter to exclude biases and normalization weights."""
  del val
  if path[-1] == "b" or "norm" in path[-2]:
    return False
  return True


def _partial_update(updates: optax.Updates,
                    new_updates: optax.Updates,
                    params: optax.Params,
                    filter_fn: Optional[FilterFn] = None) -> optax.Updates:
  """Returns new_updates for params where filter_fn is True, else updates."""
  if filter_fn is None:
    return new_updates

  wrapped_filter_fn = lambda x, y: jnp.array(filter_fn(x, y))
  params_to_filter = nest.map_structure_with_path(wrapped_filter_fn, params)

  def _update_fn(g: jnp.ndarray, t: jnp.ndarray,
                 m: jnp.ndarray) -> jnp.ndarray:
    m = m.astype(g.dtype)
    return g * (1. - m) + t * m

  return jax.tree_multimap(_update_fn, updates, new_updates, params_to_filter)


class ScaleByLarsState(NamedTuple):
  mu: jnp.ndarray


def scale_by_lars(
    momentum: float = 0.9,
    eta: float = 0.001,
    filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
  """Rescales updates according to the LARS algorithm.

  Does not include weight decay.

  References:
    [You et al, 2017](https://arxiv.org/abs/1708.03888)

  Args:
    momentum: momentum coefficient.
    eta: LARS coefficient.
    filter_fn: an optional filter function.

  Returns:
    An (init_fn, update_fn) tuple.
  """

  def init_fn(params: optax.Params) -> ScaleByLarsState:
    mu = jax.tree_multimap(jnp.zeros_like, params)  # momentum
    return ScaleByLarsState(mu=mu)

  def update_fn(
      updates: optax.Updates, state: ScaleByLarsState,
      params: optax.Params) -> Tuple[optax.Updates, ScaleByLarsState]:

    def lars_adaptation(
        update: jnp.ndarray,
        param: jnp.ndarray,
    ) -> jnp.ndarray:
      param_norm = jnp.linalg.norm(param)
      update_norm = jnp.linalg.norm(update)
      return update * jnp.where(
          param_norm > 0.,
          jnp.where(update_norm > 0, (eta * param_norm / update_norm), 1.0),
          1.0)

    adapted_updates = jax.tree_multimap(lars_adaptation, updates, params)
    adapted_updates = _partial_update(updates, adapted_updates, params,
                                      filter_fn)
    mu = jax.tree_multimap(lambda g, t: momentum * g + t, state.mu,
                           adapted_updates)
    return mu, ScaleByLarsState(mu=mu)

  return optax.GradientTransformation(init_fn, update_fn)


class AddWeightDecayState(NamedTuple):
  """Stateless transformation."""


def add_weight_decay(
    weight_decay: float,
    filter_fn: Optional[FilterFn] = None) -> optax.GradientTransformation:
  """Adds a weight decay to the update.

  Args:
    weight_decay: weight decay coefficient.
    filter_fn: an optional filter function.

  Returns:
    An (init_fn, update_fn) tuple.
""" def init_fn(_) -> AddWeightDecayState: return AddWeightDecayState() def update_fn( updates: optax.Updates, state: AddWeightDecayState, params: optax.Params, ) -> Tuple[optax.Updates, AddWeightDecayState]: new_updates = jax.tree_multimap(lambda g, p: g + weight_decay * p, updates, params) new_updates = _partial_update(updates, new_updates, params, filter_fn) return new_updates, state return optax.GradientTransformation(init_fn, update_fn) LarsState = List # Type for the lars optimizer def lars( learning_rate: float, weight_decay: float = 0., momentum: float = 0.9, eta: float = 0.001, weight_decay_filter: Optional[FilterFn] = None, lars_adaptation_filter: Optional[FilterFn] = None, ) -> optax.GradientTransformation: """Creates lars optimizer with weight decay. References: [You et al, 2017](https://arxiv.org/abs/1708.03888) Args: learning_rate: learning rate coefficient. weight_decay: weight decay coefficient. momentum: momentum coefficient. eta: LARS coefficient. weight_decay_filter: optional filter function to only apply the weight decay on a subset of parameters. The filter function takes as input the parameter path (as a tuple) and its associated update, and return a True for params to apply the weight decay and False for params to not apply the weight decay. When weight_decay_filter is set to None, the weight decay is not applied to the bias, i.e. when the variable name is 'b', and the weight decay is not applied to nornalization params, i.e. the panultimate path contains 'norm'. lars_adaptation_filter: similar to weight decay filter but for lars adaptation Returns: An optax.GradientTransformation, i.e. a (init_fn, update_fn) tuple. """ if weight_decay_filter is None: weight_decay_filter = lambda *_: True if lars_adaptation_filter is None: lars_adaptation_filter = lambda *_: True return optax.chain( add_weight_decay( weight_decay=weight_decay, filter_fn=weight_decay_filter), scale_by_lars( momentum=momentum, eta=eta, filter_fn=lars_adaptation_filter), optax.scale(-learning_rate), )
deepmind-research-master
byol/utils/optimizers.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ImageNet dataset with typical pre-processing.""" import enum from typing import Generator, Mapping, Optional, Sequence, Text, Tuple import jax import jax.numpy as jnp import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds Batch = Mapping[Text, np.ndarray] class Split(enum.Enum): """Imagenet dataset split.""" TRAIN = 1 TRAIN_AND_VALID = 2 VALID = 3 TEST = 4 @classmethod def from_string(cls, name: Text) -> 'Split': return { 'TRAIN': Split.TRAIN, 'TRAIN_AND_VALID': Split.TRAIN_AND_VALID, 'VALID': Split.VALID, 'VALIDATION': Split.VALID, 'TEST': Split.TEST }[name.upper()] @property def num_examples(self): return { Split.TRAIN_AND_VALID: 1281167, Split.TRAIN: 1271167, Split.VALID: 10000, Split.TEST: 50000 }[self] class PreprocessMode(enum.Enum): """Preprocessing modes for the dataset.""" PRETRAIN = 1 # Generates two augmented views (random crop + augmentations). LINEAR_TRAIN = 2 # Generates a single random crop. EVAL = 3 # Generates a single center crop. def normalize_images(images: jnp.ndarray) -> jnp.ndarray: """Normalize the image using ImageNet statistics.""" mean_rgb = (0.485, 0.456, 0.406) stddev_rgb = (0.229, 0.224, 0.225) normed_images = images - jnp.array(mean_rgb).reshape((1, 1, 1, 3)) normed_images = normed_images / jnp.array(stddev_rgb).reshape((1, 1, 1, 3)) return normed_images def load(split: Split, *, preprocess_mode: PreprocessMode, batch_dims: Sequence[int], transpose: bool = False, allow_caching: bool = False) -> Generator[Batch, None, None]: """Loads the given split of the dataset.""" start, end = _shard(split, jax.host_id(), jax.host_count()) total_batch_size = np.prod(batch_dims) tfds_split = tfds.core.ReadInstruction( _to_tfds_split(split), from_=start, to=end, unit='abs') ds = tfds.load( 'imagenet2012:5.*.*', split=tfds_split, decoders={'image': tfds.decode.SkipDecoding()}) options = tf.data.Options() options.experimental_threading.private_threadpool_size = 48 options.experimental_threading.max_intra_op_parallelism = 1 if preprocess_mode is not PreprocessMode.EVAL: options.experimental_deterministic = False if jax.host_count() > 1 and allow_caching: # Only cache if we are reading a subset of the dataset. 
ds = ds.cache() ds = ds.repeat() ds = ds.shuffle(buffer_size=10 * total_batch_size, seed=0) else: if split.num_examples % total_batch_size != 0: raise ValueError(f'Test/valid must be divisible by {total_batch_size}') ds = ds.with_options(options) def preprocess_pretrain(example): view1 = _preprocess_image(example['image'], mode=preprocess_mode) view2 = _preprocess_image(example['image'], mode=preprocess_mode) label = tf.cast(example['label'], tf.int32) return {'view1': view1, 'view2': view2, 'labels': label} def preprocess_linear_train(example): image = _preprocess_image(example['image'], mode=preprocess_mode) label = tf.cast(example['label'], tf.int32) return {'images': image, 'labels': label} def preprocess_eval(example): image = _preprocess_image(example['image'], mode=preprocess_mode) label = tf.cast(example['label'], tf.int32) return {'images': image, 'labels': label} if preprocess_mode is PreprocessMode.PRETRAIN: ds = ds.map( preprocess_pretrain, num_parallel_calls=tf.data.experimental.AUTOTUNE) elif preprocess_mode is PreprocessMode.LINEAR_TRAIN: ds = ds.map( preprocess_linear_train, num_parallel_calls=tf.data.experimental.AUTOTUNE) else: ds = ds.map( preprocess_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE) def transpose_fn(batch): # We use the double-transpose-trick to improve performance for TPUs. Note # that this (typically) requires a matching HWCN->NHWC transpose in your # model code. The compiler cannot make this optimization for us since our # data pipeline and model are compiled separately. batch = dict(**batch) if preprocess_mode is PreprocessMode.PRETRAIN: batch['view1'] = tf.transpose(batch['view1'], (1, 2, 3, 0)) batch['view2'] = tf.transpose(batch['view2'], (1, 2, 3, 0)) else: batch['images'] = tf.transpose(batch['images'], (1, 2, 3, 0)) return batch for i, batch_size in enumerate(reversed(batch_dims)): ds = ds.batch(batch_size) if i == 0 and transpose: ds = ds.map(transpose_fn) # NHWC -> HWCN ds = ds.prefetch(tf.data.experimental.AUTOTUNE) yield from tfds.as_numpy(ds) def _to_tfds_split(split: Split) -> tfds.Split: """Returns the TFDS split appropriately sharded.""" # NOTE: Imagenet did not release labels for the test split used in the # competition, we consider the VALID split the TEST split and reserve # 10k images from TRAIN for VALID. if split in (Split.TRAIN, Split.TRAIN_AND_VALID, Split.VALID): return tfds.Split.TRAIN else: assert split == Split.TEST return tfds.Split.VALIDATION def _shard(split: Split, shard_index: int, num_shards: int) -> Tuple[int, int]: """Returns [start, end) for the given shard index.""" assert shard_index < num_shards arange = np.arange(split.num_examples) shard_range = np.array_split(arange, num_shards)[shard_index] start, end = shard_range[0], (shard_range[-1] + 1) if split == Split.TRAIN: # Note that our TRAIN=TFDS_TRAIN[10000:] and VALID=TFDS_TRAIN[:10000]. offset = Split.VALID.num_examples start += offset end += offset return start, end def _preprocess_image( image_bytes: tf.Tensor, mode: PreprocessMode, ) -> tf.Tensor: """Returns processed and resized images.""" if mode is PreprocessMode.PRETRAIN: image = _decode_and_random_crop(image_bytes) # Random horizontal flipping is optionally done in augmentations.preprocess. elif mode is PreprocessMode.LINEAR_TRAIN: image = _decode_and_random_crop(image_bytes) image = tf.image.random_flip_left_right(image) else: image = _decode_and_center_crop(image_bytes) # NOTE: Bicubic resize (1) casts uint8 to float32 and (2) resizes without # clamping overshoots. 
This means values returned will be outside the range # [0.0, 255.0] (e.g. we have observed outputs in the range [-51.1, 336.6]). assert image.dtype == tf.uint8 image = tf.image.resize(image, [224, 224], tf.image.ResizeMethod.BICUBIC) image = tf.clip_by_value(image / 255., 0., 1.) return image def _decode_and_random_crop(image_bytes: tf.Tensor) -> tf.Tensor: """Make a random crop of 224.""" img_size = tf.image.extract_jpeg_shape(image_bytes) area = tf.cast(img_size[1] * img_size[0], tf.float32) target_area = tf.random.uniform([], 0.08, 1.0, dtype=tf.float32) * area log_ratio = (tf.math.log(3 / 4), tf.math.log(4 / 3)) aspect_ratio = tf.math.exp( tf.random.uniform([], *log_ratio, dtype=tf.float32)) w = tf.cast(tf.round(tf.sqrt(target_area * aspect_ratio)), tf.int32) h = tf.cast(tf.round(tf.sqrt(target_area / aspect_ratio)), tf.int32) w = tf.minimum(w, img_size[1]) h = tf.minimum(h, img_size[0]) offset_w = tf.random.uniform((), minval=0, maxval=img_size[1] - w + 1, dtype=tf.int32) offset_h = tf.random.uniform((), minval=0, maxval=img_size[0] - h + 1, dtype=tf.int32) crop_window = tf.stack([offset_h, offset_w, h, w]) image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) return image def transpose_images(batch: Batch): """Transpose images for TPU training..""" new_batch = dict(batch) # Avoid mutating in place. if 'images' in batch: new_batch['images'] = jnp.transpose(batch['images'], (3, 0, 1, 2)) else: new_batch['view1'] = jnp.transpose(batch['view1'], (3, 0, 1, 2)) new_batch['view2'] = jnp.transpose(batch['view2'], (3, 0, 1, 2)) return new_batch def _decode_and_center_crop( image_bytes: tf.Tensor, jpeg_shape: Optional[tf.Tensor] = None, ) -> tf.Tensor: """Crops to center of image with padding then scales.""" if jpeg_shape is None: jpeg_shape = tf.image.extract_jpeg_shape(image_bytes) image_height = jpeg_shape[0] image_width = jpeg_shape[1] padded_center_crop_size = tf.cast( ((224 / (224 + 32)) * tf.cast(tf.minimum(image_height, image_width), tf.float32)), tf.int32) offset_height = ((image_height - padded_center_crop_size) + 1) // 2 offset_width = ((image_width - padded_center_crop_size) + 1) // 2 crop_window = tf.stack([ offset_height, offset_width, padded_center_crop_size, padded_center_crop_size ]) image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3) return image
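
# Worked example (illustrative, not drawn from the original file): for a
# 256x512 JPEG, _decode_and_center_crop keeps 224 / (224 + 32) = 0.875 of the
# short side, i.e. int(0.875 * 256) = 224 pixels, before the bicubic resize
# to 224x224 in _preprocess_image.
assert int((224 / (224 + 32)) * min(256, 512)) == 224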
deepmind-research-master
byol/utils/dataset.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Networks used in BYOL.""" from typing import Any, Mapping, Optional, Sequence, Text import haiku as hk import jax import jax.numpy as jnp class MLP(hk.Module): """One hidden layer perceptron, with normalization.""" def __init__( self, name: Text, hidden_size: int, output_size: int, bn_config: Mapping[Text, Any], ): super().__init__(name=name) self._hidden_size = hidden_size self._output_size = output_size self._bn_config = bn_config def __call__(self, inputs: jnp.ndarray, is_training: bool) -> jnp.ndarray: out = hk.Linear(output_size=self._hidden_size, with_bias=True)(inputs) out = hk.BatchNorm(**self._bn_config)(out, is_training=is_training) out = jax.nn.relu(out) out = hk.Linear(output_size=self._output_size, with_bias=False)(out) return out def check_length(length, value, name): if len(value) != length: raise ValueError(f'`{name}` must be of length 4 not {len(value)}') class ResNetTorso(hk.Module): """ResNet model.""" def __init__( self, blocks_per_group: Sequence[int], num_classes: Optional[int] = None, bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, bottleneck: bool = True, channels_per_group: Sequence[int] = (256, 512, 1024, 2048), use_projection: Sequence[bool] = (True, True, True, True), width_multiplier: int = 1, name: Optional[str] = None, ): """Constructs a ResNet model. Args: blocks_per_group: A sequence of length 4 that indicates the number of blocks created in each group. num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of three elements, `decay_rate`, `eps`, and `cross_replica_axis`, to be passed on to the `BatchNorm` layers. By default the `decay_rate` is `0.9` and `eps` is `1e-5`, and the axis is `None`. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. bottleneck: Whether the block should bottleneck or not. Defaults to True. channels_per_group: A sequence of length 4 that indicates the number of channels used for each block in each group. use_projection: A sequence of length 4 that indicates whether each residual block should use projection. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(name=name) self.resnet_v2 = resnet_v2 bn_config = dict(bn_config or {}) bn_config.setdefault('decay_rate', 0.9) bn_config.setdefault('eps', 1e-5) bn_config.setdefault('create_scale', True) bn_config.setdefault('create_offset', True) # Number of blocks in each group for ResNet. 
check_length(4, blocks_per_group, 'blocks_per_group') check_length(4, channels_per_group, 'channels_per_group') self.initial_conv = hk.Conv2D( output_channels=64 * width_multiplier, kernel_shape=7, stride=2, with_bias=False, padding='SAME', name='initial_conv') if not self.resnet_v2: self.initial_batchnorm = hk.BatchNorm(name='initial_batchnorm', **bn_config) self.block_groups = [] strides = (1, 2, 2, 2) for i in range(4): self.block_groups.append( hk.nets.ResNet.BlockGroup( channels=width_multiplier * channels_per_group[i], num_blocks=blocks_per_group[i], stride=strides[i], bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=bottleneck, use_projection=use_projection[i], name='block_group_%d' % (i))) if self.resnet_v2: self.final_batchnorm = hk.BatchNorm(name='final_batchnorm', **bn_config) self.logits = hk.Linear(num_classes, w_init=jnp.zeros, name='logits') def __call__(self, inputs, is_training, test_local_stats=False): out = inputs out = self.initial_conv(out) if not self.resnet_v2: out = self.initial_batchnorm(out, is_training, test_local_stats) out = jax.nn.relu(out) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') for block_group in self.block_groups: out = block_group(out, is_training, test_local_stats) if self.resnet_v2: out = self.final_batchnorm(out, is_training, test_local_stats) out = jax.nn.relu(out) out = jnp.mean(out, axis=[1, 2]) return out class TinyResNet(ResNetTorso): """Tiny resnet for local runs and tests.""" def __init__(self, num_classes: Optional[int] = None, bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(1, 1, 1, 1), channels_per_group=(8, 8, 8, 8), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=False, width_multiplier=width_multiplier, name=name) class ResNet18(ResNetTorso): """ResNet18.""" def __init__(self, num_classes: Optional[int] = None, bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(2, 2, 2, 2), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=False, channels_per_group=(64, 128, 256, 512), width_multiplier=width_multiplier, name=name) class ResNet34(ResNetTorso): """ResNet34.""" def __init__(self, num_classes: Optional[int], bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. 
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(3, 4, 6, 3), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=False, channels_per_group=(64, 128, 256, 512), width_multiplier=width_multiplier, name=name) class ResNet50(ResNetTorso): """ResNet50.""" def __init__(self, num_classes: Optional[int] = None, bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(3, 4, 6, 3), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=True, width_multiplier=width_multiplier, name=name) class ResNet101(ResNetTorso): """ResNet101.""" def __init__(self, num_classes: Optional[int], bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(3, 4, 23, 3), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=True, width_multiplier=width_multiplier, name=name) class ResNet152(ResNetTorso): """ResNet152.""" def __init__(self, num_classes: Optional[int], bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. """ super().__init__(blocks_per_group=(3, 8, 36, 3), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=True, width_multiplier=width_multiplier, name=name) class ResNet200(ResNetTorso): """ResNet200.""" def __init__(self, num_classes: Optional[int], bn_config: Optional[Mapping[str, float]] = None, resnet_v2: bool = False, width_multiplier: int = 1, name: Optional[str] = None): """Constructs a ResNet model. Args: num_classes: The number of classes to classify the inputs into. bn_config: A dictionary of two elements, `decay_rate` and `eps` to be passed on to the `BatchNorm` layers. resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to False. width_multiplier: An integer multiplying the number of channels per group. name: Name of the module. 
""" super().__init__(blocks_per_group=(3, 24, 36, 3), num_classes=num_classes, bn_config=bn_config, resnet_v2=resnet_v2, bottleneck=True, width_multiplier=width_multiplier, name=name)
deepmind-research-master
byol/utils/networks.py
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility functions."""

from typing import Optional, Text

from absl import logging
import jax
import jax.numpy as jnp


def topk_accuracy(
    logits: jnp.ndarray,
    labels: jnp.ndarray,
    topk: int,
    ignore_label_above: Optional[int] = None,
) -> jnp.ndarray:
  """Top-k accuracy."""
  assert len(labels.shape) == 1, 'topk expects 1d int labels.'
  assert len(logits.shape) == 2, 'topk expects 2d logits.'

  if ignore_label_above is not None:
    logits = logits[labels < ignore_label_above, :]
    labels = labels[labels < ignore_label_above]

  prds = jnp.argsort(logits, axis=1)[:, ::-1]
  prds = prds[:, :topk]

  total = jnp.any(prds == jnp.tile(labels[:, jnp.newaxis], [1, topk]), axis=1)
  return total


def softmax_cross_entropy(
    logits: jnp.ndarray,
    labels: jnp.ndarray,
    reduction: Optional[Text] = 'mean',
) -> jnp.ndarray:
  """Computes softmax cross entropy given logits and one-hot class labels.

  Args:
    logits: Logit output values.
    labels: Ground truth one-hot-encoded labels.
    reduction: Type of reduction to apply to loss.

  Returns:
    Loss value. If `reduction` is `none`, this has the same shape as `labels`;
    otherwise, it is scalar.

  Raises:
    ValueError: If the type of `reduction` is unsupported.
  """
  loss = -jnp.sum(labels * jax.nn.log_softmax(logits), axis=-1)
  if reduction == 'sum':
    return jnp.sum(loss)
  elif reduction == 'mean':
    return jnp.mean(loss)
  elif reduction == 'none' or reduction is None:
    return loss
  else:
    raise ValueError(f'Incorrect reduction mode {reduction}')


def l2_normalize(
    x: jnp.ndarray,
    axis: Optional[int] = None,
    epsilon: float = 1e-12,
) -> jnp.ndarray:
  """l2 normalize a tensor on an axis with numerical stability."""
  square_sum = jnp.sum(jnp.square(x), axis=axis, keepdims=True)
  x_inv_norm = jax.lax.rsqrt(jnp.maximum(square_sum, epsilon))
  return x * x_inv_norm


def l2_weight_regularizer(params):
  """Computes an l2 (ridge) penalty on the weights.

  Biases ('b') and normalization parameters are excluded from the penalty.

  Args:
    params: the entire param set.

  Returns:
    Scalar: half the sum of squared weights.
  """
  l2_norm = 0.
  for mod_name, mod_params in params.items():
    if 'norm' not in mod_name:
      for param_k, param_v in mod_params.items():
        if param_k != 'b':  # Filter out biases.
          l2_norm += jnp.sum(jnp.square(param_v))
        else:
          logging.warning('Excluding %s/%s from optimizer weight decay!',
                          mod_name, param_k)
    else:
      logging.warning('Excluding %s from optimizer weight decay!', mod_name)

  return 0.5 * l2_norm


def regression_loss(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
  """Byol's regression loss.
This is a simple cosine similarity.""" normed_x, normed_y = l2_normalize(x, axis=-1), l2_normalize(y, axis=-1) return jnp.sum((normed_x - normed_y)**2, axis=-1) def bcast_local_devices(value): """Broadcasts an object to all local devices.""" devices = jax.local_devices() def _replicate(x): """Replicate an object on each device.""" x = jnp.array(x) return jax.device_put_sharded(len(devices) * [x], devices) return jax.tree_util.tree_map(_replicate, value) def get_first(xs): """Gets values from the first device.""" return jax.tree_map(lambda x: x[0], xs)
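

# Editor's sketch (not part of the original file): a quick self-check of the
# helpers above on toy inputs; all values below are illustrative only.
if __name__ == '__main__':
  logits = jnp.array([[2.0, 1.0, 0.1],
                      [0.1, 0.2, 3.0]])
  labels = jnp.array([0, 2])
  # Both rows rank the true class first, so top-1 accuracy is [True, True].
  print(topk_accuracy(logits, labels, topk=1))
  # For l2-normalized inputs, regression_loss is 2 - 2 * cosine similarity:
  # identical vectors give ~0, orthogonal vectors give ~2.
  x = jnp.array([[1.0, 0.0]])
  print(regression_loss(x, x))
  print(regression_loss(x, jnp.array([[0.0, 1.0]])))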
deepmind-research-master
byol/utils/helpers.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Data preprocessing and augmentation.""" import functools from typing import Any, Mapping, Text import jax import jax.numpy as jnp # typing JaxBatch = Mapping[Text, jnp.ndarray] ConfigDict = Mapping[Text, Any] augment_config = dict( view1=dict( random_flip=True, # Random left/right flip color_transform=dict( apply_prob=1.0, # Range of jittering brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1, # Probability of applying color jittering color_jitter_prob=0.8, # Probability of converting to grayscale to_grayscale_prob=0.2, # Shuffle the order of color transforms shuffle=True), gaussian_blur=dict( apply_prob=1.0, # Kernel size ~ image_size / blur_divider blur_divider=10., # Kernel distribution sigma_min=0.1, sigma_max=2.0), solarize=dict(apply_prob=0.0, threshold=0.5), ), view2=dict( random_flip=True, color_transform=dict( apply_prob=1.0, brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1, color_jitter_prob=0.8, to_grayscale_prob=0.2, shuffle=True), gaussian_blur=dict( apply_prob=0.1, blur_divider=10., sigma_min=0.1, sigma_max=2.0), solarize=dict(apply_prob=0.2, threshold=0.5), )) def postprocess(inputs: JaxBatch, rng: jnp.ndarray): """Apply the image augmentations to crops in inputs (view1 and view2).""" def _postprocess_image( images: jnp.ndarray, rng: jnp.ndarray, presets: ConfigDict, ) -> JaxBatch: """Applies augmentations in post-processing. Args: images: an NHWC tensor (with C=3), with float values in [0, 1]. rng: a single PRNGKey. presets: a dict of presets for the augmentations. Returns: A batch of augmented images with shape NHWC, with keys view1, view2 and labels. """ flip_rng, color_rng, blur_rng, solarize_rng = jax.random.split(rng, 4) out = images if presets['random_flip']: out = random_flip(out, flip_rng) if presets['color_transform']['apply_prob'] > 0: out = color_transform(out, color_rng, **presets['color_transform']) if presets['gaussian_blur']['apply_prob'] > 0: out = gaussian_blur(out, blur_rng, **presets['gaussian_blur']) if presets['solarize']['apply_prob'] > 0: out = solarize(out, solarize_rng, **presets['solarize']) out = jnp.clip(out, 0., 1.) return jax.lax.stop_gradient(out) rng1, rng2 = jax.random.split(rng, num=2) view1 = _postprocess_image(inputs['view1'], rng1, augment_config['view1']) view2 = _postprocess_image(inputs['view2'], rng2, augment_config['view2']) return dict(view1=view1, view2=view2, labels=inputs['labels']) def _maybe_apply(apply_fn, inputs, rng, apply_prob): should_apply = jax.random.uniform(rng, shape=()) <= apply_prob return jax.lax.cond(should_apply, inputs, apply_fn, inputs, lambda x: x) def _depthwise_conv2d(inputs, kernel, strides, padding): """Computes a depthwise conv2d in Jax. Args: inputs: an NHWC tensor with N=1. kernel: a [H", W", 1, C] tensor. strides: a 2d tensor. padding: "SAME" or "VALID". Returns: The depthwise convolution of inputs with kernel, as [H, W, C]. 
""" return jax.lax.conv_general_dilated( inputs, kernel, strides, padding, feature_group_count=inputs.shape[-1], dimension_numbers=('NHWC', 'HWIO', 'NHWC')) def _gaussian_blur_single_image(image, kernel_size, padding, sigma): """Applies gaussian blur to a single image, given as NHWC with N=1.""" radius = int(kernel_size / 2) kernel_size_ = 2 * radius + 1 x = jnp.arange(-radius, radius + 1).astype(jnp.float32) blur_filter = jnp.exp(-x**2 / (2. * sigma**2)) blur_filter = blur_filter / jnp.sum(blur_filter) blur_v = jnp.reshape(blur_filter, [kernel_size_, 1, 1, 1]) blur_h = jnp.reshape(blur_filter, [1, kernel_size_, 1, 1]) num_channels = image.shape[-1] blur_h = jnp.tile(blur_h, [1, 1, 1, num_channels]) blur_v = jnp.tile(blur_v, [1, 1, 1, num_channels]) expand_batch_dim = len(image.shape) == 3 if expand_batch_dim: image = image[jnp.newaxis, ...] blurred = _depthwise_conv2d(image, blur_h, strides=[1, 1], padding=padding) blurred = _depthwise_conv2d(blurred, blur_v, strides=[1, 1], padding=padding) blurred = jnp.squeeze(blurred, axis=0) return blurred def _random_gaussian_blur(image, rng, kernel_size, padding, sigma_min, sigma_max, apply_prob): """Applies a random gaussian blur.""" apply_rng, transform_rng = jax.random.split(rng) def _apply(image): sigma_rng, = jax.random.split(transform_rng, 1) sigma = jax.random.uniform( sigma_rng, shape=(), minval=sigma_min, maxval=sigma_max, dtype=jnp.float32) return _gaussian_blur_single_image(image, kernel_size, padding, sigma) return _maybe_apply(_apply, image, apply_rng, apply_prob) def rgb_to_hsv(r, g, b): """Converts R, G, B values to H, S, V values. Reference TF implementation: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc Only input values between 0 and 1 are guaranteed to work properly, but this function complies with the TF implementation outside of this range. Args: r: A tensor representing the red color component as floats. g: A tensor representing the green color component as floats. b: A tensor representing the blue color component as floats. Returns: H, S, V values, each as tensors of shape [...] (same as the input without the last dimension). """ vv = jnp.maximum(jnp.maximum(r, g), b) range_ = vv - jnp.minimum(jnp.minimum(r, g), b) sat = jnp.where(vv > 0, range_ / vv, 0.) norm = jnp.where(range_ != 0, 1. / (6. * range_), 1e9) hr = norm * (g - b) hg = norm * (b - r) + 2. / 6. hb = norm * (r - g) + 4. / 6. hue = jnp.where(r == vv, hr, jnp.where(g == vv, hg, hb)) hue = hue * (range_ > 0) hue = hue + (hue < 0) return hue, sat, vv def hsv_to_rgb(h, s, v): """Converts H, S, V values to an R, G, B tuple. Reference TF implementation: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/adjust_saturation_op.cc Only input values between 0 and 1 are guaranteed to work properly, but this function complies with the TF implementation outside of this range. Args: h: A float tensor of arbitrary shape for the hue (0-1 values). s: A float tensor of the same shape for the saturation (0-1 values). v: A float tensor of the same shape for the value channel (0-1 values). Returns: An (r, g, b) tuple, each with the same dimension as the inputs. """ c = s * v m = v - c dh = (h % 1.) * 6. fmodu = dh % 2. 
x = c * (1 - jnp.abs(fmodu - 1)) hcat = jnp.floor(dh).astype(jnp.int32) rr = jnp.where( (hcat == 0) | (hcat == 5), c, jnp.where( (hcat == 1) | (hcat == 4), x, 0)) + m gg = jnp.where( (hcat == 1) | (hcat == 2), c, jnp.where( (hcat == 0) | (hcat == 3), x, 0)) + m bb = jnp.where( (hcat == 3) | (hcat == 4), c, jnp.where( (hcat == 2) | (hcat == 5), x, 0)) + m return rr, gg, bb def adjust_brightness(rgb_tuple, delta): return jax.tree_map(lambda x: x + delta, rgb_tuple) def adjust_contrast(image, factor): def _adjust_contrast_channel(channel): mean = jnp.mean(channel, axis=(-2, -1), keepdims=True) return factor * (channel - mean) + mean return jax.tree_map(_adjust_contrast_channel, image) def adjust_saturation(h, s, v, factor): return h, jnp.clip(s * factor, 0., 1.), v def adjust_hue(h, s, v, delta): # Note: this method exactly matches TF"s adjust_hue (combined with the hsv/rgb # conversions) when running on GPU. When running on CPU, the results will be # different if all RGB values for a pixel are outside of the [0, 1] range. return (h + delta) % 1.0, s, v def _random_brightness(rgb_tuple, rng, max_delta): delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta) return adjust_brightness(rgb_tuple, delta) def _random_contrast(rgb_tuple, rng, max_delta): factor = jax.random.uniform( rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta) return adjust_contrast(rgb_tuple, factor) def _random_saturation(rgb_tuple, rng, max_delta): h, s, v = rgb_to_hsv(*rgb_tuple) factor = jax.random.uniform( rng, shape=(), minval=1 - max_delta, maxval=1 + max_delta) return hsv_to_rgb(*adjust_saturation(h, s, v, factor)) def _random_hue(rgb_tuple, rng, max_delta): h, s, v = rgb_to_hsv(*rgb_tuple) delta = jax.random.uniform(rng, shape=(), minval=-max_delta, maxval=max_delta) return hsv_to_rgb(*adjust_hue(h, s, v, delta)) def _to_grayscale(image): rgb_weights = jnp.array([0.2989, 0.5870, 0.1140]) grayscale = jnp.tensordot(image, rgb_weights, axes=(-1, -1))[..., jnp.newaxis] return jnp.tile(grayscale, (1, 1, 3)) # Back to 3 channels. def _color_transform_single_image(image, rng, brightness, contrast, saturation, hue, to_grayscale_prob, color_jitter_prob, apply_prob, shuffle): """Applies color jittering to a single image.""" apply_rng, transform_rng = jax.random.split(rng) perm_rng, b_rng, c_rng, s_rng, h_rng, cj_rng, gs_rng = jax.random.split( transform_rng, 7) # Whether the transform should be applied at all. should_apply = jax.random.uniform(apply_rng, shape=()) <= apply_prob # Whether to apply grayscale transform. should_apply_gs = jax.random.uniform(gs_rng, shape=()) <= to_grayscale_prob # Whether to apply color jittering. should_apply_color = jax.random.uniform(cj_rng, shape=()) <= color_jitter_prob # Decorator to conditionally apply fn based on an index. 
def _make_cond(fn, idx): def identity_fn(x, unused_rng, unused_param): return x def cond_fn(args, i): def clip(args): return jax.tree_map(lambda arg: jnp.clip(arg, 0., 1.), args) out = jax.lax.cond(should_apply & should_apply_color & (i == idx), args, lambda a: clip(fn(*a)), args, lambda a: identity_fn(*a)) return jax.lax.stop_gradient(out) return cond_fn random_brightness_cond = _make_cond(_random_brightness, idx=0) random_contrast_cond = _make_cond(_random_contrast, idx=1) random_saturation_cond = _make_cond(_random_saturation, idx=2) random_hue_cond = _make_cond(_random_hue, idx=3) def _color_jitter(x): rgb_tuple = tuple(jax.tree_map(jnp.squeeze, jnp.split(x, 3, axis=-1))) if shuffle: order = jax.random.permutation(perm_rng, jnp.arange(4, dtype=jnp.int32)) else: order = range(4) for idx in order: if brightness > 0: rgb_tuple = random_brightness_cond((rgb_tuple, b_rng, brightness), idx) if contrast > 0: rgb_tuple = random_contrast_cond((rgb_tuple, c_rng, contrast), idx) if saturation > 0: rgb_tuple = random_saturation_cond((rgb_tuple, s_rng, saturation), idx) if hue > 0: rgb_tuple = random_hue_cond((rgb_tuple, h_rng, hue), idx) return jnp.stack(rgb_tuple, axis=-1) out_apply = _color_jitter(image) out_apply = jax.lax.cond(should_apply & should_apply_gs, out_apply, _to_grayscale, out_apply, lambda x: x) return jnp.clip(out_apply, 0., 1.) def _random_flip_single_image(image, rng): _, flip_rng = jax.random.split(rng) should_flip_lr = jax.random.uniform(flip_rng, shape=()) <= 0.5 image = jax.lax.cond(should_flip_lr, image, jnp.fliplr, image, lambda x: x) return image def random_flip(images, rng): rngs = jax.random.split(rng, images.shape[0]) return jax.vmap(_random_flip_single_image)(images, rngs) def color_transform(images, rng, brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2, color_jitter_prob=0.8, to_grayscale_prob=0.2, apply_prob=1.0, shuffle=True): """Applies color jittering and/or grayscaling to a batch of images. Args: images: an NHWC tensor, with C=3. rng: a single PRNGKey. brightness: the range of jitter on brightness. contrast: the range of jitter on contrast. saturation: the range of jitter on saturation. hue: the range of jitter on hue. color_jitter_prob: the probability of applying color jittering. to_grayscale_prob: the probability of converting the image to grayscale. apply_prob: the probability of applying the transform to a batch element. shuffle: whether to apply the transforms in a random order. Returns: A NHWC tensor of the transformed images. """ rngs = jax.random.split(rng, images.shape[0]) jitter_fn = functools.partial( _color_transform_single_image, brightness=brightness, contrast=contrast, saturation=saturation, hue=hue, color_jitter_prob=color_jitter_prob, to_grayscale_prob=to_grayscale_prob, apply_prob=apply_prob, shuffle=shuffle) return jax.vmap(jitter_fn)(images, rngs) def gaussian_blur(images, rng, blur_divider=10., sigma_min=0.1, sigma_max=2.0, apply_prob=1.0): """Applies gaussian blur to a batch of images. Args: images: an NHWC tensor, with C=3. rng: a single PRNGKey. blur_divider: the blurring kernel will have size H / blur_divider. sigma_min: the minimum value for sigma in the blurring kernel. sigma_max: the maximum value for sigma in the blurring kernel. apply_prob: the probability of applying the transform to a batch element. Returns: A NHWC tensor of the blurred images. 
""" rngs = jax.random.split(rng, images.shape[0]) kernel_size = images.shape[1] / blur_divider blur_fn = functools.partial( _random_gaussian_blur, kernel_size=kernel_size, padding='SAME', sigma_min=sigma_min, sigma_max=sigma_max, apply_prob=apply_prob) return jax.vmap(blur_fn)(images, rngs) def _solarize_single_image(image, rng, threshold, apply_prob): def _apply(image): return jnp.where(image < threshold, image, 1. - image) return _maybe_apply(_apply, image, rng, apply_prob) def solarize(images, rng, threshold=0.5, apply_prob=1.0): """Applies solarization. Args: images: an NHWC tensor (with C=3). rng: a single PRNGKey. threshold: the solarization threshold. apply_prob: the probability of applying the transform to a batch element. Returns: A NHWC tensor of the transformed images. """ rngs = jax.random.split(rng, images.shape[0]) solarize_fn = functools.partial( _solarize_single_image, threshold=threshold, apply_prob=apply_prob) return jax.vmap(solarize_fn)(images, rngs)
deepmind-research-master
byol/utils/augmentations.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Learning rate schedules.""" import jax.numpy as jnp def target_ema(global_step: jnp.ndarray, base_ema: float, max_steps: int) -> jnp.ndarray: decay = _cosine_decay(global_step, max_steps, 1.) return 1. - (1. - base_ema) * decay def learning_schedule(global_step: jnp.ndarray, batch_size: int, base_learning_rate: float, total_steps: int, warmup_steps: int) -> float: """Cosine learning rate scheduler.""" # Compute LR & Scaled LR scaled_lr = base_learning_rate * batch_size / 256. learning_rate = ( global_step.astype(jnp.float32) / int(warmup_steps) * scaled_lr if warmup_steps > 0 else scaled_lr) # Cosine schedule after warmup. return jnp.where( global_step < warmup_steps, learning_rate, _cosine_decay(global_step - warmup_steps, total_steps - warmup_steps, scaled_lr)) def _cosine_decay(global_step: jnp.ndarray, max_steps: int, initial_value: float) -> jnp.ndarray: """Simple implementation of cosine decay from TF1.""" global_step = jnp.minimum(global_step, max_steps) cosine_decay_value = 0.5 * (1 + jnp.cos(jnp.pi * global_step / max_steps)) decayed_learning_rate = initial_value * cosine_decay_value return decayed_learning_rate
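

# Editor's sketch (not part of the original file): the schedule scales the
# base rate linearly with batch size (0.2 * 4096 / 256 = 3.2 below), ramps up
# linearly over the warmup steps, then follows a cosine decay to zero.
if __name__ == '__main__':
  import jax.numpy as jnp  # Already imported above; repeated for clarity.

  for step in [0, 50, 100, 550, 1000]:
    lr = learning_schedule(
        jnp.asarray(step), batch_size=4096, base_learning_rate=0.2,
        total_steps=1000, warmup_steps=100)
    print(step, float(lr))  # 0.0, 1.6, 3.2, ~1.6, 0.0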
deepmind-research-master
byol/utils/schedules.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Checkpoint saving and restoring utilities.""" import os import time from typing import Mapping, Text, Tuple, Union from absl import logging import dill import jax import jax.numpy as jnp from byol.utils import helpers class Checkpointer: """A checkpoint saving and loading class.""" def __init__( self, use_checkpointing: bool, checkpoint_dir: Text, save_checkpoint_interval: int, filename: Text): if (not use_checkpointing or checkpoint_dir is None or save_checkpoint_interval <= 0): self._checkpoint_enabled = False return self._checkpoint_enabled = True self._checkpoint_dir = checkpoint_dir os.makedirs(self._checkpoint_dir, exist_ok=True) self._filename = filename self._checkpoint_path = os.path.join(self._checkpoint_dir, filename) self._last_checkpoint_time = 0 self._checkpoint_every = save_checkpoint_interval def maybe_save_checkpoint( self, experiment_state: Mapping[Text, jnp.ndarray], step: int, rng: jnp.ndarray, is_final: bool): """Saves a checkpoint if enough time has passed since the previous one.""" current_time = time.time() if (not self._checkpoint_enabled or jax.host_id() != 0 or # Only checkpoint the first worker. (not is_final and current_time - self._last_checkpoint_time < self._checkpoint_every)): return checkpoint_data = dict( experiment_state=jax.tree_map( lambda x: jax.device_get(x[0]), experiment_state), step=step, rng=rng) with open(self._checkpoint_path + '_tmp', 'wb') as checkpoint_file: dill.dump(checkpoint_data, checkpoint_file, protocol=2) try: os.rename(self._checkpoint_path, self._checkpoint_path + '_old') remove_old = True except FileNotFoundError: remove_old = False # No previous checkpoint to remove os.rename(self._checkpoint_path + '_tmp', self._checkpoint_path) if remove_old: os.remove(self._checkpoint_path + '_old') self._last_checkpoint_time = current_time def maybe_load_checkpoint( self) -> Union[Tuple[Mapping[Text, jnp.ndarray], int, jnp.ndarray], None]: """Loads a checkpoint if any is found.""" checkpoint_data = load_checkpoint(self._checkpoint_path) if checkpoint_data is None: logging.info('No existing checkpoint found at %s', self._checkpoint_path) return None step = checkpoint_data['step'] rng = checkpoint_data['rng'] experiment_state = jax.tree_map( helpers.bcast_local_devices, checkpoint_data['experiment_state']) del checkpoint_data return experiment_state, step, rng def load_checkpoint(checkpoint_path): try: with open(checkpoint_path, 'rb') as checkpoint_file: checkpoint_data = dill.load(checkpoint_file) logging.info('Loading checkpoint from %s, saved at step %d', checkpoint_path, checkpoint_data['step']) return checkpoint_data except FileNotFoundError: return None
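

# Editor's sketch (not part of the original file): a minimal save/restore
# round trip. maybe_save_checkpoint strips the leading device axis (x[0])
# from the experiment state, so the state is replicated with
# helpers.bcast_local_devices first; paths and state contents are
# illustrative only.
if __name__ == '__main__':
  checkpointer = Checkpointer(
      use_checkpointing=True,
      checkpoint_dir='/tmp/byol_demo',
      save_checkpoint_interval=300,  # Seconds between periodic checkpoints.
      filename='demo.pkl')
  state = helpers.bcast_local_devices({'w': jnp.zeros([3])})
  checkpointer.maybe_save_checkpoint(
      state, step=0, rng=jax.random.PRNGKey(0), is_final=True)
  print(checkpointer.maybe_load_checkpoint() is not None)  # True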
deepmind-research-master
byol/utils/checkpointing.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Config file for BYOL experiment.""" from byol.utils import dataset # Preset values for certain number of training epochs. _LR_PRESETS = {40: 0.45, 100: 0.45, 300: 0.3, 1000: 0.2} _WD_PRESETS = {40: 1e-6, 100: 1e-6, 300: 1e-6, 1000: 1.5e-6} _EMA_PRESETS = {40: 0.97, 100: 0.99, 300: 0.99, 1000: 0.996} def get_config(num_epochs: int, batch_size: int): """Return config object, containing all hyperparameters for training.""" train_images_per_epoch = dataset.Split.TRAIN_AND_VALID.num_examples assert num_epochs in [40, 100, 300, 1000] config = dict( random_seed=0, num_classes=1000, batch_size=batch_size, max_steps=num_epochs * train_images_per_epoch // batch_size, enable_double_transpose=True, base_target_ema=_EMA_PRESETS[num_epochs], network_config=dict( projector_hidden_size=4096, projector_output_size=256, predictor_hidden_size=4096, encoder_class='ResNet50', # Should match a class in utils/networks. encoder_config=dict( resnet_v2=False, width_multiplier=1), bn_config={ 'decay_rate': .9, 'eps': 1e-5, # Accumulate batchnorm statistics across devices. # This should be equal to the `axis_name` argument passed # to jax.pmap. 'cross_replica_axis': 'i', 'create_scale': True, 'create_offset': True, }), optimizer_config=dict( weight_decay=_WD_PRESETS[num_epochs], eta=1e-3, momentum=.9, ), lr_schedule_config=dict( base_learning_rate=_LR_PRESETS[num_epochs], warmup_steps=10 * train_images_per_epoch // batch_size, ), evaluation_config=dict( subset='test', batch_size=100, ), checkpointing_config=dict( use_checkpointing=True, checkpoint_dir='/tmp/byol', save_checkpoint_interval=300, filename='pretrain.pkl' ), ) return config
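

# Editor's sketch (not part of the original file): the config is a plain
# nested dict; presets only exist for 40/100/300/1000 epochs, enforced by the
# assert in get_config.
if __name__ == '__main__':
  config = get_config(num_epochs=100, batch_size=4096)
  print(config['max_steps'])        # Steps for 100 epochs at batch size 4096.
  print(config['base_target_ema'])  # 0.99 for the 100-epoch preset.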
deepmind-research-master
byol/configs/byol.py
# Copyright 2020 DeepMind Technologies Limited. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Config file for evaluation experiment.""" from typing import Text from byol.utils import dataset def get_config(checkpoint_to_evaluate: Text, batch_size: int): """Return config object for training.""" train_images_per_epoch = dataset.Split.TRAIN_AND_VALID.num_examples config = dict( random_seed=0, enable_double_transpose=True, max_steps=80 * train_images_per_epoch // batch_size, num_classes=1000, batch_size=batch_size, checkpoint_to_evaluate=checkpoint_to_evaluate, # If True, allows training without loading a checkpoint. allow_train_from_scratch=False, # Whether the backbone should be frozen (linear evaluation) or # trainable (fine-tuning). freeze_backbone=True, optimizer_config=dict( momentum=0.9, nesterov=True, ), lr_schedule_config=dict( base_learning_rate=0.2, warmup_steps=0, ), network_config=dict( # Should match the evaluated checkpoint encoder_class='ResNet50', # Should match a class in utils/networks. encoder_config=dict( resnet_v2=False, width_multiplier=1), bn_decay_rate=0.9, ), evaluation_config=dict( subset='test', batch_size=100, ), checkpointing_config=dict( use_checkpointing=True, checkpoint_dir='/tmp/byol', save_checkpoint_interval=300, filename='linear-eval.pkl' ), ) return config
deepmind-research-master
byol/configs/eval.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Trains a graph-based network to predict particle mobilities in glasses.""" import os from absl import app from absl import flags from glassy_dynamics import train as train_using_tf from glassy_dynamics import train_using_jax FLAGS = flags.FLAGS flags.DEFINE_string( 'data_directory', '', 'Directory which contains the train and test datasets.') flags.DEFINE_integer( 'time_index', 9, 'The time index of the target mobilities.') flags.DEFINE_integer( 'max_files_to_load', None, 'The maximum number of files to load from the train and test datasets.') flags.DEFINE_string( 'checkpoint_path', None, 'Path used to store a checkpoint of the best model.') flags.DEFINE_boolean( 'use_jax', False, 'Uses jax to train model.') def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') train_file_pattern = os.path.join(FLAGS.data_directory, 'train/aggregated*') test_file_pattern = os.path.join(FLAGS.data_directory, 'test/aggregated*') train = train_using_jax if FLAGS.use_jax else train_using_tf train.train_model( train_file_pattern=train_file_pattern, test_file_pattern=test_file_pattern, max_files_to_load=FLAGS.max_files_to_load, time_index=FLAGS.time_index, checkpoint_path=FLAGS.checkpoint_path) if __name__ == '__main__': app.run(main)
deepmind-research-master
glassy_dynamics/train_binary.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A graph neural network based model to predict particle mobilities. The architecture and performance of this model is described in our publication: "Unveiling the predictive power of static structure in glassy systems". """ import functools from typing import Any, Dict, Text, Tuple, Optional from graph_nets import graphs from graph_nets import modules as gn_modules from graph_nets import utils_tf import sonnet as snt import tensorflow.compat.v1 as tf def make_graph_from_static_structure( positions: tf.Tensor, types: tf.Tensor, box: tf.Tensor, edge_threshold: float) -> graphs.GraphsTuple: """Returns graph representing the static structure of the glass. Each particle is represented by a node in the graph. The particle type is stored as a node feature. Two particles at a distance less than the threshold are connected by an edge. The relative distance vector is stored as an edge feature. Args: positions: particle positions with shape [n_particles, 3]. types: particle types with shape [n_particles]. box: dimensions of the cubic box that contains the particles with shape [3]. edge_threshold: particles at distance less than threshold are connected by an edge. """ # Calculate pairwise relative distances between particles: shape [n, n, 3]. cross_positions = positions[tf.newaxis, :, :] - positions[:, tf.newaxis, :] # Enforces periodic boundary conditions. box_ = box[tf.newaxis, tf.newaxis, :] cross_positions += tf.cast(cross_positions < -box_ / 2., tf.float32) * box_ cross_positions -= tf.cast(cross_positions > box_ / 2., tf.float32) * box_ # Calculates adjacency matrix in a sparse format (indices), based on the given # distances and threshold. distances = tf.norm(cross_positions, axis=-1) indices = tf.where(distances < edge_threshold) # Defines graph. nodes = types[:, tf.newaxis] senders = indices[:, 0] receivers = indices[:, 1] edges = tf.gather_nd(cross_positions, indices) return graphs.GraphsTuple( nodes=tf.cast(nodes, tf.float32), n_node=tf.reshape(tf.shape(nodes)[0], [1]), edges=tf.cast(edges, tf.float32), n_edge=tf.reshape(tf.shape(edges)[0], [1]), globals=tf.zeros((1, 1), dtype=tf.float32), receivers=tf.cast(receivers, tf.int32), senders=tf.cast(senders, tf.int32) ) def apply_random_rotation(graph: graphs.GraphsTuple) -> graphs.GraphsTuple: """Returns randomly rotated graph representation. The rotation is an element of O(3) with rotation angles multiple of pi/2. This function assumes that the relative particle distances are stored in the edge features. Args: graph: The graphs tuple as defined in `graph_nets.graphs`. """ # Transposes edge features, so that the axes are in the first dimension. # Outputs a tensor of shape [3, n_particles]. xyz = tf.transpose(graph.edges) # Random pi/2 rotation(s) permutation = tf.random.shuffle(tf.constant([0, 1, 2], dtype=tf.int32)) xyz = tf.gather(xyz, permutation) # Random reflections. 
  symmetry = tf.random_uniform([3], minval=0, maxval=2, dtype=tf.int32)
  symmetry = 1 - 2 * tf.cast(tf.reshape(symmetry, [3, 1]), tf.float32)
  xyz = xyz * symmetry
  edges = tf.transpose(xyz)
  return graph.replace(edges=edges)


class GraphBasedModel(snt.AbstractModule):
  """Graph based model which predicts particle mobilities from their positions.

  This network encodes the nodes and edges of the input graph independently,
  and then performs message-passing on this graph, updating its edges based on
  their associated nodes, then updating the nodes based on the input nodes'
  features and their associated updated edge features. This update is repeated
  several times. Afterwards the resulting node embeddings are decoded to
  predict the particle mobility.
  """

  def __init__(self,
               n_recurrences: int,
               mlp_sizes: Tuple[int, ...],
               mlp_kwargs: Optional[Dict[Text, Any]] = None,
               name='Graph'):
    """Creates a new GraphBasedModel object.

    Args:
      n_recurrences: the number of message passing steps in the graph network.
      mlp_sizes: the number of neurons in each layer of the MLP.
      mlp_kwargs: additional keyword arguments passed to the MLP.
      name: the name of the Sonnet module.
    """
    super(GraphBasedModel, self).__init__(name=name)
    self._n_recurrences = n_recurrences

    if mlp_kwargs is None:
      mlp_kwargs = {}

    model_fn = functools.partial(
        snt.nets.MLP,
        output_sizes=mlp_sizes,
        activate_final=True,
        **mlp_kwargs)

    final_model_fn = functools.partial(
        snt.nets.MLP,
        output_sizes=mlp_sizes + (1,),
        activate_final=False,
        **mlp_kwargs)

    with self._enter_variable_scope():
      self._encoder = gn_modules.GraphIndependent(
          node_model_fn=model_fn,
          edge_model_fn=model_fn)

      if self._n_recurrences > 0:
        self._propagation_network = gn_modules.GraphNetwork(
            node_model_fn=model_fn,
            edge_model_fn=model_fn,
            # We do not use globals, hence we just pass the identity function.
            global_model_fn=lambda: lambda x: x,
            reducer=tf.unsorted_segment_sum,
            edge_block_opt=dict(use_globals=False),
            node_block_opt=dict(use_globals=False),
            global_block_opt=dict(use_globals=False))

      self._decoder = gn_modules.GraphIndependent(
          node_model_fn=final_model_fn,
          edge_model_fn=model_fn)

  def _build(self, graphs_tuple: graphs.GraphsTuple) -> tf.Tensor:
    """Connects the model into the tensorflow graph.

    Args:
      graphs_tuple: input graph tensor as defined in `graph_nets.graphs`.

    Returns:
      tensor with shape [n_particles] containing the predicted particle
      mobilities.
    """
    encoded = self._encoder(graphs_tuple)
    outputs = encoded

    for _ in range(self._n_recurrences):
      # Adds skip connections.
      inputs = utils_tf.concat([outputs, encoded], axis=-1)
      outputs = self._propagation_network(inputs)

    decoded = self._decoder(outputs)
    return tf.squeeze(decoded.nodes, axis=-1)
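

# Editor's sketch (not part of the original file): building a graph from
# random particle positions and running the model once. Requires TF1 graph
# mode (tensorflow.compat.v1 with eager execution disabled); all values are
# illustrative only.
if __name__ == '__main__':
  tf.disable_eager_execution()
  positions = tf.random.uniform([20, 3], maxval=4.0)
  types = tf.zeros([20], dtype=tf.int32)
  box = tf.constant([4.0, 4.0, 4.0])
  graphs_tuple = make_graph_from_static_structure(
      positions, types, box, edge_threshold=2.0)
  model = GraphBasedModel(n_recurrences=2, mlp_sizes=(32, 32))
  mobilities = model(graphs_tuple)  # Shape [20], one prediction per particle.
  with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    print(session.run(mobilities).shape)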
deepmind-research-master
glassy_dynamics/graph_model.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for train.""" import os import numpy as np import tensorflow.compat.v1 as tf from glassy_dynamics import train class TrainTest(tf.test.TestCase): def test_get_targets(self): initial_positions = np.array([[0, 0, 0], [1, 2, 3]]) trajectory_target_positions = [ np.array([[1, 0, 0], [1, 2, 4]]), np.array([[0, 1, 0], [1, 0, 3]]), np.array([[0, 0, 5], [1, 2, 3]]), ] expected_targets = np.array([7.0 / 3.0, 1.0]) targets = train.get_targets(initial_positions, trajectory_target_positions) np.testing.assert_almost_equal(expected_targets, targets) def test_load_data(self): file_pattern = os.path.join(os.path.dirname(__file__), 'testdata', 'test_small.pickle') with self.subTest('ContentAndShapesAreAsExpected'): data = train.load_data(file_pattern, 0) self.assertEqual(len(data), 1) element = data[0] self.assertTupleEqual(element.positions.shape, (20, 3)) self.assertTupleEqual(element.box.shape, (3,)) self.assertTupleEqual(element.targets.shape, (20,)) self.assertTupleEqual(element.types.shape, (20,)) with self.subTest('TargetsGrowAsAFunctionOfTime'): previous_mean_target = 0.0 # Time index 9 refers to 1/e = 0.36 in the IS, and therefore it is between # Time index 5 (0.4) and time index 6 (0.3). for time_index in [0, 1, 2, 3, 4, 5, 9, 6, 7, 8]: data = train.load_data(file_pattern, time_index)[0] current_mean_target = data.targets.mean() self.assertGreater(current_mean_target, previous_mean_target) previous_mean_target = current_mean_target class TensorflowTrainTest(tf.test.TestCase): def test_get_loss_op(self): """Tests the correct calculation of the loss operations.""" prediction = tf.constant([0.0, 1.0, 2.0, 1.0, 2.0], dtype=tf.float32) target = tf.constant([1.0, 25.0, 0.0, 4.0, 2.0], dtype=tf.float32) types = tf.constant([0, 1, 0, 0, 0], dtype=tf.int32) loss_ops = train.get_loss_ops(prediction, target, types) loss = self.evaluate(loss_ops) self.assertAlmostEqual(loss.l1_loss, 1.5) self.assertAlmostEqual(loss.l2_loss, 14.0 / 4.0) self.assertAlmostEqual(loss.correlation, -0.15289416) def test_get_minimize_op(self): """Tests the minimize operation by minimizing a single variable.""" var = tf.Variable([1.0], name='test') loss = var**2 minimize = train.get_minimize_op(loss, 1e-1) with self.session(): tf.global_variables_initializer().run() for _ in range(100): minimize.run() value = var.eval() self.assertLess(abs(value[0]), 0.01) def test_train_model(self): """Tests if we can overfit to a small test dataset.""" file_pattern = os.path.join(os.path.dirname(__file__), 'testdata', 'test_small.pickle') best_correlation_value = train.train_model( train_file_pattern=file_pattern, test_file_pattern=file_pattern, n_epochs=1000, augment_data_using_rotations=False, learning_rate=1e-4, n_recurrences=2, edge_threshold=5, mlp_sizes=(32, 32), measurement_store_interval=1000) # The test dataset contains only a single sample with 20 particles. 
# Therefore we expect the model to be able to memorize the targets perfectly # if the model works correctly. self.assertGreater(best_correlation_value, 0.99) def test_apply_model(self): """Tests if we can apply a model to a small test dataset.""" checkpoint_path = os.path.join(os.path.dirname(__file__), 'checkpoints', 't044_s09.ckpt') file_pattern = os.path.join(os.path.dirname(__file__), 'testdata', 'test_large.pickle') predictions = train.apply_model(checkpoint_path=checkpoint_path, file_pattern=file_pattern, time_index=0) data = train.load_data(file_pattern, 0) targets = data[0].targets correlation_value = np.corrcoef(predictions[0], targets)[0, 1] self.assertGreater(correlation_value, 0.5) if __name__ == '__main__': tf.test.main()
deepmind-research-master
glassy_dynamics/train_test.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for graph_model.""" import itertools from absl.testing import parameterized from graph_nets import graphs import numpy as np import tensorflow.compat.v1 as tf from glassy_dynamics import graph_model class GraphModelTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): """Initializes a small tractable test (particle) system.""" super(GraphModelTest, self).setUp() # Fixes random seed to ensure deterministic outputs. tf.random.set_random_seed(1234) # In this test we use a small tractable set of particles covering all corner # cases: # a) eight particles with different types, # b) periodic box is not cubic, # c) three disjoint cluster of particles separated by a threshold > 2, # d) first two clusters overlap with the periodic boundary, # e) first cluster is not fully connected, # f) second cluster is fully connected, # g) and third cluster is a single isolated particle. # # The formatting of the code below separates the three clusters by # adding linebreaks after each cluster. self._positions = np.array( [[0.0, 0.0, 0.0], [2.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, 9.0], [0.0, 5.0, 0.0], [0.0, 5.0, 1.0], [3.0, 5.0, 0.0], [2.0, 3.0, 3.0]]) self._types = np.array([0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0]) self._box = np.array([4.0, 10.0, 10.0]) # Creates the corresponding graph elements, assuming a threshold of 2 and # the conventions described in `graph_nets.graphs`. 
self._edge_threshold = 2 self._nodes = np.array( [[0.0], [0.0], [1.0], [0.0], [0.0], [1.0], [0.0], [0.0]]) self._edges = np.array( [[0.0, 0.0, 0.0], [-1.5, 0.0, 0.0], [0.0, 1.5, 0.0], [0.0, 0.0, -1.0], [1.5, 0.0, 0.0], [0.0, 0.0, 0.0], [1.5, 0.0, -1.0], [0.0, -1.5, 0.0], [0.0, 0.0, 0.0], [0.0, -1.5, -1.0], [0.0, 0.0, 1.0], [-1.5, 0.0, 1.0], [0.0, 1.5, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0, 0.0, -1.0], [0.0, 0.0, 0.0], [-1.0, 0.0, -1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]) self._receivers = np.array( [0, 1, 2, 3, 0, 1, 3, 0, 2, 3, 0, 1, 2, 3, 4, 5, 6, 4, 5, 6, 4, 5, 6, 7]) self._senders = np.array( [0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7]) def _get_graphs_tuple(self): """Returns a GraphsTuple containing a graph based on the test system.""" return graphs.GraphsTuple( nodes=tf.constant(self._nodes, dtype=tf.float32), edges=tf.constant(self._edges, dtype=tf.float32), globals=tf.constant(np.array([[0.0]]), dtype=tf.float32), receivers=tf.constant(self._receivers, dtype=tf.int32), senders=tf.constant(self._senders, dtype=tf.int32), n_node=tf.constant([len(self._nodes)], dtype=tf.int32), n_edge=tf.constant([len(self._edges)], dtype=tf.int32)) def test_make_graph_from_static_structure(self): graphs_tuple_op = graph_model.make_graph_from_static_structure( tf.constant(self._positions, dtype=tf.float32), tf.constant(self._types, dtype=tf.int32), tf.constant(self._box, dtype=tf.float32), self._edge_threshold) graphs_tuple = self.evaluate(graphs_tuple_op) self.assertLen(self._nodes, graphs_tuple.n_node) self.assertLen(self._edges, graphs_tuple.n_edge) np.testing.assert_almost_equal(graphs_tuple.nodes, self._nodes) np.testing.assert_equal(graphs_tuple.senders, self._senders) np.testing.assert_equal(graphs_tuple.receivers, self._receivers) np.testing.assert_almost_equal(graphs_tuple.globals, np.array([[0.0]])) np.testing.assert_almost_equal(graphs_tuple.edges, self._edges) def _is_equal_up_to_rotation(self, x, y): for axes in itertools.permutations([0, 1, 2]): for mirrors in itertools.product([1, -1], repeat=3): if np.allclose(x, y[:, axes] * mirrors): return True return False def test_apply_random_rotation(self): graphs_tuple = self._get_graphs_tuple() rotated_graphs_tuple_op = graph_model.apply_random_rotation(graphs_tuple) rotated_graphs_tuple = self.evaluate(rotated_graphs_tuple_op) np.testing.assert_almost_equal(rotated_graphs_tuple.nodes, self._nodes) np.testing.assert_almost_equal(rotated_graphs_tuple.senders, self._senders) np.testing.assert_almost_equal( rotated_graphs_tuple.receivers, self._receivers) np.testing.assert_almost_equal( rotated_graphs_tuple.globals, np.array([[0.0]])) self.assertTrue(self._is_equal_up_to_rotation(rotated_graphs_tuple.edges, self._edges)) @parameterized.named_parameters(('no_propagation', 0, (30,)), ('multi_propagation', 5, (15,)), ('multi_layer', 1, (20, 30))) def test_GraphModel(self, n_recurrences, mlp_sizes): graphs_tuple = self._get_graphs_tuple() output_op = graph_model.GraphBasedModel(n_recurrences=n_recurrences, mlp_sizes=mlp_sizes)(graphs_tuple) self.assertListEqual(output_op.shape.as_list(), [len(self._types)]) # Tests if the model runs without crashing. with self.session(): tf.global_variables_initializer().run() output_op.eval() if __name__ == '__main__': tf.test.main()
deepmind-research-master
glassy_dynamics/graph_model_test.py
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Training pipeline for the prediction of particle mobilities in glasses."""

import collections
import enum
import pickle
from typing import Any, Dict, List, Optional, Text, Tuple, Sequence

from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

from glassy_dynamics import graph_model

tf.enable_resource_variables()

LossCollection = collections.namedtuple('LossCollection',
                                        'l1_loss, l2_loss, correlation')

GlassSimulationData = collections.namedtuple('GlassSimulationData',
                                             'positions, targets, types, box')


class ParticleType(enum.IntEnum):
  """The simulation contains two particle types, identified as type A and B.

  The dataset encodes the particle type in an integer.
    - 0 corresponds to particle type A.
    - 1 corresponds to particle type B.
  """
  A = 0
  B = 1


def get_targets(
    initial_positions: np.ndarray,
    trajectory_target_positions: Sequence[np.ndarray]) -> np.ndarray:
  """Returns the averaged particle mobilities from the sampled trajectories.

  Args:
    initial_positions: the initial positions of the particles with shape
      [n_particles, 3].
    trajectory_target_positions: the absolute positions of the particles at the
      target time for all sampled trajectories, each with shape
      [n_particles, 3].
  """
  targets = np.mean([np.linalg.norm(t - initial_positions, axis=-1)
                     for t in trajectory_target_positions], axis=0)
  return targets.astype(np.float32)


def load_data(
    file_pattern: Text,
    time_index: int,
    max_files_to_load: Optional[int] = None) -> List[GlassSimulationData]:
  """Returns a list containing the training or test dataset.

  Each element of the list is a `GlassSimulationData` named tuple containing:
    `positions`: `np.ndarray` containing the particle positions with shape
      [n_particles, 3].
    `targets`: `np.ndarray` containing particle mobilities with shape
      [n_particles].
    `types`: `np.ndarray` containing the particle types with shape
      [n_particles].
    `box`: `np.ndarray` containing the dimensions of the periodic box with
      shape [3].

  Args:
    file_pattern: pattern matching the files with the simulation data.
    time_index: the time index of the targets.
    max_files_to_load: the maximum number of files to load.
  """
  filenames = tf.io.gfile.glob(file_pattern)
  if max_files_to_load:
    filenames = filenames[:max_files_to_load]

  static_structures = []
  for filename in filenames:
    with tf.io.gfile.GFile(filename, 'rb') as f:
      data = pickle.load(f)
    static_structures.append(GlassSimulationData(
        positions=data['positions'].astype(np.float32),
        targets=get_targets(
            data['positions'],
            data['trajectory_target_positions'][time_index]),
        types=data['types'].astype(np.int32),
        box=data['box'].astype(np.float32)))
  return static_structures


def get_loss_ops(
    prediction: tf.Tensor,
    target: tf.Tensor,
    types: tf.Tensor) -> LossCollection:
  """Returns L1/L2 loss and correlation for type A particles.

  Args:
    prediction: tensor with shape [n_particles] containing the predicted
      particle mobilities.
    target: tensor with shape [n_particles] containing the true particle
      mobilities.
    types: tensor with shape [n_particles] containing the particle types.
  """
  # Considers only type A particles.
  mask = tf.equal(types, ParticleType.A)
  prediction = tf.boolean_mask(prediction, mask)
  target = tf.boolean_mask(target, mask)
  return LossCollection(
      l1_loss=tf.reduce_mean(tf.abs(prediction - target)),
      l2_loss=tf.reduce_mean((prediction - target)**2),
      correlation=tf.squeeze(tfp.stats.correlation(
          prediction[:, tf.newaxis], target[:, tf.newaxis])))


def get_minimize_op(
    loss: tf.Tensor,
    learning_rate: float,
    grad_clip: Optional[float] = None) -> tf.Tensor:
  """Returns minimization operation.

  Args:
    loss: the loss tensor which is minimized.
    learning_rate: the learning rate used by the optimizer.
    grad_clip: all gradients are clipped to the given value if not None or 0.
  """
  optimizer = tf.train.AdamOptimizer(learning_rate)
  grads_and_vars = optimizer.compute_gradients(loss)
  if grad_clip:
    grads, _ = tf.clip_by_global_norm([g for g, _ in grads_and_vars],
                                      grad_clip)
    grads_and_vars = [(g, pair[1]) for g, pair in zip(grads, grads_and_vars)]
  minimize = optimizer.apply_gradients(grads_and_vars)
  return minimize


def _log_stats_and_return_mean_correlation(
    label: Text,
    stats: Sequence[LossCollection]) -> float:
  """Logs performance statistics and returns mean correlation.

  Args:
    label: label printed before the combined statistics e.g. train or test.
    stats: statistics calculated for each batch in a dataset.

  Returns:
    mean correlation
  """
  for key in LossCollection._fields:
    values = [getattr(s, key) for s in stats]
    mean = np.mean(values)
    std = np.std(values)
    logging.info('%s: %s: %.4f +/- %.4f', label, key, mean, std)
  return np.mean([s.correlation for s in stats])


def train_model(train_file_pattern: Text,
                test_file_pattern: Text,
                max_files_to_load: Optional[int] = None,
                n_epochs: int = 1000,
                time_index: int = 9,
                augment_data_using_rotations: bool = True,
                learning_rate: float = 1e-4,
                grad_clip: Optional[float] = 1.0,
                n_recurrences: int = 7,
                mlp_sizes: Tuple[int, ...] = (64, 64),
                mlp_kwargs: Optional[Dict[Text, Any]] = None,
                edge_threshold: float = 2.0,
                measurement_store_interval: int = 1000,
                checkpoint_path: Optional[Text] = None) -> float:
  """Trains GraphModel using tensorflow.

  Args:
    train_file_pattern: pattern matching the files with the training data.
    test_file_pattern: pattern matching the files with the test data.
    max_files_to_load: the maximum number of train and test files to load.
      If None, all files will be loaded.
    n_epochs: the number of passes through the training dataset (epochs).
    time_index: the time index (0-9) of the target mobilities.
    augment_data_using_rotations: data is augmented by using random rotations.
    learning_rate: the learning rate used by the optimizer.
    grad_clip: all gradients are clipped to the given value.
    n_recurrences: the number of message passing steps in the graphnet.
    mlp_sizes: the number of neurons in each layer of the MLP.
    mlp_kwargs: additional keyword arguments passed to the MLP.
    edge_threshold: particles at distance less than threshold are connected by
      an edge.
    measurement_store_interval: number of steps between storing objective
      values (loss and correlation).
    checkpoint_path: path used to store the checkpoint with the highest
      correlation on the test set.

  Returns:
    Correlation on the test dataset of best model encountered during training.
""" if mlp_kwargs is None: mlp_kwargs = dict(initializers=dict(w=tf.variance_scaling_initializer(1.0), b=tf.variance_scaling_initializer(0.1))) # Loads train and test dataset. dataset_kwargs = dict( time_index=time_index, max_files_to_load=max_files_to_load) training_data = load_data(train_file_pattern, **dataset_kwargs) test_data = load_data(test_file_pattern, **dataset_kwargs) # Defines wrapper functions, which can directly be passed to the # tf.data.Dataset.map function. def _make_graph_from_static_structure(static_structure): """Converts static structure to graph, targets and types.""" return (graph_model.make_graph_from_static_structure( static_structure.positions, static_structure.types, static_structure.box, edge_threshold), static_structure.targets, static_structure.types) def _apply_random_rotation(graph, targets, types): """Applies random rotations to the graph and forwards targets and types.""" return graph_model.apply_random_rotation(graph), targets, types # Defines data-pipeline based on tf.data.Dataset following the official # guideline: https://www.tensorflow.org/guide/datasets#consuming_numpy_arrays. # We use initializable iterators to avoid embedding the training and test data # directly into the graph. # Instead we feed the data to the iterators during the initalization of the # iterators before the main training loop. placeholders = GlassSimulationData._make( tf.placeholder(s.dtype, (None,) + s.shape) for s in training_data[0]) dataset = tf.data.Dataset.from_tensor_slices(placeholders) dataset = dataset.map(_make_graph_from_static_structure) dataset = dataset.cache() dataset = dataset.shuffle(400) # Augments data. This has to be done after calling dataset.cache! if augment_data_using_rotations: dataset = dataset.map(_apply_random_rotation) dataset = dataset.repeat() train_iterator = dataset.make_initializable_iterator() dataset = tf.data.Dataset.from_tensor_slices(placeholders) dataset = dataset.map(_make_graph_from_static_structure) dataset = dataset.cache() dataset = dataset.repeat() test_iterator = dataset.make_initializable_iterator() # Creates tensorflow graph. # Note: We decouple the training and test datasets from the input pipeline # by creating a new iterator from a string-handle placeholder with the same # output types and shapes as the training dataset. dataset_handle = tf.placeholder(tf.string, shape=[]) iterator = tf.data.Iterator.from_string_handle( dataset_handle, train_iterator.output_types, train_iterator.output_shapes) graph, targets, types = iterator.get_next() model = graph_model.GraphBasedModel( n_recurrences, mlp_sizes, mlp_kwargs) prediction = model(graph) # Defines loss and minimization operations. loss_ops = get_loss_ops(prediction, targets, types) minimize_op = get_minimize_op(loss_ops.l2_loss, learning_rate, grad_clip) best_so_far = -1 train_stats = [] test_stats = [] saver = tf.train.Saver() with tf.train.SingularMonitoredSession() as session: # Initializes train and test iterators with the training and test datasets. # The obtained training and test string-handles can be passed to the # dataset_handle placeholder to select the dataset. 
    train_handle = session.run(train_iterator.string_handle())
    test_handle = session.run(test_iterator.string_handle())
    feed_dict = {p: [x[i] for x in training_data]
                 for i, p in enumerate(placeholders)}
    session.run(train_iterator.initializer, feed_dict=feed_dict)
    feed_dict = {p: [x[i] for x in test_data]
                 for i, p in enumerate(placeholders)}
    session.run(test_iterator.initializer, feed_dict=feed_dict)

    # Trains model using stochastic gradient descent on the training dataset.
    n_training_steps = len(training_data) * n_epochs
    for i in range(n_training_steps):
      feed_dict = {dataset_handle: train_handle}
      train_loss, _ = session.run((loss_ops, minimize_op), feed_dict=feed_dict)
      train_stats.append(train_loss)

      if (i+1) % measurement_store_interval == 0:
        # Evaluates model on test dataset.
        for _ in range(len(test_data)):
          feed_dict = {dataset_handle: test_handle}
          test_stats.append(session.run(loss_ops, feed_dict=feed_dict))

        # Outputs performance statistics on training and test dataset.
        _log_stats_and_return_mean_correlation('Train', train_stats)
        correlation = _log_stats_and_return_mean_correlation(
            'Test', test_stats)
        train_stats = []
        test_stats = []

        # Updates best model based on the observed correlation on the test
        # dataset.
        if correlation > best_so_far:
          best_so_far = correlation
          if checkpoint_path:
            saver.save(session.raw_session(), checkpoint_path)

  return best_so_far


def apply_model(checkpoint_path: Text,
                file_pattern: Text,
                max_files_to_load: Optional[int] = None,
                time_index: int = 9) -> List[np.ndarray]:
  """Applies trained GraphModel using tensorflow.

  Args:
    checkpoint_path: path from which the model is loaded.
    file_pattern: pattern matching the files with the data.
    max_files_to_load: the maximum number of files to load. If None, all files
      will be loaded.
    time_index: the time index (0-9) of the target mobilities.

  Returns:
    Predictions of the model for all files.
  """
  dataset_kwargs = dict(
      time_index=time_index,
      max_files_to_load=max_files_to_load)
  data = load_data(file_pattern, **dataset_kwargs)

  tf.reset_default_graph()

  saver = tf.train.import_meta_graph(checkpoint_path + '.meta')
  graph = tf.get_default_graph()
  placeholders = GlassSimulationData(
      positions=graph.get_tensor_by_name('Placeholder:0'),
      targets=graph.get_tensor_by_name('Placeholder_1:0'),
      types=graph.get_tensor_by_name('Placeholder_2:0'),
      box=graph.get_tensor_by_name('Placeholder_3:0'))
  prediction_tensor = graph.get_tensor_by_name('Graph_1/Squeeze:0')
  correlation_tensor = graph.get_tensor_by_name('Squeeze:0')
  dataset_handle = graph.get_tensor_by_name('Placeholder_4:0')
  test_initializer = graph.get_operation_by_name('MakeIterator_1')
  test_string_handle = graph.get_tensor_by_name('IteratorToStringHandle_1:0')

  with tf.Session() as session:
    saver.restore(session, checkpoint_path)
    handle = session.run(test_string_handle)
    feed_dict = {p: [x[i] for x in data] for i, p in enumerate(placeholders)}
    session.run(test_initializer, feed_dict=feed_dict)
    predictions = []
    correlations = []
    for _ in range(len(data)):
      p, c = session.run((prediction_tensor, correlation_tensor),
                         feed_dict={dataset_handle: handle})
      predictions.append(p)
      correlations.append(c)
  logging.info('Correlation: %.4f +/- %.4f',
               np.mean(correlations), np.std(correlations))
  return predictions
deepmind-research-master
glassy_dynamics/train.py
# Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Applies a graph-based network to predict particle mobilities in glasses.""" import os from absl import app from absl import flags from glassy_dynamics import train FLAGS = flags.FLAGS flags.DEFINE_string( 'data_directory', '', 'Directory which contains the train or test datasets.') flags.DEFINE_integer( 'time_index', 9, 'The time index of the target mobilities.') flags.DEFINE_integer( 'max_files_to_load', None, 'The maximum number of files to load.') flags.DEFINE_string( 'checkpoint_path', 'checkpoints/t044_s09.ckpt', 'Path used to load the model.') def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') file_pattern = os.path.join(FLAGS.data_directory, 'aggregated*') train.apply_model( checkpoint_path=FLAGS.checkpoint_path, file_pattern=file_pattern, max_files_to_load=FLAGS.max_files_to_load, time_index=FLAGS.time_index) if __name__ == '__main__': app.run(main)
deepmind-research-master
glassy_dynamics/apply_binary.py
# Lint as: python3 # Copyright 2019 Deepmind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training pipeline for the prediction of particle mobilities in glasses.""" import enum import functools import logging import pickle import random import haiku as hk import jax import jax.numpy as jnp import jraph import numpy as np import optax # Only used for file operations. # You can use glob.glob and python's open function to replace the tf usage below # on most platforms. import tensorflow.compat.v1 as tf class ParticleType(enum.IntEnum): """The simulation contains two particle types, identified as type A and B. The dataset encodes the particle type in an integer. - 0 corresponds to particle type A. - 1 corresponds to particle type B. """ A = 0 B = 1 def make_graph_from_static_structure(positions, types, box, edge_threshold): """Returns graph representing the static structure of the glass. Each particle is represented by a node in the graph. The particle type is stored as a node feature. Two particles at a distance less than the threshold are connected by an edge. The relative distance vector is stored as an edge feature. Args: positions: particle positions with shape [n_particles, 3]. types: particle types with shape [n_particles]. box: dimensions of the cubic box that contains the particles with shape [3]. edge_threshold: particles at distance less than threshold are connected by an edge. """ # Calculate pairwise relative distances between particles: shape [n, n, 3]. cross_positions = positions[None, :, :] - positions[:, None, :] # Enforces periodic boundary conditions. box_ = box[None, None, :] cross_positions += (cross_positions < -box_ / 2.).astype(np.float32) * box_ cross_positions -= (cross_positions > box_ / 2.).astype(np.float32) * box_ # Calculates adjacency matrix in a sparse format (indices), based on the given # distances and threshold. distances = np.linalg.norm(cross_positions, axis=-1) indices = np.where(distances < edge_threshold) # Defines graph. nodes = types[:, None] senders = indices[0] receivers = indices[1] edges = cross_positions[indices] return jraph.pad_with_graphs(jraph.GraphsTuple( nodes=nodes.astype(np.float32), n_node=np.reshape(nodes.shape[0], [1]), edges=edges.astype(np.float32), n_edge=np.reshape(edges.shape[0], [1]), globals=np.zeros((1, 1), dtype=np.float32), receivers=receivers.astype(np.int32), senders=senders.astype(np.int32) ), n_node=4097, n_edge=200000) def get_targets(initial_positions, trajectory_target_positions): """Returns the averaged particle mobilities from the sampled trajectories. Args: initial_positions: the initial positions of the particles with shape [n_particles, 3]. trajectory_target_positions: the absolute positions of the particles at the target time for all sampled trajectories, each with shape [n_particles, 3]. 
""" targets = np.mean([np.linalg.norm(t - initial_positions, axis=-1) for t in trajectory_target_positions], axis=0) return targets.astype(np.float32) def load_data(file_pattern, time_index, max_files_to_load=None): """Returns a graphs and targets of the training or test dataset. Args: file_pattern: pattern matching the files with the simulation data. time_index: the time index of the targets. max_files_to_load: the maximum number of files to load. """ filenames = tf.io.gfile.glob(file_pattern) if max_files_to_load: filenames = filenames[:max_files_to_load] graphs_and_targets = [] for filename in filenames: with tf.io.gfile.GFile(filename, 'rb') as f: data = pickle.load(f) mask = (data['types'] == ParticleType.A).astype(np.int32) # Mask dummy node due to padding mask = np.concatenate([mask, np.zeros((1,), dtype=np.int32)], axis=-1) targets = get_targets( data['positions'], data['trajectory_target_positions'][time_index]) targets = np.concatenate( [targets, np.zeros((1,), dtype=np.float32)], axis=-1) graphs_and_targets.append( (make_graph_from_static_structure( data['positions'].astype(np.float32), data['types'].astype(np.int32), data['box'].astype(np.float32), edge_threshold=2.0), targets, mask)) return graphs_and_targets def apply_random_rotation(graph): """Returns randomly rotated graph representation. The rotation is an element of O(3) with rotation angles multiple of pi/2. This function assumes that the relative particle distances are stored in the edge features. Args: graph: The graphs tuple as defined in `graph_nets.graphs`. """ # Transposes edge features, so that the axes are in the first dimension. # Outputs a tensor of shape [3, n_particles]. xyz = np.transpose(graph.edges) # Random pi/2 rotation(s) permutation = np.array([0, 1, 2], dtype=np.int32) np.random.shuffle(permutation) xyz = xyz[permutation] # Random reflections. symmetry = np.random.randint(0, 2, [3]) symmetry = 1 - 2 * np.reshape(symmetry, [3, 1]).astype(np.float32) xyz = xyz * symmetry edges = np.transpose(xyz) return graph._replace(edges=edges) def network_definition(graph): """Defines a graph neural network. Args: graph: Graphstuple the network processes. Returns: Decoded nodes. 
""" model_fn = functools.partial( hk.nets.MLP, w_init=hk.initializers.VarianceScaling(1.0), b_init=hk.initializers.VarianceScaling(1.0)) mlp_sizes = (64, 64) num_message_passing_steps = 7 node_encoder = model_fn(output_sizes=mlp_sizes, activate_final=True) edge_encoder = model_fn(output_sizes=mlp_sizes, activate_final=True) node_decoder = model_fn(output_sizes=mlp_sizes + (1,), activate_final=False) node_encoding = node_encoder(graph.nodes) edge_encoding = edge_encoder(graph.edges) graph = graph._replace(nodes=node_encoding, edges=edge_encoding) update_edge_fn = jraph.concatenated_args( model_fn(output_sizes=mlp_sizes, activate_final=True)) update_node_fn = jraph.concatenated_args( model_fn(output_sizes=mlp_sizes, activate_final=True)) gn = jraph.InteractionNetwork( update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, include_sent_messages_in_node_update=True) for _ in range(num_message_passing_steps): graph = graph._replace( nodes=jnp.concatenate([graph.nodes, node_encoding], axis=-1), edges=jnp.concatenate([graph.edges, edge_encoding], axis=-1)) graph = gn(graph) return jnp.squeeze(node_decoder(graph.nodes), axis=-1) def train_model(train_file_pattern, test_file_pattern, max_files_to_load=None, n_epochs=1000, time_index=9, learning_rate=1e-4, grad_clip=1.0, measurement_store_interval=1000, checkpoint_path=None): """Trains GraphModel using tensorflow. Args: train_file_pattern: pattern matching the files with the training data. test_file_pattern: pattern matching the files with the test data. max_files_to_load: the maximum number of train and test files to load. If None, all files will be loaded. n_epochs: the number of passes through the training dataset (epochs). time_index: the time index (0-9) of the target mobilities. learning_rate: the learning rate used by the optimizer. grad_clip: all gradients are clipped to the given value. measurement_store_interval: number of steps between storing objective values (loss and correlation). checkpoint_path: ignored by this implementation. """ if checkpoint_path: logging.warning('The checkpoint_path argument is ignored.') random.seed(42) np.random.seed(42) # Loads train and test dataset. 
dataset_kwargs = dict( time_index=time_index, max_files_to_load=max_files_to_load) logging.info('Load training data') training_data = load_data(train_file_pattern, **dataset_kwargs) logging.info('Load test data') test_data = load_data(test_file_pattern, **dataset_kwargs) logging.info('Finished loading data') network = hk.without_apply_rng(hk.transform(network_definition)) params = network.init(jax.random.PRNGKey(42), training_data[0][0]) opt_init, opt_update = optax.chain( optax.clip_by_global_norm(grad_clip), optax.scale_by_adam(0.9, 0.999, 1e-8), optax.scale(-learning_rate)) opt_state = opt_init(params) network_apply = jax.jit(network.apply) @jax.jit def loss_fn(params, graph, targets, mask): decoded_nodes = network_apply(params, graph) * mask return (jnp.sum((decoded_nodes - targets)**2 * mask) / jnp.sum(mask)) @jax.jit def update(params, opt_state, graph, targets, mask): loss, grads = jax.value_and_grad(loss_fn)(params, graph, targets, mask) updates, opt_state = opt_update(grads, opt_state) return optax.apply_updates(params, updates), opt_state, loss train_stats = [] i = 0 logging.info('Start training') for epoch in range(n_epochs): logging.info('Start epoch %r', epoch) random.shuffle(training_data) for graph, targets, mask in training_data: graph = apply_random_rotation(graph) params, opt_state, loss = update(params, opt_state, graph, targets, mask) train_stats.append(loss) if (i+1) % measurement_store_interval == 0: logging.info('Start evaluation run') test_stats = [] for test_graph, test_targets, test_mask in test_data: predictions = network_apply(params, test_graph) test_stats.append(np.corrcoef( predictions[test_mask == 1], test_targets[test_mask == 1])[0, 1]) logging.info('Train loss %r', np.mean(train_stats)) logging.info('Test correlation %r', np.mean(test_stats)) train_stats = [] i += 1
deepmind-research-master
glassy_dynamics/train_using_jax.py
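A toy check (not part of the repo) of the minimum-image convention used by make_graph_from_static_structure: two particles near opposite faces of a periodic box should end up separated by the short wrapped distance, not the long in-box one.

import numpy as np

positions = np.array([[0.1, 0.0, 0.0], [9.9, 0.0, 0.0]], dtype=np.float32)
box = np.array([10.0, 10.0, 10.0], dtype=np.float32)

# Same wrap as in make_graph_from_static_structure.
cross_positions = positions[None, :, :] - positions[:, None, :]
box_ = box[None, None, :]
cross_positions += (cross_positions < -box_ / 2.).astype(np.float32) * box_
cross_positions -= (cross_positions > box_ / 2.).astype(np.float32) * box_

distances = np.linalg.norm(cross_positions, axis=-1)
print(distances[0, 1])  # ~0.2 (wrapped), not 9.8 (unwrapped).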
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities to compute human-normalized Atari scores. The data used in this module is human and random performance data on Atari-57. It comprises of evaluation scores (undiscounted returns), each averaged over at least 3 episode runs, on each of the 57 Atari games. Each episode begins with the environment already stepped with a uniform random number (between 1 and 30 inclusive) of noop actions. The two agents are: * 'random' (agent choosing its actions uniformly randomly on each step) * 'human' (professional human game tester) Scores are obtained by averaging returns over the episodes played by each agent, with episode length capped to 108,000 frames (i.e. timeout after 30 minutes). The term 'human-normalized' here means a linear per-game transformation of a game score in such a way that 0 corresponds to random performance and 1 corresponds to human performance. """ import math # Game: score-tuple dictionary. Each score tuple contains # 0: score random (float) and 1: score human (float). _ATARI_DATA = { 'alien': (227.8, 7127.7), 'amidar': (5.8, 1719.5), 'assault': (222.4, 742.0), 'asterix': (210.0, 8503.3), 'asteroids': (719.1, 47388.7), 'atlantis': (12850.0, 29028.1), 'bank_heist': (14.2, 753.1), 'battle_zone': (2360.0, 37187.5), 'beam_rider': (363.9, 16926.5), 'berzerk': (123.7, 2630.4), 'bowling': (23.1, 160.7), 'boxing': (0.1, 12.1), 'breakout': (1.7, 30.5), 'centipede': (2090.9, 12017.0), 'chopper_command': (811.0, 7387.8), 'crazy_climber': (10780.5, 35829.4), 'defender': (2874.5, 18688.9), 'demon_attack': (152.1, 1971.0), 'double_dunk': (-18.6, -16.4), 'enduro': (0.0, 860.5), 'fishing_derby': (-91.7, -38.7), 'freeway': (0.0, 29.6), 'frostbite': (65.2, 4334.7), 'gopher': (257.6, 2412.5), 'gravitar': (173.0, 3351.4), 'hero': (1027.0, 30826.4), 'ice_hockey': (-11.2, 0.9), 'jamesbond': (29.0, 302.8), 'kangaroo': (52.0, 3035.0), 'krull': (1598.0, 2665.5), 'kung_fu_master': (258.5, 22736.3), 'montezuma_revenge': (0.0, 4753.3), 'ms_pacman': (307.3, 6951.6), 'name_this_game': (2292.3, 8049.0), 'phoenix': (761.4, 7242.6), 'pitfall': (-229.4, 6463.7), 'pong': (-20.7, 14.6), 'private_eye': (24.9, 69571.3), 'qbert': (163.9, 13455.0), 'riverraid': (1338.5, 17118.0), 'road_runner': (11.5, 7845.0), 'robotank': (2.2, 11.9), 'seaquest': (68.4, 42054.7), 'skiing': (-17098.1, -4336.9), 'solaris': (1236.3, 12326.7), 'space_invaders': (148.0, 1668.7), 'star_gunner': (664.0, 10250.0), 'surround': (-10.0, 6.5), 'tennis': (-23.8, -8.3), 'time_pilot': (3568.0, 5229.2), 'tutankham': (11.4, 167.6), 'up_n_down': (533.4, 11693.2), 'venture': (0.0, 1187.5), # Note the random agent score on Video Pinball is sometimes greater than the # human score under other evaluation methods. 
'video_pinball': (16256.9, 17667.9), 'wizard_of_wor': (563.5, 4756.5), 'yars_revenge': (3092.9, 54576.9), 'zaxxon': (32.5, 9173.3), } _RANDOM_COL = 0 _HUMAN_COL = 1 ATARI_GAMES = tuple(sorted(_ATARI_DATA.keys())) def get_human_normalized_score(game: str, raw_score: float) -> float: """Converts game score to human-normalized score.""" game_scores = _ATARI_DATA.get(game, (math.nan, math.nan)) random, human = game_scores[_RANDOM_COL], game_scores[_HUMAN_COL] return (raw_score - random) / (human - random)
deepmind-research-master
tandem_dqn/atari_data.py
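A worked example of the normalization on pong, whose (random, human) scores are (-20.7, 14.6): the endpoints map to 0 and 1, and a raw score halfway between them maps to 0.5.

from tandem_dqn import atari_data

print(atari_data.get_human_normalized_score('pong', -20.7))  # 0.0
print(atari_data.get_human_normalized_score('pong', 14.6))   # 1.0
print(atari_data.get_human_normalized_score('pong', -3.05))  # 0.5
# Unknown games fall back to the (nan, nan) tuple, so the result is nan.
print(atari_data.get_human_normalized_score('not_a_game', 100.0))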
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """DQN agent network components and implementation.""" import typing from typing import Any, Callable, Tuple, Union import chex import haiku as hk import jax import jax.numpy as jnp import numpy as np Network = hk.Transformed Params = hk.Params NetworkFn = Callable[..., Any] class QNetworkOutputs(typing.NamedTuple): q_values: jnp.ndarray class QRNetworkOutputs(typing.NamedTuple): q_values: jnp.ndarray q_dist: jnp.ndarray NUM_QUANTILES = 201 def _dqn_default_initializer( num_input_units: int) -> hk.initializers.Initializer: """Default initialization scheme inherited from past implementations of DQN. This scheme was historically used to initialize all weights and biases in convolutional and linear layers of DQN-type agents' networks. It initializes each weight as an independent uniform sample from [`-c`, `c`], where `c = 1 / np.sqrt(num_input_units)`, and `num_input_units` is the number of input units affecting a single output unit in the given layer, i.e. the total number of inputs in the case of linear (dense) layers, and `num_input_channels * kernel_width * kernel_height` in the case of convolutional layers. Args: num_input_units: number of input units to a single output unit of the layer. Returns: Haiku weight initializer. 
""" max_val = np.sqrt(1 / num_input_units) return hk.initializers.RandomUniform(-max_val, max_val) def make_quantiles(): """Quantiles for QR-DQN.""" return (jnp.arange(0, NUM_QUANTILES) + 0.5) / float(NUM_QUANTILES) def conv( num_features: int, kernel_shape: Union[int, Tuple[int, int]], stride: Union[int, Tuple[int, int]], name=None, ) -> NetworkFn: """Convolutional layer with DQN's legacy weight initialization scheme.""" def net_fn(inputs): """Function representing conv layer with DQN's legacy initialization.""" num_input_units = inputs.shape[-1] * kernel_shape[0] * kernel_shape[1] initializer = _dqn_default_initializer(num_input_units) layer = hk.Conv2D( num_features, kernel_shape=kernel_shape, stride=stride, w_init=initializer, b_init=initializer, padding='VALID', name=name) return layer(inputs) return net_fn def linear(num_outputs: int, with_bias=True, name=None) -> NetworkFn: """Linear layer with DQN's legacy weight initialization scheme.""" def net_fn(inputs): """Function representing linear layer with DQN's legacy initialization.""" initializer = _dqn_default_initializer(inputs.shape[-1]) layer = hk.Linear( num_outputs, with_bias=with_bias, w_init=initializer, b_init=initializer, name=name) return layer(inputs) return net_fn def linear_with_shared_bias(num_outputs: int, name=None) -> NetworkFn: """Linear layer with single shared bias instead of one bias per output.""" def layer_fn(inputs): """Function representing a linear layer with single shared bias.""" initializer = _dqn_default_initializer(inputs.shape[-1]) bias_free_linear = hk.Linear( num_outputs, with_bias=False, w_init=initializer, name=name) linear_output = bias_free_linear(inputs) bias = hk.get_parameter('b', [1], inputs.dtype, init=initializer) bias = jnp.broadcast_to(bias, linear_output.shape) return linear_output + bias return layer_fn def dqn_torso() -> NetworkFn: """DQN convolutional torso. Includes scaling from [`0`, `255`] (`uint8`) to [`0`, `1`] (`float32`)`. Returns: Network function that `haiku.transform` can be called on. 
""" def net_fn(inputs): """Function representing convolutional torso for a DQN Q-network.""" network = hk.Sequential([ lambda x: x.astype(jnp.float32) / 255., conv(32, kernel_shape=(8, 8), stride=(4, 4), name='conv1'), jax.nn.relu, conv(64, kernel_shape=(4, 4), stride=(2, 2), name='conv2'), jax.nn.relu, conv(64, kernel_shape=(3, 3), stride=(1, 1), name='conv3'), jax.nn.relu, hk.Flatten(), ]) return network(inputs) return net_fn def dqn_value_head(num_actions: int, shared_bias: bool = False) -> NetworkFn: """Regular DQN Q-value head with single hidden layer.""" last_layer = linear_with_shared_bias if shared_bias else linear def net_fn(inputs): """Function representing value head for a DQN Q-network.""" network = hk.Sequential([ linear(512, name='linear1'), jax.nn.relu, last_layer(num_actions, name='output'), ]) return network(inputs) return net_fn def qr_atari_network(num_actions: int, quantiles: jnp.ndarray) -> NetworkFn: """QR-DQN network, expects `uint8` input.""" chex.assert_rank(quantiles, 1) num_quantiles = len(quantiles) def net_fn(inputs): """Function representing QR-DQN Q-network.""" network = hk.Sequential([ dqn_torso(), dqn_value_head(num_quantiles * num_actions), ]) network_output = network(inputs) q_dist = jnp.reshape(network_output, (-1, num_quantiles, num_actions)) q_values = jnp.mean(q_dist, axis=1) q_values = jax.lax.stop_gradient(q_values) return QRNetworkOutputs(q_dist=q_dist, q_values=q_values) return net_fn def double_dqn_atari_network(num_actions: int) -> NetworkFn: """DQN network with shared bias in final layer, expects `uint8` input.""" def net_fn(inputs): """Function representing DQN Q-network with shared bias output layer.""" network = hk.Sequential([ dqn_torso(), dqn_value_head(num_actions, shared_bias=True), ]) return QNetworkOutputs(q_values=network(inputs)) return net_fn def make_network(network_type: str, num_actions: int) -> Network: """Constructs network.""" if network_type == 'double_q': network_fn = double_dqn_atari_network(num_actions) elif network_type == 'qr': quantiles = make_quantiles() network_fn = qr_atari_network(num_actions, quantiles) else: raise ValueError('Unknown network "{}"'.format(network_type)) return hk.transform(network_fn)
deepmind-research-master
tandem_dqn/networks.py
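A small sketch of constructing and calling one of these networks, assuming the standard Atari observation of four stacked 84x84 frames; `num_actions=6` is an illustrative choice.

import jax
import jax.numpy as jnp

from tandem_dqn import networks

network = networks.make_network('double_q', num_actions=6)
sample_input = jnp.zeros((1, 84, 84, 4), dtype=jnp.uint8)
params = network.init(jax.random.PRNGKey(0), sample_input)
outputs = network.apply(params, jax.random.PRNGKey(1), sample_input)
print(outputs.q_values.shape)  # (1, 6)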
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Replay components for DQN-type agents.""" import collections import typing from typing import Any, Callable, Generic, Iterable, List, Mapping, Optional, Sequence, Text, Tuple, TypeVar import dm_env import numpy as np import snappy from tandem_dqn import parts CompressedArray = Tuple[bytes, Tuple, np.dtype] # Generic replay structure: Any flat named tuple. ReplayStructure = TypeVar('ReplayStructure', bound=Tuple[Any, ...]) class Transition(typing.NamedTuple): s_tm1: Optional[np.ndarray] a_tm1: Optional[parts.Action] r_t: Optional[float] discount_t: Optional[float] s_t: Optional[np.ndarray] a_t: Optional[parts.Action] = None mc_return_tm1: Optional[float] = None class TransitionReplay(Generic[ReplayStructure]): """Uniform replay, with circular buffer storage for flat named tuples.""" def __init__(self, capacity: int, structure: ReplayStructure, random_state: np.random.RandomState, encoder: Optional[Callable[[ReplayStructure], Any]] = None, decoder: Optional[Callable[[Any], ReplayStructure]] = None): self._capacity = capacity self._structure = structure self._random_state = random_state self._encoder = encoder or (lambda s: s) self._decoder = decoder or (lambda s: s) self._storage = [None] * capacity self._num_added = 0 def add(self, item: ReplayStructure) -> None: """Adds single item to replay.""" self._storage[self._num_added % self._capacity] = self._encoder(item) self._num_added += 1 def get(self, indices: Sequence[int]) -> List[ReplayStructure]: """Retrieves items by indices.""" return [self._decoder(self._storage[i]) for i in indices] def sample(self, size: int) -> ReplayStructure: """Samples batch of items from replay uniformly, with replacement.""" indices = self._random_state.choice(self.size, size=size, replace=True) samples = self.get(indices) transposed = zip(*samples) stacked = [np.stack(xs, axis=0) for xs in transposed] return type(self._structure)(*stacked) # pytype: disable=not-callable @property def size(self) -> int: """Number of items currently contained in replay.""" return min(self._num_added, self._capacity) @property def capacity(self) -> int: """Total capacity of replay (max number of items stored at any one time).""" return self._capacity def get_state(self) -> Mapping[Text, Any]: """Retrieves replay state as a dictionary (e.g. 
for serialization).""" return { 'storage': self._storage, 'num_added': self._num_added, } def set_state(self, state: Mapping[Text, Any]) -> None: """Sets replay state from a (potentially de-serialized) dictionary.""" self._storage = state['storage'] self._num_added = state['num_added'] class TransitionAccumulatorWithMCReturn: """Accumulates timesteps to transitions with MC returns.""" def __init__(self): self._transitions = collections.deque() self.reset() def step(self, timestep_t: dm_env.TimeStep, a_t: parts.Action) -> Iterable[Transition]: """Accumulates timestep and resulting action, maybe yields transitions.""" if timestep_t.first(): self.reset() # There are no transitions on the first timestep. if self._timestep_tm1 is None: assert self._a_tm1 is None if not timestep_t.first(): raise ValueError('Expected FIRST timestep, got %s.' % str(timestep_t)) self._timestep_tm1 = timestep_t self._a_tm1 = a_t return # Empty iterable. self._transitions.append( Transition( s_tm1=self._timestep_tm1.observation, a_tm1=self._a_tm1, r_t=timestep_t.reward, discount_t=timestep_t.discount, s_t=timestep_t.observation, a_t=a_t, mc_return_tm1=None, )) self._timestep_tm1 = timestep_t self._a_tm1 = a_t if timestep_t.last(): # Annotate all episode transitions with their MC returns. mc_return = 0 mc_transitions = [] while self._transitions: transition = self._transitions.pop() mc_return = transition.discount_t * mc_return + transition.r_t mc_transitions.append(transition._replace(mc_return_tm1=mc_return)) for transition in reversed(mc_transitions): yield transition else: # Wait for episode end before yielding anything. return def reset(self) -> None: """Resets the accumulator. Following timestep is expected to be FIRST.""" self._transitions.clear() self._timestep_tm1 = None self._a_tm1 = None def compress_array(array: np.ndarray) -> CompressedArray: """Compresses a numpy array with snappy.""" return snappy.compress(array), array.shape, array.dtype def uncompress_array(compressed: CompressedArray) -> np.ndarray: """Uncompresses a numpy array with snappy given its shape and dtype.""" compressed_array, shape, dtype = compressed byte_string = snappy.uncompress(compressed_array) return np.frombuffer(byte_string, dtype=dtype).reshape(shape)
deepmind-research-master
tandem_dqn/replay.py
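A round-trip check of the snappy helpers: the compressed tuple keeps the raw bytes plus the shape and dtype, which is exactly what uncompress_array needs to rebuild the array.

import numpy as np

from tandem_dqn import replay as replay_lib

frame = np.random.randint(0, 256, size=(84, 84, 4), dtype=np.uint8)
compressed = replay_lib.compress_array(frame)
restored = replay_lib.uncompress_array(compressed)
assert np.array_equal(frame, restored)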
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tandem DQN agent class.""" import typing from typing import Any, Callable, Mapping, Set, Text from absl import logging import dm_env import haiku as hk import jax import jax.numpy as jnp import numpy as np import optax import rlax from tandem_dqn import losses from tandem_dqn import parts from tandem_dqn import processors from tandem_dqn import replay as replay_lib class TandemTuple(typing.NamedTuple): active: Any passive: Any def tandem_map(fn: Callable[..., Any], *args): return TandemTuple( active=fn(*[a.active for a in args]), passive=fn(*[a.passive for a in args])) def replace_module_params(source, target, modules): """Replace selected module params in target by corresponding source values.""" source, _ = hk.data_structures.partition( lambda module, name, value: module in modules, source) return hk.data_structures.merge(target, source) class TandemDqn(parts.Agent): """Tandem DQN agent.""" def __init__( self, preprocessor: processors.Processor, sample_network_input: jnp.ndarray, network: TandemTuple, optimizer: TandemTuple, loss: TandemTuple, transition_accumulator: Any, replay: replay_lib.TransitionReplay, batch_size: int, exploration_epsilon: Callable[[int], float], min_replay_capacity_fraction: float, learn_period: int, target_network_update_period: int, tied_layers: Set[str], rng_key: parts.PRNGKey, ): self._preprocessor = preprocessor self._replay = replay self._transition_accumulator = transition_accumulator self._batch_size = batch_size self._exploration_epsilon = exploration_epsilon self._min_replay_capacity = min_replay_capacity_fraction * replay.capacity self._learn_period = learn_period self._target_network_update_period = target_network_update_period # Initialize network parameters and optimizer. self._rng_key, network_rng_key_active, network_rng_key_passive = ( jax.random.split(rng_key, 3)) active_params = network.active.init( network_rng_key_active, sample_network_input[None, ...]) passive_params = network.passive.init( network_rng_key_passive, sample_network_input[None, ...]) self._online_params = TandemTuple( active=active_params, passive=passive_params) self._target_params = self._online_params self._opt_state = tandem_map( lambda optim, params: optim.init(params), optimizer, self._online_params) # Other agent state: last action, frame count, etc. self._action = None self._frame_t = -1 # Current frame index. # Stats. stats = [ 'loss_active', 'loss_passive', 'frac_diff_argmax', 'mc_error_active', 'mc_error_passive', 'mc_error_abs_active', 'mc_error_abs_passive', ] self._statistics = {k: np.nan for k in stats} # Define jitted loss, update, and policy functions here instead of as # class methods, to emphasize that these are meant to be pure functions # and should not access the agent object's state via `self`. 
def network_outputs(rng_key, online_params, target_params, transitions): """Compute all potentially needed outputs of active and passive net.""" _, *apply_keys = jax.random.split(rng_key, 4) outputs_tm1 = tandem_map( lambda net, param: net.apply(param, apply_keys[0], transitions.s_tm1), network, online_params) outputs_t = tandem_map( lambda net, param: net.apply(param, apply_keys[1], transitions.s_t), network, online_params) outputs_target_t = tandem_map( lambda net, param: net.apply(param, apply_keys[2], transitions.s_t), network, target_params) return outputs_tm1, outputs_t, outputs_target_t # Helper functions to define active and passive losses. # Active and passive losses are allowed to depend on all active and passive # outputs, but stop-gradient is used to prevent gradients from flowing # from active loss to passive network params and vice versa. def sg_active(x): return TandemTuple( active=jax.lax.stop_gradient(x.active), passive=x.passive) def sg_passive(x): return TandemTuple( active=x.active, passive=jax.lax.stop_gradient(x.passive)) def compute_loss(online_params, target_params, transitions, rng_key): rng_key, apply_key = jax.random.split(rng_key) outputs_tm1, outputs_t, outputs_target_t = network_outputs( apply_key, online_params, target_params, transitions) _, loss_key_active, loss_key_passive = jax.random.split(rng_key, 3) loss_active = loss.active( sg_passive(outputs_tm1), sg_passive(outputs_t), outputs_target_t, transitions, loss_key_active) loss_passive = loss.passive( sg_active(outputs_tm1), sg_active(outputs_t), outputs_target_t, transitions, loss_key_passive) # Logging stuff. a_tm1 = transitions.a_tm1 mc_return_tm1 = transitions.mc_return_tm1 q_values = TandemTuple( active=outputs_tm1.active.q_values, passive=outputs_tm1.passive.q_values) mc_error = jax.tree_map( lambda q: losses.batch_mc_learning(q, a_tm1, mc_return_tm1), q_values) mc_error_abs = jax.tree_map(jnp.abs, mc_error) q_argmax = jax.tree_map(lambda q: jnp.argmax(q, axis=-1), q_values) argmax_diff = jnp.not_equal(q_argmax.active, q_argmax.passive) batch_mean = lambda x: jnp.mean(x, axis=0) logs = { 'loss_active': loss_active, 'loss_passive': loss_passive } logs.update(jax.tree_map(batch_mean, { 'frac_diff_argmax': argmax_diff, 'mc_error_active': mc_error.active, 'mc_error_passive': mc_error.passive, 'mc_error_abs_active': mc_error_abs.active, 'mc_error_abs_passive': mc_error_abs.passive, })) return loss_active + loss_passive, logs def optim_update(optim, online_params, d_loss_d_params, opt_state): updates, new_opt_state = optim.update(d_loss_d_params, opt_state) new_online_params = optax.apply_updates(online_params, updates) return new_opt_state, new_online_params def compute_loss_grad(rng_key, online_params, target_params, transitions): rng_key, grad_key = jax.random.split(rng_key) (_, logs), d_loss_d_params = jax.value_and_grad( compute_loss, has_aux=True)( online_params, target_params, transitions, grad_key) return rng_key, logs, d_loss_d_params def update_active(rng_key, opt_state, online_params, target_params, transitions): """Applies learning update for active network only.""" rng_key, logs, d_loss_d_params = compute_loss_grad( rng_key, online_params, target_params, transitions) new_opt_state_active, new_online_params_active = optim_update( optimizer.active, online_params.active, d_loss_d_params.active, opt_state.active) new_opt_state = opt_state._replace( active=new_opt_state_active) new_online_params = online_params._replace( active=new_online_params_active) return rng_key, new_opt_state, 
new_online_params, logs self._update_active = jax.jit(update_active) def update_passive(rng_key, opt_state, online_params, target_params, transitions): """Applies learning update for passive network only.""" rng_key, logs, d_loss_d_params = compute_loss_grad( rng_key, online_params, target_params, transitions) new_opt_state_passive, new_online_params_passive = optim_update( optimizer.passive, online_params.passive, d_loss_d_params.passive, opt_state.passive) new_opt_state = opt_state._replace( passive=new_opt_state_passive) new_online_params = online_params._replace( passive=new_online_params_passive) return rng_key, new_opt_state, new_online_params, logs self._update_passive = jax.jit(update_passive) def update_active_passive(rng_key, opt_state, online_params, target_params, transitions): """Applies learning update for both active & passive networks.""" rng_key, logs, d_loss_d_params = compute_loss_grad( rng_key, online_params, target_params, transitions) new_opt_state_active, new_online_params_active = optim_update( optimizer.active, online_params.active, d_loss_d_params.active, opt_state.active) new_opt_state_passive, new_online_params_passive = optim_update( optimizer.passive, online_params.passive, d_loss_d_params.passive, opt_state.passive) new_opt_state = TandemTuple(active=new_opt_state_active, passive=new_opt_state_passive) new_online_params = TandemTuple(active=new_online_params_active, passive=new_online_params_passive) return rng_key, new_opt_state, new_online_params, logs self._update_active_passive = jax.jit(update_active_passive) self._update = None # set_training_mode needs to be called to set this. def sync_tied_layers(online_params): """Set tied layer params of passive to respective values of active.""" new_online_params_passive = replace_module_params( source=online_params.active, target=online_params.passive, modules=tied_layers) return online_params._replace(passive=new_online_params_passive) self._sync_tied_layers = jax.jit(sync_tied_layers) def select_action(rng_key, network_params, s_t, exploration_epsilon): """Samples action from eps-greedy policy wrt Q-values at given state.""" rng_key, apply_key, policy_key = jax.random.split(rng_key, 3) q_t = network.active.apply(network_params, apply_key, s_t[None, ...]).q_values[0] a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon) return rng_key, a_t self._select_action = jax.jit(select_action) def step(self, timestep: dm_env.TimeStep) -> parts.Action: """Selects action given timestep and potentially learns.""" self._frame_t += 1 timestep = self._preprocessor(timestep) if timestep is None: # Repeat action. action = self._action else: action = self._action = self._act(timestep) for transition in self._transition_accumulator.step(timestep, action): self._replay.add(transition) if self._replay.size < self._min_replay_capacity: return action if self._frame_t % self._learn_period == 0: self._learn() if self._frame_t % self._target_network_update_period == 0: self._target_params = self._online_params return action def reset(self) -> None: """Resets the agent's episodic state such as frame stack and action repeat. This method should be called at the beginning of every episode. 
""" self._transition_accumulator.reset() processors.reset(self._preprocessor) self._action = None def _act(self, timestep) -> parts.Action: """Selects action given timestep, according to epsilon-greedy policy.""" s_t = timestep.observation network_params = self._online_params.active self._rng_key, a_t = self._select_action( self._rng_key, network_params, s_t, self.exploration_epsilon) return parts.Action(jax.device_get(a_t)) def _learn(self) -> None: """Samples a batch of transitions from replay and learns from it.""" logging.log_first_n(logging.INFO, 'Begin learning', 1) transitions = self._replay.sample(self._batch_size) self._rng_key, self._opt_state, self._online_params, logs = self._update( self._rng_key, self._opt_state, self._online_params, self._target_params, transitions, ) self._online_params = self._sync_tied_layers(self._online_params) self._statistics.update(jax.device_get(logs)) def set_training_mode(self, mode: str): """Sets training mode to one of 'active', 'passive', or 'active_passive'.""" if mode == 'active': self._update = self._update_active elif mode == 'passive': self._update = self._update_passive elif mode == 'active_passive': self._update = self._update_active_passive @property def online_params(self) -> TandemTuple: """Returns current parameters of Q-network.""" return self._online_params @property def statistics(self) -> Mapping[Text, float]: """Returns current agent statistics as a dictionary.""" # Check for DeviceArrays in values as this can be very slow. assert all( not isinstance(x, jnp.DeviceArray) for x in self._statistics.values()) return self._statistics @property def exploration_epsilon(self) -> float: """Returns epsilon value currently used by (eps-greedy) behavior policy.""" return self._exploration_epsilon(self._frame_t) def get_state(self) -> Mapping[Text, Any]: """Retrieves agent state as a dictionary (e.g. for serialization).""" state = { 'rng_key': self._rng_key, 'frame_t': self._frame_t, 'opt_state_active': self._opt_state.active, 'online_params_active': self._online_params.active, 'target_params_active': self._target_params.active, 'opt_state_passive': self._opt_state.passive, 'online_params_passive': self._online_params.passive, 'target_params_passive': self._target_params.passive, 'replay': self._replay.get_state(), } return state def set_state(self, state: Mapping[Text, Any]) -> None: """Sets agent state from a (potentially de-serialized) dictionary.""" self._rng_key = state['rng_key'] self._frame_t = state['frame_t'] self._opt_state = TandemTuple( active=jax.device_put(state['opt_state_active']), passive=jax.device_put(state['opt_state_passive'])) self._online_params = TandemTuple( active=jax.device_put(state['online_params_active']), passive=jax.device_put(state['online_params_passive'])) self._target_params = TandemTuple( active=jax.device_put(state['target_params_active']), passive=jax.device_put(state['target_params_passive'])) self._replay.set_state(state['replay'])
deepmind-research-master
tandem_dqn/agent.py
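A toy illustration (not from the repo) of how tandem_map applies a function elementwise across the active and passive halves of TandemTuple arguments, which is how the agent pairs up networks, optimizers, and losses:

from tandem_dqn import agent as agent_lib

a = agent_lib.TandemTuple(active=1.0, passive=2.0)
b = agent_lib.TandemTuple(active=10.0, passive=20.0)
print(agent_lib.tandem_map(lambda x, y: x + y, a, b))
# TandemTuple(active=11.0, passive=22.0)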
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Components for DQN.""" import abc import collections import csv import os import timeit from typing import Any, Iterable, Mapping, Optional, Text, Tuple, Union import dm_env import jax import jax.numpy as jnp import numpy as np import rlax from tandem_dqn import networks from tandem_dqn import processors Action = int Network = networks.Network NetworkParams = networks.Params PRNGKey = jnp.ndarray # A size 2 array. class Agent(abc.ABC): """Agent interface.""" @abc.abstractmethod def step(self, timestep: dm_env.TimeStep) -> Action: """Selects action given timestep and potentially learns.""" @abc.abstractmethod def reset(self) -> None: """Resets the agent's episodic state such as frame stack and action repeat. This method should be called at the beginning of every episode. """ @abc.abstractmethod def get_state(self) -> Mapping[Text, Any]: """Retrieves agent state as a dictionary (e.g. for serialization).""" @abc.abstractmethod def set_state(self, state: Mapping[Text, Any]) -> None: """Sets agent state from a (potentially de-serialized) dictionary.""" @property @abc.abstractmethod def statistics(self) -> Mapping[Text, float]: """Returns current agent statistics as a dictionary.""" def run_loop( agent: Agent, environment: dm_env.Environment, max_steps_per_episode: int = 0, yield_before_reset: bool = False, ) -> Iterable[Tuple[dm_env.Environment, Optional[dm_env.TimeStep], Agent, Optional[Action]]]: """Repeatedly alternates step calls on environment and agent. At time `t`, `t + 1` environment timesteps and `t + 1` agent steps have been seen in the current episode. `t` resets to `0` for the next episode. Args: agent: Agent to be run, has methods `step(timestep)` and `reset()`. environment: Environment to run, has methods `step(action)` and `reset()`. max_steps_per_episode: If positive, when time t reaches this value within an episode, the episode is truncated. yield_before_reset: Whether to additionally yield `(environment, None, agent, None)` before the agent and environment is reset at the start of each episode. Yields: Tuple `(environment, timestep_t, agent, a_t)` where `a_t = agent.step(timestep_t)`. """ while True: # For each episode. if yield_before_reset: yield environment, None, agent, None, t = 0 agent.reset() timestep_t = environment.reset() # timestep_0. while True: # For each step in the current episode. a_t = agent.step(timestep_t) yield environment, timestep_t, agent, a_t # Update t after one environment step and agent step and relabel. t += 1 a_tm1 = a_t timestep_t = environment.step(a_tm1) if max_steps_per_episode > 0 and t >= max_steps_per_episode: assert t == max_steps_per_episode timestep_t = timestep_t._replace(step_type=dm_env.StepType.LAST) if timestep_t.last(): unused_a_t = agent.step(timestep_t) # Extra agent step, action ignored. 
yield environment, timestep_t, agent, None break def generate_statistics( trackers: Iterable[Any], timestep_action_sequence: Iterable[Tuple[dm_env.Environment, Optional[dm_env.TimeStep], Agent, Optional[Action]]] ) -> Mapping[Text, Any]: """Generates statistics from a sequence of timestep and actions.""" # Only reset at the start, not between episodes. for tracker in trackers: tracker.reset() for environment, timestep_t, agent, a_t in timestep_action_sequence: for tracker in trackers: tracker.step(environment, timestep_t, agent, a_t) # Merge all statistics dictionaries into one. statistics_dicts = (tracker.get() for tracker in trackers) return dict(collections.ChainMap(*statistics_dicts)) class EpisodeTracker: """Tracks episode return and other statistics.""" def __init__(self): self._num_steps_since_reset = None self._num_steps_over_episodes = None self._episode_returns = None self._current_episode_rewards = None self._current_episode_step = None def step( self, environment: Optional[dm_env.Environment], timestep_t: dm_env.TimeStep, agent: Optional[Agent], a_t: Optional[Action], ) -> None: """Accumulates statistics from timestep.""" del (environment, agent, a_t) if timestep_t.first(): if self._current_episode_rewards: raise ValueError('Current episode reward list should be empty.') if self._current_episode_step != 0: raise ValueError('Current episode step should be zero.') else: # First reward is invalid, all other rewards are appended. self._current_episode_rewards.append(timestep_t.reward) self._num_steps_since_reset += 1 self._current_episode_step += 1 if timestep_t.last(): self._episode_returns.append(sum(self._current_episode_rewards)) self._current_episode_rewards = [] self._num_steps_over_episodes += self._current_episode_step self._current_episode_step = 0 def reset(self) -> None: """Resets all gathered statistics, not to be called between episodes.""" self._num_steps_since_reset = 0 self._num_steps_over_episodes = 0 self._episode_returns = [] self._current_episode_step = 0 self._current_episode_rewards = [] def get(self) -> Mapping[Text, Union[int, float, None]]: """Aggregates statistics and returns as a dictionary. Here the convention is `episode_return` is set to `current_episode_return` if a full episode has not been encountered. Otherwise it is set to `mean_episode_return` which is the mean return of complete episodes only. If no steps have been taken at all, `episode_return` is set to `NaN`. Returns: A dictionary of aggregated statistics. 
""" if self._episode_returns: mean_episode_return = np.array(self._episode_returns).mean() current_episode_return = sum(self._current_episode_rewards) episode_return = mean_episode_return else: mean_episode_return = np.nan if self._num_steps_since_reset > 0: current_episode_return = sum(self._current_episode_rewards) else: current_episode_return = np.nan episode_return = current_episode_return return { 'mean_episode_return': mean_episode_return, 'current_episode_return': current_episode_return, 'episode_return': episode_return, 'num_episodes': len(self._episode_returns), 'num_steps_over_episodes': self._num_steps_over_episodes, 'current_episode_step': self._current_episode_step, 'num_steps_since_reset': self._num_steps_since_reset, } class StepRateTracker: """Tracks step rate, number of steps taken and duration since last reset.""" def __init__(self): self._num_steps_since_reset = None self._start = None def step( self, environment: Optional[dm_env.Environment], timestep_t: Optional[dm_env.TimeStep], agent: Optional[Agent], a_t: Optional[Action], ) -> None: del (environment, timestep_t, agent, a_t) self._num_steps_since_reset += 1 def reset(self) -> None: self._num_steps_since_reset = 0 self._start = timeit.default_timer() def get(self) -> Mapping[Text, float]: duration = timeit.default_timer() - self._start if self._num_steps_since_reset > 0: step_rate = self._num_steps_since_reset / duration else: step_rate = np.nan return { 'step_rate': step_rate, 'num_steps': self._num_steps_since_reset, 'duration': duration, } class UnbiasedExponentialWeightedAverageAgentTracker: """'Unbiased Constant-Step-Size Trick' from the Sutton and Barto RL book.""" def __init__(self, step_size: float, initial_agent: Agent): self._initial_statistics = dict(initial_agent.statistics) self._step_size = step_size self.trace = 0. self._statistics = dict(self._initial_statistics) def step( self, environment: Optional[dm_env.Environment], timestep_t: Optional[dm_env.TimeStep], agent: Agent, a_t: Optional[Action], ) -> None: """Accumulates agent statistics.""" del (environment, timestep_t, a_t) self.trace = (1 - self._step_size) * self.trace + self._step_size final_step_size = self._step_size / self.trace assert 0 <= final_step_size <= 1 if final_step_size == 1: # Since the self._initial_statistics is likely to be NaN and # 0 * NaN == NaN just replace self._statistics on the first step. self._statistics = dict(agent.statistics) else: self._statistics = jax.tree_multimap( lambda s, x: (1 - final_step_size) * s + final_step_size * x, self._statistics, agent.statistics) def reset(self) -> None: """Resets statistics and internal state.""" self.trace = 0. # get() may be called before step() so ensure statistics are initialized. self._statistics = dict(self._initial_statistics) def get(self) -> Mapping[Text, float]: """Returns current accumulated statistics.""" return self._statistics def make_default_trackers(initial_agent: Agent): return [ EpisodeTracker(), StepRateTracker(), UnbiasedExponentialWeightedAverageAgentTracker( step_size=1e-3, initial_agent=initial_agent), ] class EpsilonGreedyActor(Agent): """Agent that acts with a given set of Q-network parameters and epsilon. Network parameters are set on the actor. The actor can be serialized, ensuring determinism of execution (e.g. when checkpointing). 
""" def __init__( self, preprocessor: processors.Processor, network: Network, exploration_epsilon: float, rng_key: PRNGKey, ): self._preprocessor = preprocessor self._rng_key = rng_key self._action = None self.network_params = None # Nest of arrays (haiku.Params), set externally. def select_action(rng_key, network_params, s_t): """Samples action from eps-greedy policy wrt Q-values at given state.""" rng_key, apply_key, policy_key = jax.random.split(rng_key, 3) q_t = network.apply(network_params, apply_key, s_t[None, ...]).q_values[0] a_t = rlax.epsilon_greedy().sample(policy_key, q_t, exploration_epsilon) return rng_key, a_t self._select_action = jax.jit(select_action) def step(self, timestep: dm_env.TimeStep) -> Action: """Selects action given a timestep.""" timestep = self._preprocessor(timestep) if timestep is None: # Repeat action. return self._action s_t = timestep.observation self._rng_key, a_t = self._select_action(self._rng_key, self.network_params, s_t) self._action = Action(jax.device_get(a_t)) return self._action def reset(self) -> None: """Resets the agent's episodic state such as frame stack and action repeat. This method should be called at the beginning of every episode. """ processors.reset(self._preprocessor) self._action = None def get_state(self) -> Mapping[Text, Any]: """Retrieves agent state as a dictionary (e.g. for serialization).""" # State contains network params to make agent easy to run from a checkpoint. return { 'rng_key': self._rng_key, 'network_params': self.network_params, } def set_state(self, state: Mapping[Text, Any]) -> None: """Sets agent state from a (potentially de-serialized) dictionary.""" self._rng_key = state['rng_key'] self.network_params = state['network_params'] @property def statistics(self) -> Mapping[Text, float]: return {} class LinearSchedule: """Linear schedule, used for exploration epsilon in DQN agents.""" def __init__(self, begin_value, end_value, begin_t, end_t=None, decay_steps=None): if (end_t is None) == (decay_steps is None): raise ValueError('Exactly one of end_t, decay_steps must be provided.') self._decay_steps = decay_steps if end_t is None else end_t - begin_t self._begin_t = begin_t self._begin_value = begin_value self._end_value = end_value def __call__(self, t): """Implements a linear transition from a begin to an end value.""" frac = min(max(t - self._begin_t, 0), self._decay_steps) / self._decay_steps return (1 - frac) * self._begin_value + frac * self._end_value class NullWriter: """A placeholder logging object that does nothing.""" def write(self, *args, **kwargs) -> None: pass def close(self) -> None: pass class CsvWriter: """A logging object writing to a CSV file. Each `write()` takes a `OrderedDict`, creating one column in the CSV file for each dictionary key on the first call. Successive calls to `write()` must contain the same dictionary keys. """ def __init__(self, fname: Text): """Initializes a `CsvWriter`. Args: fname: File name (path) for file to be written to. """ dirname = os.path.dirname(fname) if not os.path.exists(dirname): os.makedirs(dirname) self._fname = fname self._header_written = False self._fieldnames = None def write(self, values: collections.OrderedDict) -> None: """Appends given values as new row to CSV file.""" if self._fieldnames is None: self._fieldnames = values.keys() # Open a file in 'append' mode, so we can continue logging safely to the # same file after e.g. restarting from a checkpoint. 
with open(self._fname, 'a') as file: # Always use same fieldnames to create writer, this way a consistency # check is performed automatically on each write. writer = csv.DictWriter(file, fieldnames=self._fieldnames) # Write a header if this is the very first write. if not self._header_written: writer.writeheader() self._header_written = True writer.writerow(values) def close(self) -> None: """Closes the `CsvWriter`.""" pass def get_state(self) -> Mapping[Text, Any]: """Retrieves `CsvWriter` state as a `dict` (e.g. for serialization).""" return { 'header_written': self._header_written, 'fieldnames': self._fieldnames } def set_state(self, state: Mapping[Text, Any]) -> None: """Sets `CsvWriter` state from a (potentially de-serialized) dictionary.""" self._header_written = state['header_written'] self._fieldnames = state['fieldnames'] class NullCheckpoint: """A placeholder checkpointing object that does nothing. Can be used as a substitute for an actual checkpointing object when checkpointing is disabled. """ def __init__(self): self.state = AttributeDict() def save(self) -> None: pass def can_be_restored(self) -> bool: return False def restore(self) -> None: pass class AttributeDict(dict): """A `dict` that supports getting, setting, deleting keys via attributes.""" def __getattr__(self, key): return self[key] def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): del self[key]
deepmind-research-master
tandem_dqn/parts.py
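A worked example of LinearSchedule as used for exploration epsilon: with begin_value=1.0, end_value=0.01, begin_t=1000 and decay_steps=10000, the value is flat before step 1000, interpolates linearly over the next 10000 steps, and stays at 0.01 afterwards.

from tandem_dqn import parts

schedule = parts.LinearSchedule(
    begin_value=1.0, end_value=0.01, begin_t=1000, decay_steps=10000)
print(schedule(0))      # 1.0
print(schedule(6000))   # 0.505 (halfway through the decay)
print(schedule(20000))  # 0.01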
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses for TandemDQN.""" from typing import Any, Callable import chex import jax import jax.numpy as jnp import rlax from tandem_dqn import networks # Batch variants of double_q_learning and SARSA. batch_double_q_learning = jax.vmap(rlax.double_q_learning) batch_sarsa_learning = jax.vmap(rlax.sarsa) # Batch variant of quantile_q_learning with fixed tau input across batch. batch_quantile_q_learning = jax.vmap( rlax.quantile_q_learning, in_axes=(0, None, 0, 0, 0, 0, 0, None)) def _mc_learning( q_tm1: chex.Array, a_tm1: chex.Numeric, mc_return_tm1: chex.Array, ) -> chex.Numeric: """Calculates the MC return error.""" chex.assert_rank([q_tm1, a_tm1], [1, 0]) chex.assert_type([q_tm1, a_tm1], [float, int]) return mc_return_tm1 - q_tm1[a_tm1] # Batch variant of MC learning. batch_mc_learning = jax.vmap(_mc_learning) def _qr_loss(q_tm1, q_t, q_target_t, transitions, rng_key): """Calculates QR-Learning loss from network outputs and transitions.""" del q_t, rng_key # Unused. # Compute Q value distributions. huber_param = 1. quantiles = networks.make_quantiles() losses = batch_quantile_q_learning( q_tm1.q_dist, quantiles, transitions.a_tm1, transitions.r_t, transitions.discount_t, q_target_t.q_dist, # No double Q-learning here. q_target_t.q_dist, huber_param, ) loss = jnp.mean(losses) return loss def _sarsa_loss(q_tm1, q_t, transitions, rng_key): """Calculates SARSA loss from network outputs and transitions.""" del rng_key # Unused. grad_error_bound = 1. / 32 td_errors = batch_sarsa_learning( q_tm1.q_values, transitions.a_tm1, transitions.r_t, transitions.discount_t, q_t.q_values, transitions.a_t ) td_errors = rlax.clip_gradient(td_errors, -grad_error_bound, grad_error_bound) losses = rlax.l2_loss(td_errors) loss = jnp.mean(losses) return loss def _mc_loss(q_tm1, transitions, rng_key): """Calculates Monte-Carlo return loss, i.e. regression towards MC return.""" del rng_key # Unused. errors = batch_mc_learning(q_tm1.q_values, transitions.a_tm1, transitions.mc_return_tm1) loss = jnp.mean(rlax.l2_loss(errors)) return loss def _double_q_loss(q_tm1, q_t, q_target_t, transitions, rng_key): """Calculates Double Q-Learning loss from network outputs and transitions.""" del rng_key # Unused. grad_error_bound = 1. 
/ 32 td_errors = batch_double_q_learning( q_tm1.q_values, transitions.a_tm1, transitions.r_t, transitions.discount_t, q_target_t.q_values, q_t.q_values, ) td_errors = rlax.clip_gradient(td_errors, -grad_error_bound, grad_error_bound) losses = rlax.l2_loss(td_errors) loss = jnp.mean(losses) return loss def _q_regression_loss(q_tm1, q_tm1_target): """Loss for regression of all action values towards targets.""" errors = q_tm1.q_values - jax.lax.stop_gradient(q_tm1_target.q_values) loss = jnp.mean(rlax.l2_loss(errors)) return loss def make_loss_fn(loss_type: str, active: bool) -> Callable[..., Any]: """Create active or passive loss function of given type.""" if active: primary = lambda x: x.active secondary = lambda x: x.passive else: primary = lambda x: x.passive secondary = lambda x: x.active def sarsa_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """SARSA loss using own networks.""" del q_t # Unused. return _sarsa_loss(primary(q_tm1), primary(q_target_t), transitions, rng_key) def mc_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """MonteCarlo loss.""" del q_t, q_target_t return _mc_loss(primary(q_tm1), transitions, rng_key) def double_q_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """Regular DoubleQ loss using own networks.""" return _double_q_loss(primary(q_tm1), primary(q_t), primary(q_target_t), transitions, rng_key) def double_q_loss_v_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """DoubleQ loss using other network's (target) value function.""" return _double_q_loss(primary(q_tm1), primary(q_t), secondary(q_target_t), transitions, rng_key) def double_q_loss_p_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """DoubleQ loss using other network's (online) argmax policy.""" return _double_q_loss(primary(q_tm1), secondary(q_t), primary(q_target_t), transitions, rng_key) def double_q_loss_pv_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """DoubleQ loss using other network's argmax policy & target value fn.""" return _double_q_loss(primary(q_tm1), secondary(q_t), secondary(q_target_t), transitions, rng_key) # Pure regression. def q_regression_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """Pure regression of q_tm1(self) towards q_tm1(other).""" del q_t, q_target_t, transitions, rng_key # Unused. return _q_regression_loss(primary(q_tm1), secondary(q_tm1)) # QR loss. def qr_loss_fn(q_tm1, q_t, q_target_t, transitions, rng_key): """QR-Q loss using own networks.""" return _qr_loss(primary(q_tm1), primary(q_t), primary(q_target_t), transitions, rng_key) if loss_type == 'double_q': return double_q_loss_fn elif loss_type == 'sarsa': return sarsa_loss_fn elif loss_type == 'mc_return': return mc_loss_fn elif loss_type == 'double_q_v': return double_q_loss_v_fn elif loss_type == 'double_q_p': return double_q_loss_p_fn elif loss_type == 'double_q_pv': return double_q_loss_pv_fn elif loss_type == 'q_regression': return q_regression_loss_fn elif loss_type == 'qr': return qr_loss_fn else: raise ValueError('Unknown loss "{}"'.format(loss_type))
deepmind-research-master
tandem_dqn/losses.py
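A small check of the Monte-Carlo return error that several of these losses build on: for each transition it is simply mc_return_tm1 minus the Q-value of the action actually taken.

import jax.numpy as jnp

from tandem_dqn import losses

q_tm1 = jnp.array([[1.0, 2.0, 3.0]])     # Batch of one state, 3 actions.
a_tm1 = jnp.array([2], dtype=jnp.int32)  # Action taken.
mc_return_tm1 = jnp.array([4.5])
print(losses.batch_mc_learning(q_tm1, a_tm1, mc_return_tm1))  # [1.5]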
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """dm_env environment wrapper around Gym Atari configured to be like Xitari. Gym Atari is built on the Arcade Learning Environment (ALE), whereas Xitari is an old fork of the ALE. """ # pylint: disable=g-bad-import-order from typing import Optional, Tuple import atari_py # pylint: disable=unused-import for gym to load Atari games. import dm_env from dm_env import specs import gym import numpy as np from tandem_dqn import atari_data _GYM_ID_SUFFIX = '-xitari-v1' _SA_SUFFIX = '-sa' def _game_id(game, sticky_actions): return game + (_SA_SUFFIX if sticky_actions else '') + _GYM_ID_SUFFIX def _register_atari_environments(): """Registers Atari environments in Gym to be as similar to Xitari as possible. Main difference from PongNoFrameSkip-v4, etc. is max_episode_steps is unset and only the usual 57 Atari games are registered. Additionally, sticky-actions variants of the environments are registered with an '-sa' suffix. """ for sticky_actions in [False, True]: for game in atari_data.ATARI_GAMES: repeat_action_probability = 0.25 if sticky_actions else 0.0 gym.envs.registration.register( id=_game_id(game, sticky_actions), entry_point='gym.envs.atari:AtariEnv', kwargs={ # Explicitly set all known arguments. 'game': game, 'mode': None, # Not necessarily the same as 0. 'difficulty': None, # Not necessarily the same as 0. 'obs_type': 'image', 'frameskip': 1, # Get every frame. 'repeat_action_probability': repeat_action_probability, 'full_action_space': False, }, max_episode_steps=None, # No time limit, handled in run loop. nondeterministic=False, # Xitari is deterministic. ) _register_atari_environments() class GymAtari(dm_env.Environment): """Gym Atari with a `dm_env.Environment` interface.""" def __init__(self, game, sticky_actions, seed): self._gym_env = gym.make(_game_id(game, sticky_actions)) self._gym_env.seed(seed) self._start_of_episode = True def reset(self) -> dm_env.TimeStep: """Resets the environment and starts a new episode.""" observation = self._gym_env.reset() lives = np.int32(self._gym_env.ale.lives()) timestep = dm_env.restart((observation, lives)) self._start_of_episode = False return timestep def step(self, action: np.int32) -> dm_env.TimeStep: """Updates the environment given an action and returns a timestep.""" # If the previous timestep was LAST then we call reset() on the Gym # environment, otherwise step(). Although Gym environments allow you to step # through episode boundaries (similar to dm_env) they emit a warning. if self._start_of_episode: step_type = dm_env.StepType.FIRST observation = self._gym_env.reset() discount = None reward = None done = False else: observation, reward, done, info = self._gym_env.step(action) if done: assert 'TimeLimit.truncated' not in info, 'Should never truncate.' step_type = dm_env.StepType.LAST discount = 0. else: step_type = dm_env.StepType.MID discount = 1. 
lives = np.int32(self._gym_env.ale.lives()) timestep = dm_env.TimeStep( step_type=step_type, observation=(observation, lives), reward=reward, discount=discount, ) self._start_of_episode = done return timestep def observation_spec(self) -> Tuple[specs.Array, specs.Array]: space = self._gym_env.observation_space return (specs.Array(shape=space.shape, dtype=space.dtype, name='rgb'), specs.Array(shape=(), dtype=np.int32, name='lives')) def action_spec(self) -> specs.DiscreteArray: space = self._gym_env.action_space return specs.DiscreteArray( num_values=space.n, dtype=np.int32, name='action') def close(self): self._gym_env.close() class RandomNoopsEnvironmentWrapper(dm_env.Environment): """Adds a random number of noop actions at the beginning of each episode.""" def __init__(self, environment: dm_env.Environment, max_noop_steps: int, min_noop_steps: int = 0, noop_action: int = 0, seed: Optional[int] = None): """Initializes the random noops environment wrapper.""" self._environment = environment if max_noop_steps < min_noop_steps: raise ValueError('max_noop_steps must be greater or equal min_noop_steps') self._min_noop_steps = min_noop_steps self._max_noop_steps = max_noop_steps self._noop_action = noop_action self._rng = np.random.RandomState(seed) def reset(self): """Begins new episode. This method resets the wrapped environment and applies a random number of noop actions before returning the last resulting observation as the first episode timestep. Intermediate timesteps emitted by the inner environment (including all rewards and discounts) are discarded. Returns: First episode timestep corresponding to the timestep after a random number of noop actions are applied to the inner environment. Raises: RuntimeError: if an episode end occurs while the inner environment is being stepped through with the noop action. """ return self._apply_random_noops(initial_timestep=self._environment.reset()) def step(self, action): """Steps environment given action. If beginning a new episode then random noops are applied as in `reset()`. Args: action: action to pass to environment conforming to action spec. Returns: `Timestep` from the inner environment unless beginning a new episode, in which case this is the timestep after a random number of noop actions are applied to the inner environment. """ timestep = self._environment.step(action) if timestep.first(): return self._apply_random_noops(initial_timestep=timestep) else: return timestep def _apply_random_noops(self, initial_timestep): assert initial_timestep.first() num_steps = self._rng.randint(self._min_noop_steps, self._max_noop_steps + 1) timestep = initial_timestep for _ in range(num_steps): timestep = self._environment.step(self._noop_action) if timestep.last(): raise RuntimeError('Episode ended while applying %s noop actions.' % num_steps) # We make sure to return a FIRST timestep, i.e. discard rewards & discounts. return dm_env.restart(timestep.observation) ## All methods except for reset and step redirect to the underlying env. def observation_spec(self): return self._environment.observation_spec() def action_spec(self): return self._environment.action_spec() def reward_spec(self): return self._environment.reward_spec() def discount_spec(self): return self._environment.discount_spec() def close(self): return self._environment.close()
deepmind-research-master
tandem_dqn/gym_atari.py
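A minimal usage sketch of the two wrappers above, following the construction in tandem_dqn/run_tandem.py; the game name and seeds are illustrative.

import numpy as np
from tandem_dqn import gym_atari

env = gym_atari.GymAtari('pong', sticky_actions=False, seed=1)
env = gym_atari.RandomNoopsEnvironmentWrapper(
    env, min_noop_steps=1, max_noop_steps=30, seed=1)

timestep = env.reset()             # Observation is an (rgb, lives) tuple.
rgb, lives = timestep.observation
timestep = env.step(np.int32(0))   # Action 0 is NOOP in the Atari action set.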
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Composable timestep processing, for DQN Atari preprocessing. Aims: * Be self-contained. * Easy to have the preprocessing on the agent side or on the environment side. * Easy to swap out and modify parts of the processing. Conventions: * The term "processor" is used to refer to any callable that could also have a `reset()` function to clear any internal state. E.g. a plain function. Or an instance of a class with `__call__` method, with or without a `reset()` method. * `None` means no output when subsampling inputs. """ import collections from typing import Any, Callable, List, Iterable, Optional, Sequence, Text, Tuple import dm_env from dm_env import specs import numpy as np from PIL import Image Processor = Callable # Actually a callable that may also have a reset() method. Nest = Any # Recursive types are not yet supported by pytype. NamedTuple = Any StepType = dm_env.StepType def reset(processor: Processor[[Any], Any]) -> None: """Calls `reset()` on a `Processor` or function if the method exists.""" if hasattr(processor, 'reset'): processor.reset() identity = lambda v: v def trailing_zero_pad( length: int) -> Processor[[List[np.ndarray]], List[np.ndarray]]: """Adds trailing zero padding to array lists to ensure a minimum length.""" def trailing_zero_pad_fn(arrays): padding_length = length - len(arrays) if padding_length <= 0: return arrays zero = np.zeros_like(arrays[0]) return arrays + [zero] * padding_length return trailing_zero_pad_fn def none_to_zero_pad(values: List[Optional[NamedTuple]]) -> List[NamedTuple]: """Replaces `None`s in a list of named tuples with zeros of same structure.""" actual_values = [n for n in values if n is not None] if not actual_values: raise ValueError('Must have at least one value which is not None.') if len(actual_values) == len(values): return values example = actual_values[0] zero = type(example)(*(np.zeros_like(x) for x in example)) return [zero if v is None else v for v in values] def named_tuple_sequence_stack(values: Sequence[NamedTuple]) -> NamedTuple: """Converts a sequence of named tuples into a named tuple of tuples.""" # [T(1, 2), T(3, 4), T(5, 6)]. transposed = zip(*values) # ((1, 3, 5), (2, 4, 6)). return type(values[0])(*transposed) # T((1, 3, 5), (2, 4, 6)). class Deque: """Double ended queue with a maximum length and initial values.""" def __init__(self, max_length: int, initial_values=None): self._deque = collections.deque(maxlen=max_length) self._initial_values = initial_values or [] def reset(self) -> None: self._deque.clear() self._deque.extend(self._initial_values) def __call__(self, value: Any) -> collections.deque: self._deque.append(value) return self._deque class FixedPaddedBuffer: """Fixed size `None`-padded buffer which is cleared after it is filled. E.g. with `length = 3`, `initial_index = 2` and values `[0, 1, 2, 3, 4, 5, 6]` this will return `~~0`, `1~~`, `12~`, `123`, `4~~`, `45~`, `456`, where `~` represents `None`. 
Used to concatenate timesteps for action repeats. Action repeat requirements are: * Fixed size buffer of timesteps. * The `FIRST` timestep should return immediately to get the first action of the episode, as there is no preceding action to repeat. Prefix with padding. * For `MID` timesteps, the timestep buffer is periodically returned when full. * When a `LAST` timestep is encountered, the current buffer of timesteps is returned, suffixed with padding, as buffers should not cross episode boundaries. The requirements can be fulfilled by conditionally subsampling the output of this processor. """ def __init__(self, length: int, initial_index: int): self._length = length self._initial_index = initial_index % length self._index = self._initial_index self._buffer = [None] * self._length def reset(self) -> None: self._index = self._initial_index self._buffer = [None] * self._length def __call__(self, value: Any) -> Sequence[Any]: if self._index >= self._length: assert self._index == self._length self._index = 0 self._buffer = [None] * self._length self._buffer[self._index] = value self._index += 1 return self._buffer class ConditionallySubsample: """Conditionally passes through input, returning `None` otherwise.""" def __init__(self, condition: Processor[[Any], bool]): self._condition = condition def reset(self) -> None: reset(self._condition) def __call__(self, value: Any) -> Optional[Any]: return value if self._condition(value) else None class TimestepBufferCondition: """Returns `True` when an iterable of timesteps should be passed on. Specifically returns `True`: * If timesteps contain a `FIRST`. * If timesteps contain a `LAST`. * If number of steps passed since `FIRST` timestep modulo `period` is `0`. Returns `False` otherwise. Used for action repeats in Atari preprocessing. """ def __init__(self, period: int): self._period = period self._steps_since_first_timestep = None self._should_reset = False def reset(self): self._should_reset = False self._steps_since_first_timestep = None def __call__(self, timesteps: Iterable[dm_env.TimeStep]) -> bool: if self._should_reset: raise RuntimeError('Should have reset.') # Find the main step type, FIRST and LAST take precedence over MID. main_step_type = StepType.MID precedent_step_types = (StepType.FIRST, StepType.LAST) for timestep in timesteps: if timestep is None: continue if timestep.step_type in precedent_step_types: if main_step_type in precedent_step_types: raise RuntimeError('Expected at most one FIRST or LAST.') main_step_type = timestep.step_type # Must have FIRST timestep after a reset. 
if self._steps_since_first_timestep is None: if main_step_type != StepType.FIRST: raise RuntimeError('After reset first timestep should be FIRST.') # pytype: disable=unsupported-operands if main_step_type == StepType.FIRST: self._steps_since_first_timestep = 0 return True elif main_step_type == StepType.LAST: self._steps_since_first_timestep = None self._should_reset = True return True elif (self._steps_since_first_timestep + 1) % self._period == 0: self._steps_since_first_timestep += 1 return True else: self._steps_since_first_timestep += 1 return False # pytype: enable=unsupported-operands class ApplyToNamedTupleField: """Runs processors on a particular field of a named tuple.""" def __init__(self, field: Text, *processors: Processor[[Any], Any]): self._field = field self._processors = processors def reset(self) -> None: for processor in self._processors: reset(processor) def __call__(self, value: NamedTuple) -> NamedTuple: attr_value = getattr(value, self._field) for processor in self._processors: attr_value = processor(attr_value) return value._replace(**{self._field: attr_value}) class Maybe: """Wraps another processor so that `None` is returned when `None` is input.""" def __init__(self, processor: Processor[[Any], Any]): self._processor = processor def reset(self) -> None: reset(self._processor) def __call__(self, value: Optional[Any]) -> Optional[Any]: if value is None: return None else: return self._processor(value) class Sequential: """Chains together multiple processors.""" def __init__(self, *processors: Processor[[Any], Any]): self._processors = processors def reset(self) -> None: for processor in self._processors: reset(processor) def __call__(self, value: Any) -> Any: for processor in self._processors: value = processor(value) return value class ZeroDiscountOnLifeLoss: """Sets discount to zero on timestep if number of lives has decreased. This processor assumes observations to be tuples whose second entry is a scalar indicating the remaining number of lives. """ def __init__(self): self._num_lives_on_prev_step = None def reset(self) -> None: self._num_lives_on_prev_step = None def __call__(self, timestep: dm_env.TimeStep) -> dm_env.TimeStep: # We have a life loss when the timestep is a regular transition and lives # have decreased since the previous timestep. num_lives = timestep.observation[1] life_lost = timestep.mid() and (num_lives < self._num_lives_on_prev_step) self._num_lives_on_prev_step = num_lives return timestep._replace(discount=0.) if life_lost else timestep def reduce_step_type(step_types: Sequence[StepType], debug: bool = False) -> StepType: """Outputs a representative step type from an array of step types.""" # Zero padding will appear to be FIRST. Padding should only be seen before the # FIRST (e.g. 000F) or after LAST (e.g. ML00). if debug: np_step_types = np.array(step_types) output_step_type = StepType.MID for i, step_type in enumerate(step_types): if step_type == 0: # step_type not actually FIRST, but we do expect 000F. 
if debug and not (np_step_types == 0).all(): raise ValueError('Expected zero padding followed by FIRST.') output_step_type = StepType.FIRST break elif step_type == StepType.LAST: output_step_type = StepType.LAST if debug and not (np_step_types[i + 1:] == 0).all(): raise ValueError('Expected LAST to be followed by zero padding.') break else: if step_type != StepType.MID: raise ValueError('Expected MID if not FIRST or LAST.') return output_step_type def aggregate_rewards(rewards: Sequence[Optional[float]], debug: bool = False) -> Optional[float]: """Sums up rewards, assumes discount is 1.""" if None in rewards: if debug: np_rewards = np.array(rewards) if not (np_rewards[-1] is None and (np_rewards[:-1] == 0).all()): # Should only ever have [0, 0, 0, None] due to zero padding. raise ValueError('Should only have a None reward for FIRST.') return None else: # Faster than np.sum for a list of floats. return sum(rewards) def aggregate_discounts(discounts: Sequence[Optional[float]], debug: bool = False) -> Optional[float]: """Aggregates array of discounts into a scalar, expects `0`, `1` or `None`.""" if debug: np_discounts = np.array(discounts) if not np.isin(np_discounts, [0., 1., None]).all(): raise ValueError('All discounts should be 0 or 1, got: %s.' % np_discounts) if None in discounts: if debug: if not (np_discounts[-1] is None and (np_discounts[:-1] == 0).all()): # Should have [0, 0, 0, None] due to zero padding. raise ValueError('Should only have a None discount for FIRST.') return None else: # Faster than np.prod for a list of floats. result = 1 for d in discounts: result *= d return result def rgb2y(array: np.ndarray) -> np.ndarray: """Converts RGB image array into grayscale.""" if array.ndim != 3: raise ValueError('Input array should be 3D, got %s.' % array.ndim) output = np.tensordot(array, [0.299, 0.587, 1 - (0.299 + 0.587)], (-1, 0)) return output.astype(np.uint8) def resize(shape: Tuple[int, ...]) -> Processor[[np.ndarray], np.ndarray]: """Resizes array to the given shape.""" if len(shape) != 2: raise ValueError('Resize shape has to be 2D, given: %s.' % str(shape)) # Image.resize takes (width, height) as output_shape argument. 
image_shape = (shape[1], shape[0]) def resize_fn(array): image = Image.fromarray(array).resize(image_shape, Image.BILINEAR) return np.array(image, dtype=np.uint8) return resize_fn def select_rgb_observation(timestep: dm_env.TimeStep) -> dm_env.TimeStep: """Replaces an observation tuple by its first entry (the RGB observation).""" return timestep._replace(observation=timestep.observation[0]) def apply_additional_discount( additional_discount: float) -> Processor[[float], float]: """Returns a function that scales its non-`None` input by a constant.""" return lambda d: None if d is None else additional_discount * d def clip_reward(bound: float) -> Processor[[Optional[float]], Optional[float]]: """Returns a function that clips non-`None` inputs to (`-bound`, `bound`).""" def clip_reward_fn(reward): return None if reward is None else max(min(reward, bound), -bound) return clip_reward_fn def show(prefix: Text) -> Processor[[Any], Any]: """Prints value and passes through, for debugging.""" def show_fn(value): print('%s: %s' % (prefix, value)) return value return show_fn def atari( additional_discount: float = 0.99, max_abs_reward: Optional[float] = 1.0, resize_shape: Optional[Tuple[int, int]] = (84, 84), num_action_repeats: int = 4, num_pooled_frames: int = 2, zero_discount_on_life_loss: bool = True, num_stacked_frames: int = 4, grayscaling: bool = True, ) -> Processor[[dm_env.TimeStep], Optional[dm_env.TimeStep]]: """Standard DQN preprocessing on Atari.""" # This processor does the following to a sequence of timesteps. # # 1. Zeroes discount on loss of life. # 2. Repeats actions (previous action should be repeated if None is returned). # 3. Max pools action repeated observations. # 4. Grayscales observations. # 5. Resizes observations. # 6. Stacks observations. # 7. Clips rewards. # 8. Applies an additional discount. # # For more detail see the annotations in the processors below. # The FixedPaddedBuffer, ConditionallySubsample, none_to_zero_pad, stack and # max_pool on the observation collectively does this (step types: F = FIRST, # M = MID, L = LAST, ~ is None): # # Type: F | M M M M | M M L | F | # Frames: A | B C D E | F G H | I | # Output: max[0A]| ~ ~ ~ max[DE]| ~ ~ max[H0]|max[0I]| return Sequential( # When the number of lives decreases, set discount to 0. ZeroDiscountOnLifeLoss() if zero_discount_on_life_loss else identity, # Select the RGB observation as the main observation, dropping lives. select_rgb_observation, # obs: 1, 2, 3, 4, 5, 6, 7, 8, 9, ... # Write timesteps into a fixed-sized buffer padded with None. FixedPaddedBuffer(length=num_action_repeats, initial_index=-1), # obs: ~~~1, 2~~~, 23~~, 234~, 2345, 6~~~, 67~~, 678~, 6789, ... # Periodically return the deque of timesteps, when the current timestep is # FIRST, after that every 4 steps, and when the current timestep is LAST. ConditionallySubsample(TimestepBufferCondition(num_action_repeats)), # obs: ~~~1, ~, ~, ~, 2345, ~, ~, ~, 6789, ... # If None pass through, otherwise apply the processor. Maybe( Sequential( # Replace Nones with zero padding in each buffer. none_to_zero_pad, # obs: 0001, ~, ~, ~, 2345, ~, ~, ~, 6789, ... # Convert sequence of nests into a nest of sequences. named_tuple_sequence_stack, # Choose representative step type from an array of step types. ApplyToNamedTupleField('step_type', reduce_step_type), # Rewards: sum then clip. ApplyToNamedTupleField( 'reward', aggregate_rewards, clip_reward(max_abs_reward) if max_abs_reward else identity, ), # Discounts: take product and scale by an additional discount. 
ApplyToNamedTupleField( 'discount', aggregate_discounts, apply_additional_discount(additional_discount), ), # Observations: max pool, grayscale, resize, and stack. ApplyToNamedTupleField( 'observation', lambda obs: np.stack(obs[-num_pooled_frames:], axis=0), lambda obs: np.max(obs, axis=0), # obs: max[01], ~, ~, ~, max[45], ~, ~, ~, max[89], ... # obs: A, ~, ~, ~, B, ~, ~, ~, C, ... rgb2y if grayscaling else identity, resize(resize_shape) if resize_shape else identity, Deque(max_length=num_stacked_frames), # obs: A, ~, ~, ~, AB, ~, ~, ~, ABC, ~, ~, ~, ABCD, ~, ~, ~, # BCDE, ~, ~, ~, CDEF, ... list, trailing_zero_pad(length=num_stacked_frames), # obs: A000, ~, ~, ~, AB00, ~, ~, ~, ABC0, ~, ~, ~, ABCD, # ~, ~, ~, BCDE, ... lambda obs: np.stack(obs, axis=-1), ), )), ) class AtariEnvironmentWrapper(dm_env.Environment): """Python environment wrapper that provides DQN Atari preprocessing. This is a thin wrapper around the Atari processor. Expects underlying Atari environment to have interleaved pixels (HWC) and zero-indexed actions. """ def __init__( self, environment: dm_env.Environment, additional_discount: float = 0.99, max_abs_reward: Optional[float] = 1.0, resize_shape: Optional[Tuple[int, int]] = (84, 84), num_action_repeats: int = 4, num_pooled_frames: int = 2, zero_discount_on_life_loss: bool = True, num_stacked_frames: int = 4, grayscaling: bool = True, ): rgb_spec, unused_lives_spec = environment.observation_spec() if rgb_spec.shape[2] != 3: raise ValueError( 'This wrapper assumes interleaved pixel observations with shape ' '(height, width, channels).') if int(environment.action_spec().minimum) != 0: raise ValueError('This wrapper assumes zero-indexed actions.') self._environment = environment self._processor = atari( additional_discount=additional_discount, max_abs_reward=max_abs_reward, resize_shape=resize_shape, num_action_repeats=num_action_repeats, num_pooled_frames=num_pooled_frames, zero_discount_on_life_loss=zero_discount_on_life_loss, num_stacked_frames=num_stacked_frames, grayscaling=grayscaling, ) if grayscaling: self._observation_shape = resize_shape + (num_stacked_frames,) self._observation_spec_name = 'grayscale' else: self._observation_shape = resize_shape + (3, num_stacked_frames) self._observation_spec_name = 'RGB' self._reset_next_step = True def reset(self) -> dm_env.TimeStep: """Resets environment and provides the first processed timestep.""" reset(self._processor) timestep = self._environment.reset() processed_timestep = self._processor(timestep) assert processed_timestep is not None self._reset_next_step = False return processed_timestep def step(self, action: int) -> dm_env.TimeStep: """Steps up to `num_action_repeat` times, returns a processed timestep.""" # This implements the action repeat by repeatedly passing in the last action # until an actual timestep is returned by the processor. if self._reset_next_step: return self.reset() # Ignore action. processed_timestep = None while processed_timestep is None: timestep = self._environment.step(action) processed_timestep = self._processor(timestep) if timestep.last(): self._reset_next_step = True assert processed_timestep is not None return processed_timestep def action_spec(self) -> specs.DiscreteArray: return self._environment.action_spec() def observation_spec(self) -> specs.Array: return specs.Array( shape=self._observation_shape, dtype=np.uint8, name=self._observation_spec_name) class AtariSimpleActionEnvironmentWrapper(dm_env.Environment): """Python environment wrapper for Atari so it takes integer actions. 
Use this when processing is done on the agent side. """ def __init__(self, environment: dm_env.Environment): self._environment = environment if int(environment.action_spec()[0].minimum) != 0: raise ValueError( 'This wrapper assumes zero-indexed actions. Use the Atari setting ' 'zero_indexed_actions=\"true\" to get actions in this format.') def reset(self) -> dm_env.TimeStep: return self._environment.reset() def step(self, action: int) -> dm_env.TimeStep: return self._environment.step([np.array(action).reshape((1,))]) def action_spec(self) -> specs.DiscreteArray: action_spec = self._environment.action_spec()[0] return specs.DiscreteArray( num_values=action_spec.maximum.item() + 1, dtype=action_spec.dtype, name='action_spec') def observation_spec(self) -> specs.Array: return self._environment.observation_spec()
deepmind-research-master
tandem_dqn/processors.py
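A self-contained sketch of the FixedPaddedBuffer behaviour documented above, reproducing the docstring example with length=3 and initial_index=2.

from tandem_dqn import processors

buf = processors.FixedPaddedBuffer(length=3, initial_index=2)
for value in [0, 1, 2, 3, 4, 5, 6]:
  print(buf(value))
# Prints (writing ~ for None): ~~0, 1~~, 12~, 123, 4~~, 45~, 456.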
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Tandem losses.""" from absl.testing import absltest from absl.testing import parameterized import chex import jax from jax.config import config import numpy as np from tandem_dqn import agent from tandem_dqn import losses from tandem_dqn import networks from tandem_dqn import replay def make_tandem_qvals(): return agent.TandemTuple( active=networks.QNetworkOutputs(3. * np.ones((3, 5), np.float32)), passive=networks.QNetworkOutputs(2. * np.ones((3, 5), np.float32)) ) def make_transition(): return replay.Transition( s_tm1=np.zeros(3), a_tm1=np.ones(3, np.int32), r_t=5. * np.ones(3), discount_t=0.9 * np.ones(3), s_t=np.zeros(3)) class DoubleQLossesTest(parameterized.TestCase): def setUp(self): super().setUp() self.qs = make_tandem_qvals() self.transition = make_transition() self.rng_key = jax.random.PRNGKey(42) @chex.all_variants() @parameterized.parameters( ('double_q',), ('double_q_v',), ('double_q_p',), ('double_q_pv',), ('q_regression',), ) def test_active_loss_gradients(self, loss_type): loss_fn = losses.make_loss_fn(loss_type, active=True) def fn(q_tm1, q_t, q_t_target, transition, rng_key): return loss_fn(q_tm1, q_t, q_t_target, transition, rng_key) grad_fn = self.variant(jax.grad(fn, argnums=(0, 1, 2))) dldq_tm1, dldq_t, dldq_t_target = grad_fn( self.qs, self.qs, self.qs, self.transition, self.rng_key) # Assert that only active net gets nonzero gradients. self.assertGreater(np.sum(np.abs(dldq_tm1.active.q_values)), 0.) self.assertTrue(np.all(dldq_t.active.q_values == 0.)) self.assertTrue(np.all(dldq_t_target.active.q_values == 0.)) self.assertTrue(np.all(dldq_t.passive.q_values == 0.)) self.assertTrue(np.all(dldq_tm1.passive.q_values == 0.)) self.assertTrue(np.all(dldq_t_target.passive.q_values == 0.)) @chex.all_variants() @parameterized.parameters( ('double_q',), ('double_q_v',), ('double_q_p',), ('double_q_pv',), ('q_regression',), ) def test_passive_loss_gradients(self, loss_type): loss_fn = losses.make_loss_fn(loss_type, active=False) def fn(q_tm1, q_t, q_t_target, transition, rng_key): return loss_fn(q_tm1, q_t, q_t_target, transition, rng_key) grad_fn = self.variant(jax.grad(fn, argnums=(0, 1, 2))) dldq_tm1, dldq_t, dldq_t_target = grad_fn( self.qs, self.qs, self.qs, self.transition, self.rng_key) # Assert that only passive net gets nonzero gradients. self.assertGreater(np.sum(np.abs(dldq_tm1.passive.q_values)), 0.) self.assertTrue(np.all(dldq_t.passive.q_values == 0.)) self.assertTrue(np.all(dldq_t_target.passive.q_values == 0.)) self.assertTrue(np.all(dldq_t.active.q_values == 0.)) self.assertTrue(np.all(dldq_tm1.active.q_values == 0.)) self.assertTrue(np.all(dldq_t_target.active.q_values == 0.)) if __name__ == '__main__': config.update('jax_numpy_rank_promotion', 'raise') absltest.main()
deepmind-research-master
tandem_dqn/losses_test.py
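The gradient-isolation property these tests check comes from jax.lax.stop_gradient; a minimal standalone sketch of the mechanism, using toy arrays rather than the agent's networks:

import jax
import jax.numpy as jnp

def regression_loss(q, q_target):
  errors = q - jax.lax.stop_gradient(q_target)
  return jnp.mean(0.5 * errors ** 2)  # Same form as rlax.l2_loss.

dq, dq_target = jax.grad(regression_loss, argnums=(0, 1))(
    jnp.ones(5), 2. * jnp.ones(5))
# dq is nonzero; dq_target is identically zero, so no gradient ever reaches
# the network that produced the regression target.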
# Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A Tandem DQN agent implemented in JAX, training on Atari.""" import collections import itertools import sys import typing from absl import app from absl import flags from absl import logging import dm_env import jax from jax.config import config import numpy as np import optax from tandem_dqn import agent as agent_lib from tandem_dqn import atari_data from tandem_dqn import gym_atari from tandem_dqn import losses from tandem_dqn import networks from tandem_dqn import parts from tandem_dqn import processors from tandem_dqn import replay as replay_lib # Relevant flag values are expressed in terms of environment frames. FLAGS = flags.FLAGS flags.DEFINE_string('environment_name', 'pong', '') flags.DEFINE_boolean('use_sticky_actions', False, '') flags.DEFINE_integer('environment_height', 84, '') flags.DEFINE_integer('environment_width', 84, '') flags.DEFINE_integer('replay_capacity', int(1e6), '') flags.DEFINE_bool('compress_state', True, '') flags.DEFINE_float('min_replay_capacity_fraction', 0.05, '') flags.DEFINE_integer('batch_size', 32, '') flags.DEFINE_integer('max_frames_per_episode', 108000, '') # 30 mins. flags.DEFINE_integer('num_action_repeats', 4, '') flags.DEFINE_integer('num_stacked_frames', 4, '') flags.DEFINE_float('exploration_epsilon_begin_value', 1., '') flags.DEFINE_float('exploration_epsilon_end_value', 0.01, '') flags.DEFINE_float('exploration_epsilon_decay_frame_fraction', 0.02, '') flags.DEFINE_float('eval_exploration_epsilon', 0.01, '') flags.DEFINE_integer('target_network_update_period', int(1.2e5), '') flags.DEFINE_float('additional_discount', 0.99, '') flags.DEFINE_float('max_abs_reward', 1., '') flags.DEFINE_integer('seed', 1, '') # GPU may introduce nondeterminism. flags.DEFINE_integer('num_iterations', 200, '') flags.DEFINE_integer('num_train_frames', int(1e6), '') # Per iteration. flags.DEFINE_integer('num_eval_frames', int(5e5), '') # Per iteration. flags.DEFINE_integer('learn_period', 16, '') flags.DEFINE_string('results_csv_path', '/tmp/results.csv', '') # Tandem-specific parameters. 
# Using fixed configs for optimizers: # RMSProp: lr = 0.00025, eps=0.01 / (32 ** 2) # ADAM: lr = 0.00005, eps=0.01 / 32 _OPTIMIZERS = ['rmsprop', 'adam'] flags.DEFINE_enum('optimizer_active', 'rmsprop', _OPTIMIZERS, '') flags.DEFINE_enum('optimizer_passive', 'rmsprop', _OPTIMIZERS, '') _NETWORKS = ['double_q', 'qr'] flags.DEFINE_enum('network_active', 'double_q', _NETWORKS, '') flags.DEFINE_enum('network_passive', 'double_q', _NETWORKS, '') _LOSSES = ['double_q', 'double_q_v', 'double_q_p', 'double_q_pv', 'qr', 'q_regression'] flags.DEFINE_enum('loss_active', 'double_q', _LOSSES, '') flags.DEFINE_enum('loss_passive', 'double_q', _LOSSES, '') flags.DEFINE_integer('tied_layers', 0, '') TandemTuple = agent_lib.TandemTuple def make_optimizer(optimizer_type): """Constructs optimizer.""" if optimizer_type == 'rmsprop': learning_rate = 0.00025 epsilon = 0.01 / (32**2) optimizer = optax.rmsprop( learning_rate=learning_rate, decay=0.95, eps=epsilon, centered=True) elif optimizer_type == 'adam': learning_rate = 0.00005 epsilon = 0.01 / 32 optimizer = optax.adam( learning_rate=learning_rate, eps=epsilon) else: raise ValueError('Unknown optimizer "{}"'.format(optimizer_type)) return optimizer def main(argv): """Trains Tandem DQN agent on Atari.""" del argv logging.info('Tandem DQN on Atari on %s.', jax.lib.xla_bridge.get_backend().platform) random_state = np.random.RandomState(FLAGS.seed) rng_key = jax.random.PRNGKey( random_state.randint(-sys.maxsize - 1, sys.maxsize + 1, dtype=np.int64)) if FLAGS.results_csv_path: writer = parts.CsvWriter(FLAGS.results_csv_path) else: writer = parts.NullWriter() def environment_builder(): """Creates Atari environment.""" env = gym_atari.GymAtari( FLAGS.environment_name, sticky_actions=FLAGS.use_sticky_actions, seed=random_state.randint(1, 2**32)) return gym_atari.RandomNoopsEnvironmentWrapper( env, min_noop_steps=1, max_noop_steps=30, seed=random_state.randint(1, 2**32), ) env = environment_builder() logging.info('Environment: %s', FLAGS.environment_name) logging.info('Action spec: %s', env.action_spec()) logging.info('Observation spec: %s', env.observation_spec()) num_actions = env.action_spec().num_values # Check: qr network and qr losses can only be used together. if ('qr' in FLAGS.network_active) != ('qr' in FLAGS.loss_active): raise ValueError('Active loss/net must either both use QR, or neither.') if ('qr' in FLAGS.network_passive) != ('qr' in FLAGS.loss_passive): raise ValueError('Passive loss/net must either both use QR, or neither.') network = TandemTuple( active=networks.make_network(FLAGS.network_active, num_actions), passive=networks.make_network(FLAGS.network_passive, num_actions), ) loss = TandemTuple( active=losses.make_loss_fn(FLAGS.loss_active, active=True), passive=losses.make_loss_fn(FLAGS.loss_passive, active=False), ) # Tied layers. 
assert 0 <= FLAGS.tied_layers <= 4 if FLAGS.tied_layers > 0 and (FLAGS.network_passive != 'double_q' or FLAGS.network_active != 'double_q'): raise ValueError('Tied layers > 0 is only supported for double_q networks.') layers = [ 'sequential/sequential/conv1', 'sequential/sequential/conv2', 'sequential/sequential/conv3', 'sequential/sequential_1/linear1' ] tied_layers = set(layers[:FLAGS.tied_layers]) def preprocessor_builder(): return processors.atari( additional_discount=FLAGS.additional_discount, max_abs_reward=FLAGS.max_abs_reward, resize_shape=(FLAGS.environment_height, FLAGS.environment_width), num_action_repeats=FLAGS.num_action_repeats, num_pooled_frames=2, zero_discount_on_life_loss=True, num_stacked_frames=FLAGS.num_stacked_frames, grayscaling=True, ) # Create sample network input from sample preprocessor output. sample_processed_timestep = preprocessor_builder()(env.reset()) sample_processed_timestep = typing.cast(dm_env.TimeStep, sample_processed_timestep) sample_network_input = sample_processed_timestep.observation assert sample_network_input.shape == (FLAGS.environment_height, FLAGS.environment_width, FLAGS.num_stacked_frames) exploration_epsilon_schedule = parts.LinearSchedule( begin_t=int(FLAGS.min_replay_capacity_fraction * FLAGS.replay_capacity * FLAGS.num_action_repeats), decay_steps=int(FLAGS.exploration_epsilon_decay_frame_fraction * FLAGS.num_iterations * FLAGS.num_train_frames), begin_value=FLAGS.exploration_epsilon_begin_value, end_value=FLAGS.exploration_epsilon_end_value) if FLAGS.compress_state: def encoder(transition): return transition._replace( s_tm1=replay_lib.compress_array(transition.s_tm1), s_t=replay_lib.compress_array(transition.s_t)) def decoder(transition): return transition._replace( s_tm1=replay_lib.uncompress_array(transition.s_tm1), s_t=replay_lib.uncompress_array(transition.s_t)) else: encoder = None decoder = None replay_structure = replay_lib.Transition( s_tm1=None, a_tm1=None, r_t=None, discount_t=None, s_t=None, a_t=None, mc_return_tm1=None, ) replay = replay_lib.TransitionReplay(FLAGS.replay_capacity, replay_structure, random_state, encoder, decoder) optimizer = TandemTuple( active=make_optimizer(FLAGS.optimizer_active), passive=make_optimizer(FLAGS.optimizer_passive), ) train_rng_key, eval_rng_key = jax.random.split(rng_key) train_agent = agent_lib.TandemDqn( preprocessor=preprocessor_builder(), sample_network_input=sample_network_input, network=network, optimizer=optimizer, loss=loss, transition_accumulator=replay_lib.TransitionAccumulatorWithMCReturn(), replay=replay, batch_size=FLAGS.batch_size, exploration_epsilon=exploration_epsilon_schedule, min_replay_capacity_fraction=FLAGS.min_replay_capacity_fraction, learn_period=FLAGS.learn_period, target_network_update_period=FLAGS.target_network_update_period, tied_layers=tied_layers, rng_key=train_rng_key, ) eval_agent_active = parts.EpsilonGreedyActor( preprocessor=preprocessor_builder(), network=network.active, exploration_epsilon=FLAGS.eval_exploration_epsilon, rng_key=eval_rng_key) eval_agent_passive = parts.EpsilonGreedyActor( preprocessor=preprocessor_builder(), network=network.passive, exploration_epsilon=FLAGS.eval_exploration_epsilon, rng_key=eval_rng_key) # Set up checkpointing. 
checkpoint = parts.NullCheckpoint() state = checkpoint.state state.iteration = 0 state.train_agent = train_agent state.eval_agent_active = eval_agent_active state.eval_agent_passive = eval_agent_passive state.random_state = random_state state.writer = writer if checkpoint.can_be_restored(): checkpoint.restore() # Run single iteration of training or evaluation. def run_iteration(agent, env, num_frames): seq = parts.run_loop(agent, env, FLAGS.max_frames_per_episode) seq_truncated = itertools.islice(seq, num_frames) trackers = parts.make_default_trackers(agent) return parts.generate_statistics(trackers, seq_truncated) def eval_log_output(eval_stats, suffix): human_normalized_score = atari_data.get_human_normalized_score( FLAGS.environment_name, eval_stats['episode_return']) capped_human_normalized_score = np.amin([1., human_normalized_score]) return [ ('eval_episode_return_' + suffix, eval_stats['episode_return'], '% 2.2f'), ('eval_num_episodes_' + suffix, eval_stats['num_episodes'], '%3d'), ('eval_frame_rate_' + suffix, eval_stats['step_rate'], '%4.0f'), ('normalized_return_' + suffix, human_normalized_score, '%.3f'), ('capped_normalized_return_' + suffix, capped_human_normalized_score, '%.3f'), ('human_gap_' + suffix, 1. - capped_human_normalized_score, '%.3f'), ] while state.iteration <= FLAGS.num_iterations: # New environment for each iteration to allow for determinism if preempted. env = environment_builder() # Set agent to train active and passive nets on each learning step. train_agent.set_training_mode('active_passive') logging.info('Training iteration %d.', state.iteration) num_train_frames = 0 if state.iteration == 0 else FLAGS.num_train_frames train_stats = run_iteration(train_agent, env, num_train_frames) logging.info('Evaluation iteration %d - active agent.', state.iteration) eval_agent_active.network_params = train_agent.online_params.active eval_stats_active = run_iteration(eval_agent_active, env, FLAGS.num_eval_frames) logging.info('Evaluation iteration %d - passive agent.', state.iteration) eval_agent_passive.network_params = train_agent.online_params.passive eval_stats_passive = run_iteration(eval_agent_passive, env, FLAGS.num_eval_frames) # Logging and checkpointing. agent_logs = [ 'loss_active', 'loss_passive', 'frac_diff_argmax', 'mc_error_active', 'mc_error_passive', 'mc_error_abs_active', 'mc_error_abs_passive', ] log_output = ( eval_log_output(eval_stats_active, 'active') + eval_log_output(eval_stats_passive, 'passive') + [('iteration', state.iteration, '%3d'), ('frame', state.iteration * FLAGS.num_train_frames, '%5d'), ('train_episode_return', train_stats['episode_return'], '% 2.2f'), ('train_num_episodes', train_stats['num_episodes'], '%3d'), ('train_frame_rate', train_stats['step_rate'], '%4.0f'), ] + [(k, train_stats[k], '% 2.2f') for k in agent_logs] ) log_output_str = ', '.join(('%s: ' + f) % (n, v) for n, v, f in log_output) logging.info(log_output_str) writer.write(collections.OrderedDict((n, v) for n, v, _ in log_output)) state.iteration += 1 checkpoint.save() writer.close() if __name__ == '__main__': config.update('jax_platform_name', 'gpu') # Default to GPU. config.update('jax_numpy_rank_promotion', 'raise') config.config_with_absl() app.run(main)
deepmind-research-master
tandem_dqn/run_tandem.py
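Back-of-envelope arithmetic for the default exploration schedule constructed above, assuming parts.LinearSchedule interpolates linearly from begin_value to end_value over decay_steps starting at begin_t (an assumption about its internals; only the flag defaults below come from the file):

min_replay_capacity_fraction = 0.05
replay_capacity = int(1e6)
num_action_repeats = 4
num_iterations = 200
num_train_frames = int(1e6)
decay_frame_fraction = 0.02

begin_t = int(min_replay_capacity_fraction * replay_capacity
              * num_action_repeats)                            # 200000 steps.
decay_steps = int(decay_frame_fraction * num_iterations * num_train_frames)
# 4000000 steps: epsilon anneals from 1.0 to 0.01 over this window.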
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Plot results for different side effects penalties.

Loads csv result files generated by `run_experiment' and outputs a summary
data frame in a csv file to be used for plotting by plot_results.ipynb.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os.path

from absl import app
from absl import flags
import pandas as pd

from side_effects_penalties.file_loading import load_files

FLAGS = flags.FLAGS

if __name__ == '__main__':  # Avoid defining flags when used as a library.
  flags.DEFINE_string('path', '', 'File path.')
  flags.DEFINE_string('input_suffix', '',
                      'Filename suffix to use when loading data files.')
  flags.DEFINE_string('output_suffix', '',
                      'Filename suffix to use when saving files.')
  flags.DEFINE_bool('bar_plot', True,
                    'Make a data frame for a bar plot (True) ' +
                    'or learning curves (False)')
  flags.DEFINE_string('env_name', 'box', 'Environment name.')
  flags.DEFINE_bool('noops', True, 'Whether the environment includes noops.')
  flags.DEFINE_list('beta_list', [0.1, 0.3, 1.0, 3.0, 10.0, 30.0, 100.0],
                    'List of beta values.')
  flags.DEFINE_list('seed_list', [1], 'List of random seeds.')
  flags.DEFINE_bool('compare_penalties', True,
                    'Compare different penalties using the best beta value ' +
                    'for each penalty (True), or compare different beta ' +
                    'values for the same penalty (False).')
  flags.DEFINE_enum('dev_measure', 'rel_reach',
                    ['none', 'reach', 'rel_reach', 'att_util'],
                    'Deviation measure (used if compare_penalties=False).')
  flags.DEFINE_enum('dev_fun', 'truncation', ['truncation', 'absolute'],
                    'Summary function for the deviation measure ' +
                    '(used if compare_penalties=False)')
  flags.DEFINE_float('value_discount', 0.99,
                     'Discount factor for deviation measure value function ' +
                     '(used if compare_penalties=False)')


def beta_choice(baseline, dev_measure, dev_fun, value_discount, env_name,
                beta_list, seed_list, noops=False, path='', suffix=''):
  """Choose beta value that gives the highest final performance."""
  if dev_measure == 'none':
    return 0.1
  perf_max = float('-inf')
  best_beta = 0.0
  for beta in beta_list:
    df = load_files(baseline=baseline, dev_measure=dev_measure,
                    dev_fun=dev_fun, value_discount=value_discount, beta=beta,
                    env_name=env_name, noops=noops, path=path, suffix=suffix,
                    seed_list=seed_list)
    if df.empty:
      perf = float('-inf')
    else:
      perf = df['performance_smooth'].mean()
    if perf > perf_max:
      perf_max = perf
      best_beta = beta
  return best_beta


def penalty_label(dev_measure, dev_fun, value_discount):
  """Penalty label specifying design choices."""
  dev_measure_labels = {
      'none': 'None', 'rel_reach': 'RR', 'att_util': 'AU', 'reach': 'UR'}
  label = dev_measure_labels[dev_measure]
  disc_lab = 'u' if value_discount == 1.0 else 'd'
  dev_lab = ''
  if dev_measure in ['rel_reach', 'att_util']:
    dev_lab = 't' if dev_fun == 'truncation' else 'a'
  if dev_measure != 'none':
    label = label + '(' + disc_lab + dev_lab + ')'
  return label


def make_summary_data_frame(
    env_name, beta_list, seed_list, final=True, baseline=None,
    dev_measure=None, dev_fun=None, value_discount=None, noops=False,
    compare_penalties=True, path='', input_suffix='', output_suffix=''):
  """Make summary dataframe from multiple csv result files and output to csv."""
  # For each of the penalty parameters (baseline, dev_measure, dev_fun, and
  # value_discount), compare a list of multiple values if the parameter is
  # None, or use the provided parameter value if it is not None
  baseline_list = ['start', 'inaction', 'stepwise', 'step_noroll']
  if dev_measure is not None:
    dev_measure_list = [dev_measure]
  else:
    dev_measure_list = ['none', 'reach', 'rel_reach', 'att_util']
  dataframes = []
  for dev_measure in dev_measure_list:
    # These deviation measures don't have a deviation function:
    if dev_measure in ['reach', 'none']:
      dev_fun_list = ['none']
    elif dev_fun is not None:
      dev_fun_list = [dev_fun]
    else:
      dev_fun_list = ['truncation', 'absolute']
    # These deviation measures must be discounted:
    if dev_measure in ['none', 'att_util']:
      value_discount_list = [0.99]
    elif value_discount is not None:
      value_discount_list = [value_discount]
    else:
      value_discount_list = [0.99, 1.0]
    for baseline in baseline_list:
      for vd in value_discount_list:
        for devf in dev_fun_list:
          # Choose the best beta for this set of penalty parameters if
          # compare_penalties=True, or compare all betas otherwise
          if compare_penalties:
            beta = beta_choice(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, env_name=env_name, noops=noops,
                beta_list=beta_list, seed_list=seed_list, path=path,
                suffix=input_suffix)
            betas = [beta]
          else:
            betas = beta_list
          for beta in betas:
            label = penalty_label(
                dev_measure=dev_measure, dev_fun=devf, value_discount=vd)
            df_part = load_files(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, beta=beta, env_name=env_name,
                noops=noops, path=path, suffix=input_suffix, final=final,
                seed_list=seed_list)
            df_part = df_part.assign(
                baseline=baseline, dev_measure=dev_measure, dev_fun=devf,
                value_discount=vd, beta=beta, env_name=env_name, label=label)
            dataframes.append(df_part)
  df = pd.concat(dataframes, sort=False)
  # Output summary data frame
  final_str = '_final' if final else ''
  if compare_penalties:
    filename = ('df_summary_penalties_' + env_name + final_str +
                output_suffix + '.csv')
  else:
    filename = ('df_summary_betas_' + env_name + '_' + dev_measure + '_' +
                dev_fun + '_' + str(value_discount) + final_str +
                output_suffix + '.csv')
  f = os.path.join(path, filename)
  df.to_csv(f)
  return df


def main(unused_argv):
  compare_penalties = FLAGS.compare_penalties
  dev_measure = None if compare_penalties else FLAGS.dev_measure
  dev_fun = None if compare_penalties else FLAGS.dev_fun
  value_discount = None if compare_penalties else FLAGS.value_discount
  make_summary_data_frame(
      compare_penalties=compare_penalties, env_name=FLAGS.env_name,
      noops=FLAGS.noops, final=FLAGS.bar_plot, dev_measure=dev_measure,
      value_discount=value_discount, dev_fun=dev_fun, path=FLAGS.path,
      input_suffix=FLAGS.input_suffix, output_suffix=FLAGS.output_suffix,
      beta_list=FLAGS.beta_list, seed_list=FLAGS.seed_list)


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
side_effects_penalties/results_summary.py
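Illustrative penalty_label outputs, derived directly from the label rules above:

from side_effects_penalties import results_summary

results_summary.penalty_label('rel_reach', 'truncation', 0.99)  # -> 'RR(dt)'
results_summary.penalty_label('reach', 'none', 1.0)             # -> 'UR(u)'
results_summary.penalty_label('none', 'none', 0.99)             # -> 'None'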
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Helper functions for loading files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import pandas as pd def filename(env_name, noops, dev_measure, dev_fun, baseline, beta, value_discount, seed, path='', suffix=''): """Generate filename for the given set of parameters.""" noop_str = 'noops' if noops else 'nonoops' seed_str = '_' + str(seed) if seed else '' filename_template = ('{env_name}_{noop_str}_{dev_measure}_{dev_fun}' + '_{baseline}_beta_{beta}_vd_{value_discount}' + '{suffix}{seed_str}.csv') full_path = os.path.join(path, filename_template.format( env_name=env_name, noop_str=noop_str, dev_measure=dev_measure, dev_fun=dev_fun, baseline=baseline, beta=beta, value_discount=value_discount, suffix=suffix, seed_str=seed_str)) return full_path def load_files(baseline, dev_measure, dev_fun, value_discount, beta, env_name, noops, path, suffix, seed_list, final=True): """Load result files generated by run_experiment with the given parameters.""" def try_loading(f, final): if os.path.isfile(f): df = pd.read_csv(f, index_col=0) if final: last_episode = max(df['episode']) return df[df.episode == last_episode] else: return df else: return pd.DataFrame() dataframes = [] for seed in seed_list: f = filename(baseline=baseline, dev_measure=dev_measure, dev_fun=dev_fun, value_discount=value_discount, beta=beta, env_name=env_name, noops=noops, path=path, suffix=suffix, seed=int(seed)) df_part = try_loading(f, final) dataframes.append(df_part) df = pd.concat(dataframes) return df
deepmind-research-master
side_effects_penalties/file_loading.py
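An example of the result-file naming convention implemented by filename() above; the parameter values are illustrative:

from side_effects_penalties import file_loading

file_loading.filename(
    env_name='box', noops=True, dev_measure='rel_reach', dev_fun='truncation',
    baseline='stepwise', beta=0.1, value_discount=0.99, seed=1)
# -> 'box_noops_rel_reach_truncation_stepwise_beta_0.1_vd_0.99_1.csv'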
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Side Effects Penalties. Abstract class for implementing a side effects (impact measure) penalty, and various concrete penalties deriving from it. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import copy import enum import random import numpy as np import six from six.moves import range from six.moves import zip import sonnet as snt import tensorflow.compat.v1 as tf class Actions(enum.IntEnum): """Enum for actions the agent can take.""" UP = 0 DOWN = 1 LEFT = 2 RIGHT = 3 NOOP = 4 @six.add_metaclass(abc.ABCMeta) class Baseline(object): """Base class for baseline states.""" def __init__(self, start_timestep, exact=False, env=None, timestep_to_state=None): """Create a baseline. Args: start_timestep: starting state timestep exact: whether to use an exact or approximate baseline env: a copy of the environment (used to simulate exact baselines) timestep_to_state: a function that turns timesteps into states """ self._exact = exact self._env = env self._timestep_to_state = timestep_to_state self._start_timestep = start_timestep self._baseline_state = self._timestep_to_state(self._start_timestep) self._inaction_next = collections.defaultdict( lambda: collections.defaultdict(lambda: 0)) @abc.abstractmethod def calculate(self): """Update and return the baseline state.""" def sample(self, state): """Sample the outcome of a noop in `state`.""" d = self._inaction_next[state] counts = np.array(list(d.values())) index = np.random.choice(a=len(counts), p=counts/sum(counts)) return list(d.keys())[index] def reset(self): """Signal start of new episode.""" self._baseline_state = self._timestep_to_state(self._start_timestep) if self._exact: self._env.reset() @abc.abstractproperty def rollout_func(self): """Function to compute a rollout chain, or None if n/a.""" @property def baseline_state(self): return self._baseline_state class StartBaseline(Baseline): """Starting state baseline.""" def calculate(self, *unused_args): return self._baseline_state @property def rollout_func(self): return None class InactionBaseline(Baseline): """Inaction baseline: the state resulting from taking no-ops from start.""" def calculate(self, prev_state, action, current_state): if self._exact: self._baseline_state = self._timestep_to_state( self._env.step(Actions.NOOP)) else: if action == Actions.NOOP: self._inaction_next[prev_state][current_state] += 1 if self._baseline_state in self._inaction_next: self._baseline_state = self.sample(self._baseline_state) return self._baseline_state @property def rollout_func(self): return None class StepwiseBaseline(Baseline): """Stepwise baseline: the state one no-op after the previous state.""" def __init__(self, start_timestep, exact=False, env=None, timestep_to_state=None, use_rollouts=True): """Create a stepwise baseline. 
Args: start_timestep: starting state timestep exact: whether to use an exact or approximate baseline env: a copy of the environment (used to simulate exact baselines) timestep_to_state: a function that turns timesteps into states use_rollouts: whether to use inaction rollouts """ super(StepwiseBaseline, self).__init__( start_timestep, exact, env, timestep_to_state) self._rollouts = use_rollouts def calculate(self, prev_state, action, current_state): """Update and return the baseline state. Args: prev_state: the state in which `action` was taken action: the action just taken current_state: the state resulting from taking `action` Returns: the baseline state, for computing the penalty for this transition """ if self._exact: if prev_state in self._inaction_next: self._baseline_state = self.sample(prev_state) else: inaction_env = copy.deepcopy(self._env) timestep_inaction = inaction_env.step(Actions.NOOP) self._baseline_state = self._timestep_to_state(timestep_inaction) self._inaction_next[prev_state][self._baseline_state] += 1 timestep_action = self._env.step(action) assert current_state == self._timestep_to_state(timestep_action) else: if action == Actions.NOOP: self._inaction_next[prev_state][current_state] += 1 if prev_state in self._inaction_next: self._baseline_state = self.sample(prev_state) else: self._baseline_state = prev_state return self._baseline_state def _inaction_rollout(self, state): """Compute an (approximate) inaction rollout from a state.""" chain = [] st = state while st not in chain: chain.append(st) if st in self._inaction_next: st = self.sample(st) return chain def parallel_inaction_rollouts(self, s1, s2): """Compute (approximate) parallel inaction rollouts from two states.""" chain = [] states = (s1, s2) while states not in chain: chain.append(states) s1, s2 = states states = (self.sample(s1) if s1 in self._inaction_next else s1, self.sample(s2) if s2 in self._inaction_next else s2) return chain @property def rollout_func(self): return self._inaction_rollout if self._rollouts else None @six.add_metaclass(abc.ABCMeta) class DeviationMeasure(object): """Base class for deviation measures.""" @abc.abstractmethod def calculate(self): """Calculate the deviation between two states.""" @abc.abstractmethod def update(self): """Update any models after seeing a state transition.""" class ReachabilityMixin(object): """Class for computing reachability deviation measure. Computes the relative/un- reachability given a dictionary of reachability scores for pairs of states. Expects _reachability, _discount, and _dev_fun attributes to exist in the inheriting class. """ def calculate(self, current_state, baseline_state, rollout_func=None): """Calculate relative/un- reachability between particular states.""" # relative reachability case if self._dev_fun: if rollout_func: curr_values = self._rollout_values(rollout_func(current_state)) base_values = self._rollout_values(rollout_func(baseline_state)) else: curr_values = self._reachability[current_state] base_values = self._reachability[baseline_state] all_s = set(list(curr_values.keys()) + list(base_values.keys())) total = 0 for s in all_s: diff = base_values[s] - curr_values[s] total += self._dev_fun(diff) d = total / len(all_s) # unreachability case else: assert rollout_func is None d = 1 - self._reachability[current_state][baseline_state] return d def _rollout_values(self, chain): """Compute stepwise rollout values for the relative reachability penalty. 
Args: chain: chain of states in an inaction rollout starting with the state for which to compute the rollout values Returns: a dictionary of the form: { s : (1-discount) sum_{k=0}^inf discount^k R_s(S_k) } where S_k is the k-th state in the inaction rollout from 'state', s is a state, and R_s(S_k) is the reachability of s from S_k. """ rollout_values = collections.defaultdict(lambda: 0) coeff = 1 for st in chain: for s, rch in six.iteritems(self._reachability[st]): rollout_values[s] += coeff * rch * (1.0 - self._discount) coeff *= self._discount last_state = chain[-1] for s, rch in six.iteritems(self._reachability[last_state]): rollout_values[s] += coeff * rch return rollout_values class Reachability(ReachabilityMixin, DeviationMeasure): """Approximate (relative) (un)reachability deviation measure. Unreachability (the default, when `dev_fun=None`) uses the length (say, n) of the shortest path (sequence of actions) from the current state to the baseline state. The reachability score is value_discount ** n. Unreachability is then 1.0 - the reachability score. Relative reachability (when `dev_fun` is not `None`) considers instead the difference in reachability of all other states from the current state versus from the baseline state. We approximate reachability by only considering state transitions that have been observed. Add transitions using the `update` function. """ def __init__(self, value_discount=1.0, dev_fun=None, discount=None): self._value_discount = value_discount self._dev_fun = dev_fun self._discount = discount self._reachability = collections.defaultdict( lambda: collections.defaultdict(lambda: 0)) def update(self, prev_state, current_state, action=None): del action # Unused. self._reachability[prev_state][prev_state] = 1 self._reachability[current_state][current_state] = 1 if self._reachability[prev_state][current_state] < self._value_discount: for s1 in self._reachability.keys(): if self._reachability[s1][prev_state] > 0: for s2 in self._reachability[current_state].keys(): if self._reachability[current_state][s2] > 0: self._reachability[s1][s2] = max( self._reachability[s1][s2], self._reachability[s1][prev_state] * self._value_discount * self._reachability[current_state][s2]) @property def discount(self): return self._discount class UVFAReachability(ReachabilityMixin, DeviationMeasure): """Approximate relative reachability deviation measure using UVFA. We approximate reachability using a neural network only trained on state transitions that have been observed. For each (s0, action, s1) transition, we update the reachability estimate for (s0, action, s) towards the reachability estimate between s1 and s, for each s in a random sample of size update_sample_size. In particular, the loss for the neural network reachability estimate (NN) is sum_s(max_a(NN(s1, a, s)) * value_discount - NN(s0, action, s)), where the sum is over all sampled s, the max is taken over all actions a. At evaluation time, the reachability difference is calculated with respect to a randomly sampled set of states of size calc_sample_size. """ def __init__( self, value_discount=0.95, dev_fun=None, discount=0.95, state_size=36, # Sokoban default num_actions=5, update_sample_size=10, calc_sample_size=10, hidden_size=50, representation_size=5, num_layers=1, base_loss_coeff=0.1, num_stored=100): # Create networks to generate state representations. 
To get a reachability # estimate, take the dot product of the origin network output and the goal # network output, then pass it through a sigmoid function to constrain it to # between 0 and 1. output_sizes = [hidden_size] * num_layers + [representation_size] self._origin_network = snt.nets.MLP( output_sizes=output_sizes, activation=tf.nn.relu, activate_final=False, name='origin_network') self._goal_network = snt.nets.MLP( output_sizes=output_sizes, activation=tf.nn.relu, activate_final=False, name='goal_network') self._value_discount = value_discount self._dev_fun = dev_fun self._discount = discount self._state_size = state_size self._num_actions = num_actions self._update_sample_size = update_sample_size self._calc_sample_size = calc_sample_size self._num_stored = num_stored self._stored_states = set() self._state_0_placeholder = tf.placeholder(tf.float32, shape=(state_size)) self._state_1_placeholder = tf.placeholder(tf.float32, shape=(state_size)) self._action_placeholder = tf.placeholder(tf.float32, shape=(num_actions)) self._update_sample_placeholder = tf.placeholder( tf.float32, shape=(update_sample_size, state_size)) self._calc_sample_placeholder = tf.placeholder( tf.float32, shape=(calc_sample_size, state_size)) # Trained to estimate reachability = value_discount ^ distance. self._sample_loss = self._get_state_action_loss( self._state_0_placeholder, self._state_1_placeholder, self._action_placeholder, self._update_sample_placeholder) # Add additional loss to force observed transitions towards value_discount. self._base_reachability = self._get_state_sample_reachability( self._state_0_placeholder, tf.expand_dims(self._state_1_placeholder, axis=0), action=self._action_placeholder) self._base_case_loss = tf.keras.losses.MSE(self._value_discount, self._base_reachability) self._opt = tf.train.AdamOptimizer().minimize(self._sample_loss + base_loss_coeff * self._base_case_loss) current_state_reachability = self._get_state_sample_reachability( self._state_0_placeholder, self._calc_sample_placeholder) baseline_state_reachability = self._get_state_sample_reachability( self._state_1_placeholder, self._calc_sample_placeholder) self._reachability_calculation = [ tf.reshape(baseline_state_reachability, [-1]), tf.reshape(current_state_reachability, [-1]) ] init = tf.global_variables_initializer() self._sess = tf.Session() self._sess.run(init) def calculate(self, current_state, baseline_state, rollout_func=None): """Compute the reachability penalty between two states.""" current_state = np.array(current_state).flatten() baseline_state = np.array(baseline_state).flatten() sample = self._sample_n_states(self._calc_sample_size) # Run if there are enough states to draw a correctly-sized sample from. 
if sample: base, curr = self._sess.run( self._reachability_calculation, feed_dict={ self._state_0_placeholder: current_state, self._state_1_placeholder: baseline_state, self._calc_sample_placeholder: sample }) return sum(map(self._dev_fun, base - curr)) / self._calc_sample_size else: return 0 def _sample_n_states(self, n): try: return random.sample(self._stored_states, n) except ValueError: return None def update(self, prev_state, current_state, action): prev_state = np.array(prev_state).flatten() current_state = np.array(current_state).flatten() one_hot_action = np.zeros(self._num_actions) one_hot_action[action] = 1 sample = self._sample_n_states(self._update_sample_size) if self._num_stored is None or len(self._stored_states) < self._num_stored: self._stored_states.add(tuple(prev_state)) self._stored_states.add(tuple(current_state)) elif (np.random.random() < 0.01 and tuple(current_state) not in self._stored_states): self._stored_states.pop() self._stored_states.add(tuple(current_state)) # If there aren't enough states to get a full sample, do nothing. if sample: self._sess.run([self._opt], feed_dict={ self._state_0_placeholder: prev_state, self._state_1_placeholder: current_state, self._action_placeholder: one_hot_action, self._update_sample_placeholder: sample }) def _get_state_action_loss(self, prev_state, current_state, action, sample): """Get the loss from differences in state reachability estimates.""" # Calculate NN(s0, action, s) for all s in sample. prev_state_reachability = self._get_state_sample_reachability( prev_state, sample, action=action) # Calculate max_a(NN(s1, a, s)) for all s in sample and all actions a. current_state_reachability = tf.stop_gradient( self._get_state_sample_reachability(current_state, sample)) # Combine to return loss. return tf.keras.losses.MSE( current_state_reachability * self._value_discount, prev_state_reachability) def _get_state_sample_reachability(self, state, sample, action=None): """Calculate reachability from a state to each item in a sample.""" if action is None: state_options = self._tile_with_all_actions(state) else: state_options = tf.expand_dims(tf.concat([state, action], axis=0), axis=0) goal_representations = self._goal_network(sample) # Reachability of sampled states by taking actions reach_result = tf.sigmoid( tf.reduce_max( tf.matmul( goal_representations, self._origin_network(state_options), transpose_b=True), axis=1)) if action is None: # Return 1 if sampled state is already reached (equal to state) reach_no_action = tf.cast(tf.reduce_all(tf.equal(sample, state), axis=1), dtype=tf.float32) reach_result = tf.maximum(reach_result, reach_no_action) return reach_result def _tile_with_all_actions(self, state): """Returns tensor with all state/action combinations.""" state_tiled = tf.tile(tf.expand_dims(state, axis=0), [self._num_actions, 1]) all_actions_tiled = tf.one_hot( tf.range(self._num_actions), depth=self._num_actions) return tf.concat([state_tiled, all_actions_tiled], axis=1) class AttainableUtilityMixin(object): """Class for computing attainable utility measure. Computes attainable utility (averaged over a set of utility functions) given value functions for each utility function. Expects _u_values, _discount, _value_discount, and _dev_fun attributes to exist in the inheriting class. 
""" def calculate(self, current_state, baseline_state, rollout_func=None): if rollout_func: current_values = self._rollout_values(rollout_func(current_state)) baseline_values = self._rollout_values(rollout_func(baseline_state)) else: current_values = [u_val[current_state] for u_val in self._u_values] baseline_values = [u_val[baseline_state] for u_val in self._u_values] penalties = [self._dev_fun(base_val - cur_val) * (1. - self._value_discount) for base_val, cur_val in zip(baseline_values, current_values)] return sum(penalties) / len(penalties) def _rollout_values(self, chain): """Compute stepwise rollout values for the attainable utility penalty. Args: chain: chain of states in an inaction rollout starting with the state for which to compute the rollout values Returns: a list containing (1-discount) sum_{k=0}^inf discount^k V_u(S_k) for each utility function u, where S_k is the k-th state in the inaction rollout from 'state'. """ rollout_values = [0 for _ in self._u_values] coeff = 1 for st in chain: rollout_values = [rv + coeff * u_val[st] * (1.0 - self._discount) for rv, u_val in zip(rollout_values, self._u_values)] coeff *= self._discount last_state = chain[-1] rollout_values = [rv + coeff * u_val[last_state] for rv, u_val in zip(rollout_values, self._u_values)] return rollout_values def _set_util_funs(self, util_funs): """Set up this instance's utility functions. Args: util_funs: either a number of functions to generate or a list of pre-defined utility functions, represented as dictionaries over states: util_funs[i][s] = u_i(s), the utility of s according to u_i. """ if isinstance(util_funs, int): self._util_funs = [ collections.defaultdict(float) for _ in range(util_funs) ] else: self._util_funs = util_funs def _utility(self, u, state): """Apply a random utility function, generating its value if necessary.""" if state not in u: u[state] = np.random.random() return u[state] class AttainableUtility(AttainableUtilityMixin, DeviationMeasure): """Approximate attainable utility deviation measure.""" def __init__(self, value_discount=0.99, dev_fun=np.abs, util_funs=10, discount=None): assert value_discount < 1.0 # AU does not converge otherwise self._value_discount = value_discount self._dev_fun = dev_fun self._discount = discount self._set_util_funs(util_funs) # u_values[i][s] = V_{u_i}(s), the (approximate) value of s according to u_i self._u_values = [ collections.defaultdict(float) for _ in range(len(self._util_funs)) ] # predecessors[s] = set of states known to lead, by some action, to s self._predecessors = collections.defaultdict(set) def update(self, prev_state, current_state, action=None): """Update predecessors and attainable utility estimates.""" del action # Unused. self._predecessors[current_state].add(prev_state) seen = set() queue = [current_state] while queue: s_to = queue.pop(0) seen.add(s_to) for u, u_val in zip(self._util_funs, self._u_values): for s_from in self._predecessors[s_to]: v = self._utility(u, s_from) + self._value_discount * u_val[s_to] if u_val[s_from] < v: u_val[s_from] = v if s_from not in seen: queue.append(s_from) class NoDeviation(DeviationMeasure): """Dummy deviation measure corresponding to no impact penalty.""" def calculate(self, *unused_args): return 0 def update(self, *unused_args): pass class SideEffectPenalty(object): """Impact penalty.""" def __init__( self, baseline, dev_measure, beta=1.0, nonterminal_weight=0.01, use_inseparable_rollout=False): """Make an object to calculate the impact penalty. 
Args: baseline: object for calculating the baseline state dev_measure: object for calculating the deviation between states beta: weight (scaling factor) for the impact penalty nonterminal_weight: penalty weight on nonterminal states. use_inseparable_rollout: whether to compute the penalty as the average of deviations over parallel inaction rollouts from the current and baseline states (True) otherwise just between the current state and baseline state (or by whatever rollout value is provided in the baseline) (False) """ self._baseline = baseline self._dev_measure = dev_measure self._beta = beta self._nonterminal_weight = nonterminal_weight self._use_inseparable_rollout = use_inseparable_rollout def calculate(self, prev_state, action, current_state): """Calculate the penalty associated with a transition, and update models.""" def compute_penalty(current_state, baseline_state): """Compute penalty.""" if self._use_inseparable_rollout: penalty = self._rollout_value(current_state, baseline_state, self._dev_measure.discount, self._dev_measure.calculate) else: penalty = self._dev_measure.calculate(current_state, baseline_state, self._baseline.rollout_func) return self._beta * penalty if current_state: # not a terminal state self._dev_measure.update(prev_state, current_state, action) baseline_state =\ self._baseline.calculate(prev_state, action, current_state) penalty = compute_penalty(current_state, baseline_state) return self._nonterminal_weight * penalty else: # terminal state penalty = compute_penalty(prev_state, self._baseline.baseline_state) return penalty def reset(self): """Signal start of new episode.""" self._baseline.reset() def _rollout_value(self, cur_state, base_state, discount, func): """Compute stepwise rollout value for unreachability.""" # Returns (1-discount) sum_{k=0}^inf discount^k R(S_{t,t+k}, S'_{t,t+k}), # where S_{t,t+k} is k-th state in the inaction rollout from current state, # S'_{t,t+k} is k-th state in the inaction rollout from baseline state, # and R is the reachability function. chain = self._baseline.parallel_inaction_rollouts(cur_state, base_state) coeff = 1 rollout_value = 0 for states in chain: rollout_value += (coeff * func(states[0], states[1]) * (1.0 - discount)) coeff *= discount last_states = chain[-1] rollout_value += coeff * func(last_states[0], last_states[1]) return rollout_value @property def beta(self): return self._beta
deepmind-research-master
side_effects_penalties/side_effects_penalty.py
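A minimal usage sketch (editor's addition, not from the repo) of how the classes above compose: a baseline object supplies the comparison state, a deviation measure scores the difference, and `SideEffectPenalty` combines the two. The `_FixedBaseline` stub is hypothetical and implements only the interface that `SideEffectPenalty` touches; `Reachability` with `dev_fun=None` gives the unreachability measure.

from side_effects_penalties import side_effects_penalty as sep


class _FixedBaseline(object):
  """Hypothetical stand-in for one of the Baseline classes."""

  def __init__(self, start_state):
    self.baseline_state = start_state
    self.rollout_func = None  # no inaction rollouts in this sketch

  def calculate(self, prev_state, action, current_state):
    return self.baseline_state  # always compare against the start state

  def reset(self):
    pass


# Reachability with dev_fun=None is the unreachability deviation measure.
deviation = sep.Reachability(value_discount=0.9)
penalty = sep.SideEffectPenalty(
    _FixedBaseline('s0'), deviation, beta=1.0, nonterminal_weight=1.0)
# Each call first updates the reachability model with the observed
# transition, then scores 1 - reachability(current -> baseline).
print(penalty.calculate('s0', 0, 's1'))  # 1.0: baseline unreachable from s1
print(penalty.calculate('s1', 0, 's0'))  # 0.0: the agent is back at baseline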
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================
deepmind-research-master
side_effects_penalties/__init__.py
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for side_effects_penalty.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import absltest from absl.testing import parameterized import numpy as np from six.moves import range from side_effects_penalties import side_effects_penalty from side_effects_penalties import training from side_effects_penalties.side_effects_penalty import Actions environments = ['box', 'vase', 'sushi_goal'] class SideEffectsTestCase(parameterized.TestCase): def _timestep_to_state(self, timestep): return tuple(map(tuple, np.copy(timestep.observation['board']))) def _env_to_action_range(self, env): action_spec = env.action_spec() action_range = list(range(action_spec.minimum, action_spec.maximum + 1)) return action_range class BaselineTestCase(SideEffectsTestCase): def _create_baseline(self, env_name): self._env, _ = training.get_env(env_name, True) self._baseline_env, _ = training.get_env(env_name, True) baseline_class = getattr(side_effects_penalty, self.__class__.__name__[:-4]) # remove 'Test' self._baseline = baseline_class( self._env.reset(), True, self._baseline_env, self._timestep_to_state) def _test_trajectory(self, actions, key): init_state = self._timestep_to_state(self._env.reset()) self._baseline.reset() current_state = init_state for action in actions: timestep = self._env.step(action) next_state = self._timestep_to_state(timestep) baseline_state = self._baseline.calculate(current_state, action, next_state) comparison_dict = { 'current_state': current_state, 'next_state': next_state, 'init_state': init_state } self.assertEqual(baseline_state, comparison_dict[key]) current_state = next_state class StartBaselineTest(BaselineTestCase): @parameterized.parameters(*environments) def testInit(self, env_name): self._create_baseline(env_name) self._test_trajectory([Actions.NOOP], 'init_state') @parameterized.parameters(*environments) def testTenNoops(self, env_name): self._create_baseline(env_name) self._test_trajectory([Actions.NOOP for _ in range(10)], 'init_state') class InactionBaselineTest(BaselineTestCase): box_env, _ = training.get_env('box', True) box_action_spec = box_env.action_spec() @parameterized.parameters( *list(range(box_action_spec.minimum, box_action_spec.maximum + 1))) def testStaticEnvOneAction(self, action): self._create_baseline('box') self._test_trajectory([action], 'init_state') def testStaticEnvRandomActions(self): self._create_baseline('box') num_steps = np.random.randint(low=1, high=20) action_range = self._env_to_action_range(self._env) actions = [np.random.choice(action_range) for _ in range(num_steps)] self._test_trajectory(actions, 'init_state') @parameterized.parameters(*environments) def testInactionPolicy(self, env_name): self._create_baseline(env_name) num_steps = np.random.randint(low=1, high=20) self._test_trajectory([Actions.NOOP 
for _ in range(num_steps)], 'next_state') class StepwiseBaselineTest(BaselineTestCase): def testStaticEnvRandomActions(self): self._create_baseline('box') action_range = self._env_to_action_range(self._env) num_steps = np.random.randint(low=1, high=20) actions = [np.random.choice(action_range) for _ in range(num_steps)] self._test_trajectory(actions, 'current_state') @parameterized.parameters(*environments) def testInactionPolicy(self, env_name): self._create_baseline(env_name) num_steps = np.random.randint(low=1, high=20) self._test_trajectory([Actions.NOOP for _ in range(num_steps)], 'next_state') @parameterized.parameters(*environments) def testInactionRollout(self, env_name): self._create_baseline(env_name) init_state = self._timestep_to_state(self._env.reset()) self._baseline.reset() action = Actions.NOOP state1 = init_state trajectory = [init_state] for _ in range(10): trajectory.append(self._timestep_to_state(self._env.step(action))) state2 = trajectory[-1] self._baseline.calculate(state1, action, state2) state1 = state2 chain = self._baseline.rollout_func(init_state) self.assertEqual(chain, trajectory[:len(chain)]) if len(chain) < len(trajectory): self.assertEqual(trajectory[len(chain) - 1], trajectory[len(chain)]) def testStaticRollouts(self): self._create_baseline('box') action_range = self._env_to_action_range(self._env) num_steps = np.random.randint(low=1, high=20) actions = [np.random.choice(action_range) for _ in range(num_steps)] state1 = self._timestep_to_state(self._env.reset()) states = [state1] self._baseline.reset() for action in actions: state2 = self._timestep_to_state(self._env.step(action)) states.append(state2) self._baseline.calculate(state1, action, state2) state1 = state2 i1, i2 = np.random.choice(len(states), 2) chain = self._baseline.parallel_inaction_rollouts(states[i1], states[i2]) self.assertLen(chain, 1) chain1 = self._baseline.rollout_func(states[i1]) self.assertLen(chain1, 1) chain2 = self._baseline.rollout_func(states[i2]) self.assertLen(chain2, 1) @parameterized.parameters(('parallel', 'vase'), ('parallel', 'sushi'), ('inaction', 'vase'), ('inaction', 'sushi')) def testConveyorRollouts(self, which_rollout, env_name): self._create_baseline(env_name) init_state = self._timestep_to_state(self._env.reset()) self._baseline.reset() action = Actions.NOOP state1 = init_state init_state_next = self._timestep_to_state(self._env.step(action)) state2 = init_state_next self._baseline.calculate(state1, action, state2) state1 = state2 for _ in range(10): state2 = self._timestep_to_state(self._env.step(action)) self._baseline.calculate(state1, action, state2) state1 = state2 if which_rollout == 'parallel': chain = self._baseline.parallel_inaction_rollouts(init_state, init_state_next) else: chain = self._baseline.rollout_func(init_state) self.assertLen(chain, 5) class NoDeviationTest(SideEffectsTestCase): def _random_initial_transition(self): env_name = np.random.choice(environments) noops = np.random.choice([True, False]) env, _ = training.get_env(env_name, noops) action_range = self._env_to_action_range(env) action = np.random.choice(action_range) state1 = self._timestep_to_state(env.reset()) state2 = self._timestep_to_state(env.step(action)) return (state1, state2) def testNoDeviation(self): deviation = side_effects_penalty.NoDeviation() state1, state2 = self._random_initial_transition() self.assertEqual(deviation.calculate(state1, state2), 0) def testNoDeviationUpdate(self): deviation = side_effects_penalty.NoDeviation() state1, state2 = 
self._random_initial_transition() deviation.update(state1, state2) self.assertEqual(deviation.calculate(state1, state2), 0) class UnreachabilityTest(SideEffectsTestCase): @parameterized.named_parameters(('Discounted', 0.99), ('Undiscounted', 1.0)) def testUnreachabilityCycle(self, gamma): # Reachability with no dev_fun means unreachability deviation = side_effects_penalty.Reachability(value_discount=gamma) env, _ = training.get_env('box', False) state0 = self._timestep_to_state(env.reset()) state1 = self._timestep_to_state(env.step(Actions.LEFT)) # deviation should not be calculated before calling update deviation.update(state0, state1) self.assertEqual(deviation.calculate(state0, state0), 1.0 - 1.0) self.assertEqual(deviation.calculate(state0, state1), 1.0 - gamma) self.assertEqual(deviation.calculate(state1, state0), 1.0 - 0.0) state2 = self._timestep_to_state(env.step(Actions.RIGHT)) self.assertEqual(state0, state2) deviation.update(state1, state2) self.assertEqual(deviation.calculate(state0, state0), 1.0 - 1.0) self.assertEqual(deviation.calculate(state0, state1), 1.0 - gamma) self.assertEqual(deviation.calculate(state1, state0), 1.0 - gamma) self.assertEqual(deviation.calculate(state1, state1), 1.0 - 1.0) if __name__ == '__main__': absltest.main()
deepmind-research-master
side_effects_penalties/side_effects_penalty_test.py
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Q-learning with side effects penalties.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from side_effects_penalties import agent from side_effects_penalties import side_effects_penalty as sep class QLearningSE(agent.QLearning): """Q-learning agent with side-effects penalties.""" def __init__( self, actions, alpha=0.1, epsilon=0.1, q_initialisation=0.0, baseline='start', dev_measure='none', dev_fun='truncation', discount=0.99, value_discount=1.0, beta=1.0, num_util_funs=10, exact_baseline=False, baseline_env=None, start_timestep=None, state_size=None, nonterminal_weight=0.01): """Create a Q-learning agent with a side effects penalty. Args: actions: full discrete action spec. alpha: agent learning rate. epsilon: agent exploration rate. q_initialisation: float, used to initialise the value function. baseline: which baseline state to use ('start', 'inaction', 'stepwise'). dev_measure: deviation measure: - "none" for no penalty, - "reach" for unreachability, - "rel_reach" for relative reachability, - "att_util" for attainable utility, dev_fun: what function to apply in the deviation measure ('truncation' or 'absolute' (for 'rel_reach' and 'att_util'), or 'none' (otherwise)). discount: discount factor for rewards. value_discount: discount factor for value functions in penalties. beta: side effects penalty weight. num_util_funs: number of random utility functions for attainable utility. exact_baseline: whether to use an exact or approximate baseline. baseline_env: copy of environment (with noops) for the exact baseline. start_timestep: copy of starting timestep for the baseline. state_size: the size of each state (flattened) for NN reachability. nonterminal_weight: penalty weight on nonterminal states. 
Raises: ValueError: for incorrect baseline, dev_measure, or dev_fun """ super(QLearningSE, self).__init__(actions, alpha, epsilon, q_initialisation, discount) # Impact penalty: set dev_fun (f) if 'rel_reach' in dev_measure or 'att_util' in dev_measure: if dev_fun == 'truncation': dev_fun = lambda diff: np.maximum(0, diff) elif dev_fun == 'absolute': dev_fun = np.abs else: raise ValueError('Deviation function not recognized') else: assert dev_fun == 'none' dev_fun = None # Impact penalty: create deviation measure if dev_measure in {'reach', 'rel_reach'}: deviation = sep.Reachability(value_discount, dev_fun, discount) elif dev_measure == 'uvfa_rel_reach': deviation = sep.UVFAReachability(value_discount, dev_fun, discount, state_size) elif dev_measure == 'att_util': deviation = sep.AttainableUtility(value_discount, dev_fun, num_util_funs, discount) elif dev_measure == 'none': deviation = sep.NoDeviation() else: raise ValueError('Deviation measure not recognized') use_inseparable_rollout = ( dev_measure == 'reach' and baseline == 'stepwise') # Impact penalty: create baseline if baseline in {'start', 'inaction', 'stepwise'}: baseline_class = getattr(sep, baseline.capitalize() + 'Baseline') baseline = baseline_class(start_timestep, exact_baseline, baseline_env, self._timestep_to_state) elif baseline == 'step_noroll': baseline_class = getattr(sep, 'StepwiseBaseline') baseline = baseline_class(start_timestep, exact_baseline, baseline_env, self._timestep_to_state, False) else: raise ValueError('Baseline not recognized') self._impact_penalty = sep.SideEffectPenalty( baseline, deviation, beta, nonterminal_weight, use_inseparable_rollout) def begin_episode(self): """Perform episode initialisation.""" super(QLearningSE, self).begin_episode() self._impact_penalty.reset() def _calculate_reward(self, timestep, state): reward = super(QLearningSE, self)._calculate_reward(timestep, state) return (reward - self._impact_penalty.calculate( self._current_state, self._current_action, state))
deepmind-research-master
side_effects_penalties/agent_with_penalties.py
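The agent above maps the `dev_fun` string to a callable before building the deviation measure. A hedged sketch of that wiring in isolation, using hypothetical toy states: with truncation, only lost reachability is penalised, never gained reachability.

import numpy as np

from side_effects_penalties import side_effects_penalty as sep

truncation = lambda diff: np.maximum(0, diff)  # as in QLearningSE above
rel_reach = sep.Reachability(value_discount=0.9, dev_fun=truncation)
rel_reach.update('s0', 's1')  # one observed (one-way) transition
# Baseline s0 reaches {s0: 1.0, s1: 0.9}; current s1 reaches only {s1: 1.0}.
# Truncated differences: 1.0 for s0, max(0, 0.9 - 1.0) = 0 for s1; mean 0.5.
print(rel_reach.calculate(current_state='s1', baseline_state='s0'))  # 0.5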
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Vanilla Q-Learning agent."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# collections.abc is needed for Iterable; the bare collections.Iterable
# alias was removed in Python 3.10.
import collections.abc

import numpy as np
from six.moves import range


class EpsilonGreedyPolicy(object):
  """Epsilon greedy policy for table value function lookup."""

  def __init__(self, value_function, actions):
    """Construct an epsilon greedy policy object.

    Args:
      value_function: agent value function as a dict.
      actions: list of possible actions.

    Raises:
      ValueError: if `actions` argument is not an iterable.
    """
    if not isinstance(actions, collections.abc.Iterable):
      raise ValueError('`actions` argument must be an iterable.')

    self._value_function = value_function
    self._actions = actions

  def get_action(self, epsilon, state):
    """Get action following the e-greedy policy.

    Args:
      epsilon: probability of selecting a random action
      state: current state of the game as a state/action tuple.

    Returns:
      Chosen action.
    """
    if np.random.random() < epsilon:
      return np.random.choice(self._actions)
    else:
      values = [self._value_function[(state, action)]
                for action in self._actions]
      max_value = max(values)
      max_indices = [i for i, value in enumerate(values)
                     if value == max_value]
      return self._actions[np.random.choice(max_indices)]


class QLearning(object):
  """Q-learning agent."""

  def __init__(self, actions, alpha=0.1, epsilon=0.1, q_initialisation=0.0,
               discount=0.99):
    """Create a Q-learning agent.

    Args:
      actions: a BoundedArraySpec that specifies the full discrete action
        spec.
      alpha: agent learning rate.
      epsilon: agent exploration rate.
      q_initialisation: float, used to initialise the value function.
      discount: discount factor for rewards.
    """
    self._value_function = collections.defaultdict(lambda: q_initialisation)
    self._valid_actions = list(range(actions.minimum, actions.maximum + 1))
    self._policy = EpsilonGreedyPolicy(self._value_function,
                                       self._valid_actions)

    # Hyperparameters.
    self.alpha = alpha
    self.epsilon = epsilon
    self.discount = discount

    # Episode internal variables.
    self._current_action = None
    self._current_state = None

  def begin_episode(self):
    """Perform episode initialisation."""
    self._current_state = None
    self._current_action = None

  def _timestep_to_state(self, timestep):
    return tuple(map(tuple, np.copy(timestep.observation['board'])))

  def step(self, timestep):
    """Perform a single step in the environment."""
    # Get state observations.
    state = self._timestep_to_state(timestep)
    # This is one of the follow up states (i.e. not the initial state).
    if self._current_state is not None:
      self._update(timestep, state)
    self._current_state = state
    # Determine action.
    self._current_action = self._policy.get_action(self.epsilon, state)
    # Emit action.
return self._current_action def _calculate_reward(self, timestep, unused_state): """Calculate reward: to be extended when impact penalty is added.""" reward = timestep.reward return reward def _update(self, timestep, state): """Perform value function update.""" reward = self._calculate_reward(timestep, state) # Terminal state. if not state: delta = (reward - self._value_function[(self._current_state, self._current_action)]) # Non-terminal state. else: max_action = self._policy.get_action(0, state) delta = ( reward + self.discount * self._value_function[(state, max_action)] - self._value_function[(self._current_state, self._current_action)]) self._value_function[(self._current_state, self._current_action)] += self.alpha * delta def end_episode(self, timestep): """Performs episode cleanup.""" # Update for the terminal state. self._update(timestep, None) @property def value_function(self): return self._value_function
deepmind-research-master
side_effects_penalties/agent.py
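A toy illustration (hypothetical state and actions, editor's addition) of the tabular pieces above: with `epsilon=0` the lookup is purely greedy, and unseen state-action pairs fall back to the defaultdict's initialisation value.

import collections

from side_effects_penalties import agent

values = collections.defaultdict(float)
values[('s', 1)] = 2.0
policy = agent.EpsilonGreedyPolicy(values, actions=[0, 1, 2])
# Greedy lookup: action 1 has value 2.0, actions 0 and 2 default to 0.0.
print(policy.get_action(epsilon=0.0, state='s'))  # 1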
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Run a Q-learning agent with a side effects penalty."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl import app
from absl import flags
import pandas as pd
from six.moves import range
from six.moves import zip

from side_effects_penalties import agent_with_penalties
from side_effects_penalties import training
from side_effects_penalties.file_loading import filename

FLAGS = flags.FLAGS

if __name__ == '__main__':  # Avoid defining flags when used as a library.
  # Side effects penalty settings
  flags.DEFINE_enum('baseline', 'inaction',
                    ['start', 'inaction', 'stepwise', 'step_noroll'],
                    'Baseline.')
  flags.DEFINE_enum('dev_measure', 'rel_reach',
                    ['none', 'reach', 'rel_reach', 'uvfa_rel_reach',
                     'att_util'],
                    'Deviation measure.')
  flags.DEFINE_enum('dev_fun', 'truncation', ['truncation', 'absolute'],
                    'Summary function for the deviation measure.')
  flags.DEFINE_float('discount', 0.99, 'Discount factor for rewards.')
  flags.DEFINE_float('value_discount', 0.99,
                     'Discount factor for deviation measure value function.')
  flags.DEFINE_float('beta', 30.0, 'Weight for side effects penalty.')
  flags.DEFINE_string('nonterminal', 'disc',
                      'Penalty for nonterminal states relative to terminal '
                      'states: none (0), full (1), or disc (1-discount).')
  flags.DEFINE_bool('exact_baseline', False,
                    'Compute the exact baseline using an environment copy.')
  # Agent settings
  flags.DEFINE_bool('anneal', True,
                    'Whether to anneal the exploration rate from 1 to 0.')
  flags.DEFINE_integer('num_episodes', 10000, 'Number of episodes.')
  flags.DEFINE_integer('num_episodes_noexp', 0,
                       'Number of episodes with no exploration.')
  flags.DEFINE_integer('seed', 1, 'Random seed.')
  # Environment settings
  flags.DEFINE_string('env_name', 'box', 'Environment name.')
  flags.DEFINE_bool('noops', True, 'Whether the environment includes noops.')
  flags.DEFINE_integer('movement_reward', 0, 'Movement reward.')
  flags.DEFINE_integer('goal_reward', 1, 'Reward for reaching a goal state.')
  flags.DEFINE_integer('side_effect_reward', -1,
                       'Hidden reward for causing side effects.')
  # Settings for outputting results
  flags.DEFINE_enum('mode', 'save', ['print', 'save'],
                    'Print results or save to file.')
  flags.DEFINE_string('path', '', 'File path.')
  flags.DEFINE_string('suffix', '', 'Filename suffix.')


def run_experiment(
    baseline, dev_measure, dev_fun, discount, value_discount, beta,
    nonterminal, exact_baseline, anneal, num_episodes, num_episodes_noexp,
    seed, env_name, noops, movement_reward, goal_reward, side_effect_reward,
    mode, path, suffix):
  """Run agent and save or print the results."""
  performances = []
  rewards = []
  seeds = []
  episodes = []
  if 'rel_reach' not in dev_measure and 'att_util' not in dev_measure:
    dev_fun = 'none'
  nonterminal_weights = {'none': 0.0, 'disc': 1.0-discount, 'full': 1.0}
  nonterminal_weight = 
nonterminal_weights[nonterminal] reward, performance = training.run_agent( baseline=baseline, dev_measure=dev_measure, dev_fun=dev_fun, discount=discount, value_discount=value_discount, beta=beta, nonterminal_weight=nonterminal_weight, exact_baseline=exact_baseline, anneal=anneal, num_episodes=num_episodes, num_episodes_noexp=num_episodes_noexp, seed=seed, env_name=env_name, noops=noops, movement_reward=movement_reward, goal_reward=goal_reward, side_effect_reward=side_effect_reward, agent_class=agent_with_penalties.QLearningSE) rewards.extend(reward) performances.extend(performance) seeds.extend([seed] * (num_episodes + num_episodes_noexp)) episodes.extend(list(range(num_episodes + num_episodes_noexp))) if mode == 'save': d = {'reward': rewards, 'performance': performances, 'seed': seeds, 'episode': episodes} df = pd.DataFrame(d) df1 = add_smoothed_data(df) f = filename(env_name, noops, dev_measure, dev_fun, baseline, beta, value_discount, path=path, suffix=suffix, seed=seed) df1.to_csv(f) return reward, performance def _smooth(values, window=100): return values.rolling(window,).mean() def add_smoothed_data(df, groupby='seed', window=100): grouped = df.groupby(groupby)[['reward', 'performance']] grouped = grouped.apply(_smooth, window=window).rename(columns={ 'performance': 'performance_smooth', 'reward': 'reward_smooth'}) temp = pd.concat([df, grouped], axis=1) return temp def main(unused_argv): reward, performance = run_experiment( baseline=FLAGS.baseline, dev_measure=FLAGS.dev_measure, dev_fun=FLAGS.dev_fun, discount=FLAGS.discount, value_discount=FLAGS.value_discount, beta=FLAGS.beta, nonterminal=FLAGS.nonterminal, exact_baseline=FLAGS.exact_baseline, anneal=FLAGS.anneal, num_episodes=FLAGS.num_episodes, num_episodes_noexp=FLAGS.num_episodes_noexp, seed=FLAGS.seed, env_name=FLAGS.env_name, noops=FLAGS.noops, movement_reward=FLAGS.movement_reward, goal_reward=FLAGS.goal_reward, side_effect_reward=FLAGS.side_effect_reward, mode=FLAGS.mode, path=FLAGS.path, suffix=FLAGS.suffix) if FLAGS.mode == 'print': print('Performance and reward in the last 10 steps:') print(list(zip(performance, reward))[-10:-1]) if __name__ == '__main__': app.run(main)
deepmind-research-master
side_effects_penalties/run_experiment.py
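Because the flags are only defined under `__main__`, `run_experiment` can also be driven programmatically. A sketch mirroring the default flag values; it assumes `ai_safety_gridworlds` is installed, and `mode='print'` avoids writing any files.

from side_effects_penalties import run_experiment

reward, performance = run_experiment.run_experiment(
    baseline='inaction', dev_measure='rel_reach', dev_fun='truncation',
    discount=0.99, value_discount=0.99, beta=30.0, nonterminal='disc',
    exact_baseline=False, anneal=True, num_episodes=100,
    num_episodes_noexp=0, seed=1, env_name='box', noops=True,
    movement_reward=0, goal_reward=1, side_effect_reward=-1,
    mode='print', path='', suffix='')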
# Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Training loop.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from ai_safety_gridworlds.helpers import factory import numpy as np from six.moves import range def get_env(env_name, noops, movement_reward=-1, goal_reward=1, side_effect_reward=-1): """Get a copy of the environment for simulating the baseline.""" if env_name == 'box' or 'sokocoin' in env_name: levels = {'box': 0, 'sokocoin1': 1, 'sokocoin2': 2, 'sokocoin3': 3} sizes = {'box': 36, 'sokocoin1': 100, 'sokocoin2': 72, 'sokocoin3': 100} env = factory.get_environment_obj( 'side_effects_sokoban', noops=noops, movement_reward=movement_reward, goal_reward=goal_reward, wall_reward=side_effect_reward, corner_reward=side_effect_reward, level=levels[env_name]) size = sizes[env_name] elif 'sushi' in env_name or env_name == 'vase': env = factory.get_environment_obj( 'conveyor_belt', variant=env_name, noops=noops, goal_reward=goal_reward) size = 49 else: env = factory.get_environment_obj(env_name) size = None return env, size def run_loop(agent, env, number_episodes, anneal): """Training agent.""" episodic_returns = [] episodic_performances = [] if anneal: agent.epsilon = 1.0 eps_unit = 1.0 / number_episodes for episode in range(number_episodes): # Get the initial set of observations from the environment. timestep = env.reset() # Prepare agent for a new episode. agent.begin_episode() while True: action = agent.step(timestep) timestep = env.step(action) if timestep.last(): agent.end_episode(timestep) episodic_returns.append(env.episode_return) episodic_performances.append(env.get_last_performance()) break if anneal: agent.epsilon = max(0, agent.epsilon - eps_unit) if episode % 500 == 0: print('Episode', episode) return episodic_returns, episodic_performances def run_agent(baseline, dev_measure, dev_fun, discount, value_discount, beta, nonterminal_weight, exact_baseline, anneal, num_episodes, num_episodes_noexp, seed, env_name, noops, movement_reward, goal_reward, side_effect_reward, agent_class): """Run agent. Create an agent with the given parameters for the side effects penalty. Run the agent for `num_episodes' episodes with an exploration rate that is either annealed from 1 to 0 (`anneal=True') or constant (`anneal=False'). Then run the agent with no exploration for `num_episodes_noexp' episodes. Args: baseline: baseline state dev_measure: deviation measure dev_fun: summary function for the deviation measure discount: discount factor value_discount: discount factor for deviation measure value function. beta: weight for side effects penalty nonterminal_weight: penalty weight for nonterminal states. 
exact_baseline: whether to use an exact or approximate baseline anneal: whether to anneal the exploration rate from 1 to 0 or use a constant exploration rate num_episodes: number of episodes num_episodes_noexp: number of episodes with no exploration seed: random seed env_name: environment name noops: whether the environment has noop actions movement_reward: movement reward goal_reward: reward for reaching a goal state side_effect_reward: hidden reward for causing side effects agent_class: Q-learning agent class: QLearning (regular) or QLearningSE (with side effects penalty) Returns: returns: return for each episode performances: safety performance for each episode """ np.random.seed(seed) env, state_size = get_env(env_name=env_name, noops=noops, movement_reward=movement_reward, goal_reward=goal_reward, side_effect_reward=side_effect_reward) start_timestep = env.reset() if exact_baseline: baseline_env, _ = get_env(env_name=env_name, noops=True, movement_reward=movement_reward, goal_reward=goal_reward, side_effect_reward=side_effect_reward) else: baseline_env = None agent = agent_class( actions=env.action_spec(), baseline=baseline, dev_measure=dev_measure, dev_fun=dev_fun, discount=discount, value_discount=value_discount, beta=beta, exact_baseline=exact_baseline, baseline_env=baseline_env, start_timestep=start_timestep, state_size=state_size, nonterminal_weight=nonterminal_weight) returns, performances = run_loop( agent, env, number_episodes=num_episodes, anneal=anneal) if num_episodes_noexp > 0: agent.epsilon = 0 returns_noexp, performances_noexp = run_loop( agent, env, number_episodes=num_episodes_noexp, anneal=False) returns.extend(returns_noexp) performances.extend(performances_noexp) return returns, performances
deepmind-research-master
side_effects_penalties/training.py
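A quick interaction sketch with `get_env` (again assuming `ai_safety_gridworlds` is installed); 'vase' is a `conveyor_belt` variant, so the flattened state size is 49.

from side_effects_penalties import training
from side_effects_penalties.side_effects_penalty import Actions

env, state_size = training.get_env('vase', noops=True)
timestep = env.reset()
timestep = env.step(Actions.NOOP)  # take a noop from the shared Actions enum
print(state_size)  # 49 for conveyor_belt variants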
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Construct DAGs representing causal graphs, and perform inference on them.""" import collections import haiku as hk import jax import jax.numpy as jnp import pandas as pd from tensorflow_probability.substrates import jax as tfp import tree class Node: """A node in a graphical model. Conceptually, this represents a random variable in a causal probabilistic model. It knows about its 'parents', i.e. other Nodes upon which this Node causally depends. The user is responsible for ensuring that any graph built with this class is acyclic. A node knows how to represent its probability density, conditional on the values of its parents. The node needs to have a 'name', corresponding to the series within the dataframe that should be used to populate it. """ def __init__(self, distribution_module, parents=(), hidden=False): """Initialise a `Node` instance. Args: distribution_module: An instance of `DistributionModule`, a Haiku module that is suitable for modelling the conditional distribution of this node given any parents. parents: `Iterable`, optional. A (potentially nested) collection of nodes which are direct ancestors of `self`. hidden: `bool`, optional. Whether this node is hidden. Hidden nodes are permitted to not have corresponding observations. """ parents = tree.flatten(parents) self._distribution_module = distribution_module self._column = distribution_module.column self._index = distribution_module.index self._hidden = hidden self._observed_value = None # When implementing the path-specific counterfactual fairness algorithm, # we need the concept of a distribution conditional on the 'corrected' # values of the parents. This is achieved via the 'node_to_replacement' # argument of make_distribution. # However, in order to work with the `fix_categorical` and `fix_continuous` # functions, we need to assign counterfactual values for parents at # evaluation time. self._parent_to_value = collections.OrderedDict( (parent, None) for parent in parents) # This is the conditional distribution using no replacements, i.e. it is # conditioned on the observed values of parents. 
    self._distribution = None

  def __repr__(self):
    return 'Node<{}>'.format(self.name)

  @property
  def dim(self):
    """The dimensionality of this node."""
    return self._distribution_module.dim

  @property
  def name(self):
    return self._column

  @property
  def hidden(self):
    return self._hidden

  @property
  def observed_value(self):
    return self._observed_value

  def find_ancestor(self, name):
    """Returns an ancestor node with the given name."""
    if self.name == name:
      return self
    for parent in self.parents:
      found = parent.find_ancestor(name)
      if found is not None:
        return found

  @property
  def parents(self):
    return tuple(self._parent_to_value)

  @property
  def distribution_module(self):
    return self._distribution_module

  @property
  def distribution(self):
    self._distribution = self.make_distribution()
    return self._distribution

  def make_distribution(self, node_to_replacement=None):
    """Make a conditional distribution for this node | parents.

    By default we use values (representing 'real data') from the parent
    nodes as inputs to the distribution; however, we can alternatively swap
    out any of these for arbitrary arrays by specifying
    `node_to_replacement`.

    Args:
      node_to_replacement: `None`, `dict: Node -> DeviceArray`. If specified,
        use the indicated array.

    Returns:
      `tfp.distributions.Distribution`
    """
    cur_parent_to_value = self._parent_to_value
    self._parent_to_value = collections.OrderedDict(
        (parent, parent.observed_value)
        for parent in cur_parent_to_value.keys()
    )
    if node_to_replacement is None:
      parent_values = self._parent_to_value.values()
      return self._distribution_module(*parent_values)
    args = []
    for node, value in self._parent_to_value.items():
      if node in node_to_replacement:
        replacement = node_to_replacement[node]
        args.append(replacement)
      else:
        args.append(value)
    return self._distribution_module(*args)

  def populate(self, data, node_to_replacement=None):
    """Given a dataframe, populate node data.

    If the Node does not have data present, this is taken to be:
    a) an error, if the node is not hidden;
    b) fine, if the node is hidden.
    In case a) an exception will be raised, and in case b) `observed_value`
    will not be mutated.

    Args:
      data: tf.data.Dataset
      node_to_replacement: None | dict(Node -> array). If not None, use the
        given ndarray data rather than extracting data from the frame. This
        is only considered when looking at the inputs to a distribution.

    Raises:
      RuntimeError: If `data` doesn't contain the necessary feature, and the
        node is not hidden.
    """
    column = self._column
    hidden = self._hidden

    replacement = None
    if node_to_replacement is not None and self in node_to_replacement:
      replacement = node_to_replacement[self]

    if replacement is not None:
      # If a replacement is present, this takes priority over any other
      # consideration.
      self._observed_value = replacement
      return

    if self._index < 0:
      if not hidden:
        raise RuntimeError(
            'Node {} is not hidden, and column {} is not in the frame.'.format(
                self, column))
      # Nothing to do - there is no data, and the node is hidden.
      return

    # Produce the observed value for this node.
    self._observed_value = self._distribution_module.prepare_data(data)


class DistributionModule(hk.Module):
  """Common base class for a Haiku module representing a distribution.

  This provides some additional functionality common to all modules that
  would be used as arguments to the `Node` class above.
  """

  def __init__(self, column, index, dim):
    """Initialise a `DistributionModule` instance.

    Args:
      column: `string`.
        The name of the random variable to which this distribution
        corresponds, and should match the name of the series in the pandas
        dataframe.
      index: `int`. The index of the corresponding feature in the dataset.
      dim: `int`. The output dimensionality of the distribution.
    """
    super().__init__(name=column.replace('-', '_'))
    self._column = column
    self._index = index
    self._dim = dim

  @property
  def dim(self):
    """The output dimensionality of this distribution."""
    return self._dim

  @property
  def column(self):
    return self._column

  @property
  def index(self):
    return self._index

  def prepare_data(self, data):
    """Given a general tensor, return an ndarray if required.

    This method implements the functionality delegated from `Node.populate`,
    and it is expected that subclasses will override the implementation
    appropriately.

    Args:
      data: A tf.data.Dataset.

    Returns:
      `np.ndarray` of appropriately converted values for this series.
    """
    return data[:, [self._index]]

  def _package_args(self, args):
    """Concatenate args into a single tensor.

    Args:
      args: `List[DeviceArray]`, length > 0. Each array is of shape
        (batch_size, ?) or (batch_size,). The former will occur if looking
        at e.g. a one-hot encoded categorical variable, and the latter in
        the case of a continuous variable.

    Returns:
      `DeviceArray`, (batch_size, num_values).
    """
    return jnp.concatenate(args, axis=1)


class Gaussian(DistributionModule):
  """A Haiku module that maps some inputs into a normal distribution."""

  def __init__(self, column, index, dim=1, hidden_shape=(),
               hidden_activation=jnp.tanh, scale=None):
    """Initialise a `Gaussian` instance with some dimensionality."""
    super(Gaussian, self).__init__(column, index, dim)
    self._hidden_shape = tuple(hidden_shape)
    self._hidden_activation = hidden_activation
    self._scale = scale

    self._loc_net = hk.nets.MLP(self._hidden_shape + (self._dim,),
                                activation=self._hidden_activation)

  def __call__(self, *args):
    if args:
      # There are arguments - these represent the variables on which we are
      # conditioning. We set the mean of the output distribution to be a
      # function of these values, parameterised with an MLP.
      concatenated_inputs = self._package_args(args)
      loc = self._loc_net(concatenated_inputs)
    else:
      # There are no arguments, so instead have a learnable location
      # parameter.
      loc = hk.get_parameter('loc', shape=[self._dim], init=jnp.zeros)

    if self._scale is None:
      # The scale has not been explicitly specified, in which case it is
      # left to be a single value, i.e. not a function of the conditioning
      # set.
      log_var = hk.get_parameter('log_var', shape=[self._dim], init=jnp.ones)
      scale = jnp.sqrt(jnp.exp(log_var))
    else:
      scale = jnp.float32(self._scale)
    return tfp.distributions.Normal(loc=loc, scale=scale)

  def prepare_data(self, data):
    # For continuous data, we ensure the data is of dtype float32, and
    # additionally that the resultant shape is (num_examples, 1).
    # Note that this implementation only works for dim=1; however, this is
    # currently also enforced by the fact that pandas series cannot be
    # multidimensional.
    result = data[:, [self.index]].astype(jnp.float32)
    if len(result.shape) == 1:
      result = jnp.expand_dims(result, axis=1)
    return result


class GaussianMixture(DistributionModule):
  """A Haiku module that maps some inputs into a mixture of normals."""

  def __init__(self, column, num_components, dim=1):
    """Initialise a `GaussianMixture` instance with some dimensionality.

    Args:
      column: `string`. The name of the column.
      num_components: `int`. The number of gaussians in this mixture.
      dim: `int`. The dimensionality of the variable.
""" super().__init__(column, -1, dim) self._num_components = num_components self._loc_net = hk.nets.MLP([self._dim]) self._categorical_logits_module = hk.nets.MLP([self._num_components]) def __call__(self, *args): # Define component Gaussians to be independent functions of args. locs = [] scales = [] for _ in range(self._num_components): loc = hk.get_parameter('loc', shape=[self._dim], init=jnp.zeros) log_var = hk.get_parameter('log_var', shape=[self._dim], init=jnp.ones) scale = jnp.sqrt(jnp.exp(log_var)) locs.extend(loc) scales.extend(scale) # Define the Categorical distribution which switches between these categorical_logits = hk.get_parameter('categorical_logits', shape=[self._num_components], init=jnp.zeros) # Enforce positivity in the logits categorical_logits = jax.nn.sigmoid(categorical_logits) # If we have a multidimensional node, then the normal distributions above # have a batch shape of (dim,). We want to select between these using the # categorical distribution, so tile the logits to match this shape expanded_logits = jnp.repeat(categorical_logits, self._dim) categorical = tfp.distributions.Categorical(logits=expanded_logits) return tfp.distributions.MixtureSameFamily( mixture_distribution=categorical, components_distribution=tfp.distributions.Normal( loc=locs, scale=scales)) class MLPMultinomial(DistributionModule): """A Haiku module that consists of an MLP + multinomial distribution.""" def __init__(self, column, index, dim, hidden_shape=(), hidden_activation=jnp.tanh): """Initialise an MLPMultinomial instance. Args: column: `string`. Name of the corresponding dataframe column. index: `int`. The index of the input data for this feature. dim: `int`. Number of categories. hidden_shape: `Iterable`, optional. Shape of hidden layers. hidden_activation: `Callable`, optional. Non-linearity for hidden layers. """ super(MLPMultinomial, self).__init__(column, index, dim) self._hidden_shape = tuple(hidden_shape) self._hidden_activation = hidden_activation self._logit_net = hk.nets.MLP(self._hidden_shape + (self.dim,), activation=self._hidden_activation) @classmethod def from_frame(cls, data, column, hidden_shape=()): """Create an MLPMultinomial instance from a pandas dataframe and column.""" # Helper method that uses the dataframe to work out how many categories # are in the given column. The dataframe is not used for any other purpose. if not isinstance(data[column].dtype, pd.api.types.CategoricalDtype): raise ValueError('{} is not categorical.'.format(column)) index = list(data.columns).index(column) num_categories = len(data[column].cat.categories) return cls(column, index, num_categories, hidden_shape) def __call__(self, *args): if args: concatenated_inputs = self._package_args(args) logits = self._logit_net(concatenated_inputs) else: logits = hk.get_parameter('b', shape=[self.dim], init=jnp.zeros) return tfp.distributions.Multinomial(logits=logits, total_count=1.0) def prepare_data(self, data): # For categorical data, we convert to a one-hot representation using the # pandas category 'codes'. These are integers, and will have a definite # ordering that is identical between runs. codes = data[:, self.index] codes = codes.astype(jnp.int32) return jnp.eye(self.dim)[codes] def populate(nodes, dataframe, node_to_replacement=None): """Populate observed values for nodes.""" for node in nodes: node.populate(dataframe, node_to_replacement=node_to_replacement)
deepmind-research-master
counterfactual_fairness/causal_network.py
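A hypothetical two-node sketch of the `Node` API above (editor's addition). The column layout of `batch` (integer category codes in column 0, a continuous feature in column 1) is an assumption for illustration only, and module construction must happen inside `hk.transform`.

import haiku as hk
import jax
import jax.numpy as jnp

from counterfactual_fairness import causal_network


def log_prob_fn(batch):
  sex = causal_network.Node(causal_network.MLPMultinomial('sex', 0, dim=2))
  age = causal_network.Node(
      causal_network.Gaussian('age', 1), parents=[sex])
  # Attach observed values, then score 'age' conditional on its parent.
  causal_network.populate([sex, age], batch)
  return jnp.sum(age.distribution.log_prob(age.observed_value))


batch = jnp.array([[0., 41.], [1., 29.]])
log_prob = hk.transform(log_prob_fn)
params = log_prob.init(jax.random.PRNGKey(0), batch)
print(log_prob.apply(params, jax.random.PRNGKey(0), batch))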
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Adult dataset.

See https://archive.ics.uci.edu/ml/datasets/adult.
"""

import os

from absl import logging
import pandas as pd

_COLUMNS = ('age', 'workclass', 'final-weight', 'education', 'education-num',
            'marital-status', 'occupation', 'relationship', 'race', 'sex',
            'capital-gain', 'capital-loss', 'hours-per-week',
            'native-country', 'income')
_CATEGORICAL_COLUMNS = ('workclass', 'education', 'marital-status',
                        'occupation', 'race', 'relationship', 'sex',
                        'native-country', 'income')


def _read_data(
    name,
    data_path=''):
  # os.path.join only builds the path string; it must be opened before it
  # can be used as a file object.
  with open(os.path.join(data_path, name)) as data_file:
    data = pd.read_csv(data_file, header=None, index_col=False,
                       names=_COLUMNS, skipinitialspace=True, na_values='?')
  for categorical in _CATEGORICAL_COLUMNS:
    data[categorical] = data[categorical].astype('category')
  return data


def _combine_category_coding(df_1, df_2):
  """Combines the categories between dataframes df_1 and df_2.

  This is used to ensure that training and test data use the same category
  coding, so that the one-hot vectors representing the values are compatible
  between training and test data.

  Args:
    df_1: Pandas DataFrame.
    df_2: Pandas DataFrame. Must have the same columns as df_1.
  """
  for column in df_1.columns:
    if df_1[column].dtype.name == 'category':
      categories_1 = set(df_1[column].cat.categories)
      categories_2 = set(df_2[column].cat.categories)
      categories = sorted(categories_1 | categories_2)
      df_1[column].cat.set_categories(categories, inplace=True)
      df_2[column].cat.set_categories(categories, inplace=True)


def read_all_data(root_dir, remove_missing=True):
  """Return (train, test) dataframes, optionally removing incomplete rows."""
  train_data = _read_data('adult.data', root_dir)
  test_data = _read_data('adult.test', root_dir)
  _combine_category_coding(train_data, test_data)
  if remove_missing:
    train_data = train_data.dropna()
    test_data = test_data.dropna()
  logging.info('Training data dtypes: %s', train_data.dtypes)
  return train_data, test_data
deepmind-research-master
counterfactual_fairness/adult.py
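A toy illustration (editor's addition) of what _combine_category_coding guarantees: after taking the union of categories, a given category maps to the same integer code in both frames. The values below are illustrative, not read from the dataset files.

import pandas as pd

df_train = pd.DataFrame(
    {'workclass': pd.Categorical(['Private', 'State-gov'])})
df_test = pd.DataFrame(
    {'workclass': pd.Categorical(['Private', 'Never-worked'])})
categories = sorted(
    set(df_train['workclass'].cat.categories) |
    set(df_test['workclass'].cat.categories))
df_train['workclass'] = df_train['workclass'].cat.set_categories(categories)
df_test['workclass'] = df_test['workclass'].cat.set_categories(categories)
# 'Private' now has the same integer code in both frames.
assert df_train['workclass'].cat.codes[0] == df_test['workclass'].cat.codes[0]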
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training script for causal model for Adult dataset, using PSCF."""

import functools
import time
from typing import Any, List, Mapping, NamedTuple, Sequence

from absl import app
from absl import flags
from absl import logging
import haiku as hk
import jax
import jax.numpy as jnp
from ml_collections.config_flags import config_flags
import numpy as np
import optax
import pandas as pd
from sklearn import metrics
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_probability.substrates import jax as tfp

from counterfactual_fairness import adult
from counterfactual_fairness import causal_network
from counterfactual_fairness import utils
from counterfactual_fairness import variational

FLAGS = flags.FLAGS

config_flags.DEFINE_config_file(
    'config', 'adult_pscf_config.py', 'Training configuration.')

# `main` reads FLAGS.dataset_dir below, so the flag must be defined here.
flags.DEFINE_string('dataset_dir', None,
                    'Directory containing the Adult dataset files.')

LOG_EVERY = 100

# These are all aliases to callables which will return instances of
# particular distribution modules, or a Node itself. This is used to make
# subsequent code more legible.
Node = causal_network.Node
Gaussian = causal_network.Gaussian
MLPMultinomial = causal_network.MLPMultinomial


def build_input(train_data: pd.DataFrame, batch_size: int,
                training_steps: int, shuffle_size: int = 10000):
  """Build an iterator over batches of training data."""
  num_epochs = (training_steps // batch_size) + 1
  ds = utils.get_dataset(train_data, batch_size, shuffle_size,
                         num_epochs=num_epochs)
  ds = ds.prefetch(tf.data.AUTOTUNE)
  return iter(tfds.as_numpy(ds))


class CausalNetOutput(NamedTuple):
  q_hidden_obs: Sequence[tfp.distributions.Distribution]
  p_hidden: Sequence[tfp.distributions.Distribution]
  hidden_samples: Sequence[jnp.ndarray]
  log_p_obs_hidden: jnp.ndarray
  is_male: jnp.ndarray  # indicates which elements of the batch correspond to
  # male individuals


def build_causal_graph(train_data: pd.DataFrame, column_names: List[str],
                       inputs: jnp.ndarray):
  """Build the causal graph of the model."""
  make_multinomial = functools.partial(
      causal_network.MLPMultinomial.from_frame, hidden_shape=(100,))
  make_gaussian = functools.partial(
      causal_network.Gaussian, hidden_shape=(100,))

  # Construct the graphical model. Each random variable is represented by an
  # instance of the `Node` class, as discussed in that class's docstring.

  # The following nodes have no parents, and thus the distribution modules
  # will not be conditional on anything -- they simply represent priors.
  node_a = Node(MLPMultinomial.from_frame(train_data, 'sex'))
  node_c1 = Node(MLPMultinomial.from_frame(train_data, 'native-country'))
  node_c2 = Node(Gaussian('age', column_names.index('age')))

  # These are all hidden nodes, which do not correspond to any actual data in
  # the pandas dataframe loaded previously. We are therefore permitted to
  # control the dimensionality of these nodes as we wish (with the `dim`
  # argument).
  # The distribution module here should be interpreted as saying that we are
  # imposing a multi-modal prior (a mixture of Gaussians) on each latent
  # variable.
  node_hm = Node(causal_network.GaussianMixture('hm', 10, dim=2), hidden=True)
  node_hl = Node(causal_network.GaussianMixture('hl', 10, dim=2), hidden=True)
  node_hr1 = Node(
      causal_network.GaussianMixture('hr1', 10, dim=2), hidden=True)
  node_hr2 = Node(
      causal_network.GaussianMixture('hr2', 10, dim=2), hidden=True)
  node_hr3 = Node(
      causal_network.GaussianMixture('hr3', 10, dim=2), hidden=True)

  # The rest of the graph is now constructed; the order of construction is
  # important, so we can inform each node of its parents.
  # Note that in the paper we simply have one node called "R", but here it is
  # separated into three separate `Node` instances. This is necessary since
  # each node can only represent a single quantity in the dataframe.
  node_m = Node(
      make_multinomial(train_data, 'marital-status'),
      [node_a, node_hm, node_c1, node_c2])
  node_l = Node(
      make_gaussian('education-num', column_names.index('education-num')),
      [node_a, node_hl, node_c1, node_c2, node_m])
  # Each R node conditions on its own latent (Hr1/Hr2/Hr3), mirroring M and
  # L above; the fair-inference code below substitutes samples for these
  # latents via `make_distribution`.
  node_r1 = Node(
      make_multinomial(train_data, 'occupation'),
      [node_a, node_hr1, node_c1, node_c2, node_m, node_l])
  node_r2 = Node(
      make_gaussian('hours-per-week', column_names.index('hours-per-week')),
      [node_a, node_hr2, node_c1, node_c2, node_m, node_l])
  node_r3 = Node(
      make_multinomial(train_data, 'workclass'),
      [node_a, node_hr3, node_c1, node_c2, node_m, node_l])
  node_y = Node(
      MLPMultinomial.from_frame(train_data, 'income'),
      [node_a, node_c1, node_c2, node_m, node_l, node_r1, node_r2, node_r3])

  # We now construct several (self-explanatory) collections of nodes. These
  # will be used at various points later in the code, and serve to provide
  # greater semantic interpretability.
  observable_nodes = (node_a, node_c1, node_c2, node_l, node_m, node_r1,
                      node_r2, node_r3, node_y)

  # The nodes on which each latent variable is conditionally dependent.
  # Note that Y is not in this list, since all of its dependencies are
  # included below, and further it does not depend directly on Hm.
  nodes_on_which_hm_depends = (node_a, node_c1, node_c2, node_m)
  nodes_on_which_hl_depends = (node_a, node_c1, node_c2, node_m, node_l)
  nodes_on_which_hr1_depends = (node_a, node_c1, node_c2, node_m, node_l,
                                node_r1)
  nodes_on_which_hr2_depends = (node_a, node_c1, node_c2, node_m, node_l,
                                node_r2)
  nodes_on_which_hr3_depends = (node_a, node_c1, node_c2, node_m, node_l,
                                node_r3)

  hidden_nodes = (node_hm, node_hl, node_hr1, node_hr2, node_hr3)

  # Function to create the distribution needed for variational inference.
  # This is the same for each latent variable.
  def make_q_x_obs_module(node):
    """Make a Variational module for the given hidden variable."""
    assert node.hidden
    return variational.Variational(
        common_layer_sizes=(20, 20), output_dim=node.dim)

  # For each latent variable, we first construct a Haiku module (using the
  # function above), and then connect it to the graph using the node's
  # value. As described in more detail in the documentation for `Node`,
  # these values represent actual observed data. Therefore we will later
  # be connecting these same modules to the graph in different ways in order
  # to perform fair inference.
  q_hm_obs_module = make_q_x_obs_module(node_hm)
  q_hl_obs_module = make_q_x_obs_module(node_hl)
  q_hr1_obs_module = make_q_x_obs_module(node_hr1)
  q_hr2_obs_module = make_q_x_obs_module(node_hr2)
  q_hr3_obs_module = make_q_x_obs_module(node_hr3)

  causal_network.populate(observable_nodes, inputs)

  q_hm_obs = q_hm_obs_module(
      *(node.observed_value for node in nodes_on_which_hm_depends))
  q_hl_obs = q_hl_obs_module(
      *(node.observed_value for node in nodes_on_which_hl_depends))
  q_hr1_obs = q_hr1_obs_module(
      *(node.observed_value for node in nodes_on_which_hr1_depends))
  q_hr2_obs = q_hr2_obs_module(
      *(node.observed_value for node in nodes_on_which_hr2_depends))
  q_hr3_obs = q_hr3_obs_module(
      *(node.observed_value for node in nodes_on_which_hr3_depends))
  q_hidden_obs = (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs)

  return observable_nodes, hidden_nodes, q_hidden_obs


def build_forward_fn(train_data: pd.DataFrame, column_names: List[str],
                     likelihood_multiplier: float):
  """Create the model's forward pass."""

  def forward_fn(inputs: jnp.ndarray) -> CausalNetOutput:
    """Forward pass."""
    observable_nodes, hidden_nodes, q_hidden = build_causal_graph(
        train_data, column_names, inputs)

    (node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes
    (node_a, _, _, _, _, _, _, _, node_y) = observable_nodes

    # Log-likelihood function.
    def log_p_obs_h(hm_value, hl_value, hr1_value, hr2_value, hr3_value):
      """Compute log P(A, C, M, L, R, Y | H)."""
      # In order to create distributions like P(M | H_m, A, C), we need
      # the value of H_m that we've been provided as an argument, rather than
      # the value stored on H_m (which, in fact, will never be populated
      # since H_m is unobserved).
      # For compactness, we first construct the complete list of replacements.
      node_to_replacement = {
          node_hm: hm_value,
          node_hl: hl_value,
          node_hr1: hr1_value,
          node_hr2: hr2_value,
          node_hr3: hr3_value,
      }

      def log_prob_for_node(node):
        """Given a node, compute its log probability for the given latents."""
        log_prob = jnp.squeeze(
            node.make_distribution(node_to_replacement).log_prob(
                node.observed_value))
        return log_prob

      # We apply the likelihood multiplier to all likelihood terms except that
      # for Y, the target. This is then added on separately in the line below.
      sum_no_y = likelihood_multiplier * sum(
          log_prob_for_node(node)
          for node in observable_nodes
          if node is not node_y)

      return sum_no_y + log_prob_for_node(node_y)

    q_hidden_obs = tuple(q_hidden)
    p_hidden = tuple(node.distribution for node in hidden_nodes)

    # Draw each latent with its own RNG key; reusing a single key here would
    # produce identical noise for every latent variable.
    hidden_samples = tuple(
        q.sample(seed=hk.next_rng_key()) for q in q_hidden_obs)
    log_p_obs_hidden = log_p_obs_h(*hidden_samples)

    # We need to split our batch of data into male and female parts.
    is_male = jnp.equal(node_a.observed_value[:, 1], 1)

    return CausalNetOutput(
        q_hidden_obs=q_hidden_obs,
        p_hidden=p_hidden,
        hidden_samples=hidden_samples,
        log_p_obs_hidden=log_p_obs_hidden,
        is_male=is_male)

  def fair_inference_fn(inputs: jnp.ndarray, batch_size: int,
                        num_prediction_samples: int):
    """Get the fair and unfair predictions for the given input."""
    observable_nodes, hidden_nodes, q_hidden_obs = build_causal_graph(
        train_data, column_names, inputs)

    (node_hm, node_hl, node_hr1, node_hr2, node_hr3) = hidden_nodes
    (node_a, node_c1, node_c2, node_l, node_m,
     node_r1, node_r2, node_r3, node_y) = observable_nodes
    (q_hm_obs, q_hl_obs, q_hr1_obs, q_hr2_obs, q_hr3_obs) = q_hidden_obs

    # *** FAIR INFERENCE ***

    # To predict Y in a fair sense:
    #  * Infer Hm given observations.
    #  * Infer M using inferred Hm, baseline A, real C
    #  * Infer L using inferred Hl, M, real A, C
    #  * Infer Y using inferred M, baseline A, real C
    # This is done by numerical integration, i.e. draw samples from
    # p_fair(Y | A, C, M, L).
    a_all_male = jnp.concatenate(
        (jnp.zeros((batch_size, 1)), jnp.ones((batch_size, 1))), axis=1)

    # Here we take num_prediction_samples samples per observation. This
    # results in an array of shape:
    #   (num_samples, batch_size, hm_dim).
    # However, the forward pass is easier after reshaping to:
    #   (num_samples * batch_size, hm_dim).
    hm_dim = 2  # Must match the `dim` of the latent nodes above.

    def expanded_sample(distribution):
      # Each latent gets its own RNG key, so noise is independent across
      # the five latent variables.
      return distribution.sample(
          num_prediction_samples, seed=hk.next_rng_key()).reshape(
              (batch_size * num_prediction_samples, hm_dim))

    hm_pred_sample = expanded_sample(q_hm_obs)
    hl_pred_sample = expanded_sample(q_hl_obs)
    hr1_pred_sample = expanded_sample(q_hr1_obs)
    hr2_pred_sample = expanded_sample(q_hr2_obs)
    hr3_pred_sample = expanded_sample(q_hr3_obs)

    # The values of the observed nodes need to be tiled to match the dims
    # of the above hidden samples. The `expand` function achieves this.
    def expand(observed_value):
      return jnp.tile(observed_value, (num_prediction_samples, 1))

    expanded_a = expand(node_a.observed_value)
    expanded_a_baseline = expand(a_all_male)
    expanded_c1 = expand(node_c1.observed_value)
    expanded_c2 = expand(node_c2.observed_value)

    # For M, and all subsequent variables, we only generate one sample. This
    # is because we already have *many* samples from the latent variables, and
    # all we require is an independent sample from the distribution.
    m_pred_sample = node_m.make_distribution({
        node_a: expanded_a_baseline,
        node_hm: hm_pred_sample,
        node_c1: expanded_c1,
        node_c2: expanded_c2}).sample(seed=hk.next_rng_key())

    l_pred_sample = node_l.make_distribution({
        node_a: expanded_a,
        node_hl: hl_pred_sample,
        node_c1: expanded_c1,
        node_c2: expanded_c2,
        node_m: m_pred_sample}).sample(seed=hk.next_rng_key())

    r1_pred_sample = node_r1.make_distribution({
        node_a: expanded_a,
        node_hr1: hr1_pred_sample,
        node_c1: expanded_c1,
        node_c2: expanded_c2,
        node_m: m_pred_sample,
        node_l: l_pred_sample}).sample(seed=hk.next_rng_key())
    r2_pred_sample = node_r2.make_distribution({
        node_a: expanded_a,
        node_hr2: hr2_pred_sample,
        node_c1: expanded_c1,
        node_c2: expanded_c2,
        node_m: m_pred_sample,
        node_l: l_pred_sample}).sample(seed=hk.next_rng_key())
    r3_pred_sample = node_r3.make_distribution({
        node_a: expanded_a,
        node_hr3: hr3_pred_sample,
        node_c1: expanded_c1,
        node_c2: expanded_c2,
        node_m: m_pred_sample,
        node_l: l_pred_sample}).sample(seed=hk.next_rng_key())

    # Finally, we sample from the distribution for Y. Like above, we only
    # draw one sample per element in the array.
    y_pred_sample = node_y.make_distribution({
        node_a: expanded_a_baseline,
        # node_a: expanded_a,
        node_c1: expanded_c1,
        node_c2: expanded_c2,
        node_m: m_pred_sample,
        node_l: l_pred_sample,
        node_r1: r1_pred_sample,
        node_r2: r2_pred_sample,
        node_r3: r3_pred_sample}).sample(seed=hk.next_rng_key())

    # Reshape back to (num_samples, batch_size, y_dim), undoing the expanding
    # operation used for sampling.
    y_pred_sample = y_pred_sample.reshape(
        (num_prediction_samples, batch_size, -1))

    # Now form an array of shape (batch_size, y_dim) by taking an expectation
    # over the sample dimension. This represents the probability that the
    # result is in each class.
    y_pred_expectation = jnp.mean(y_pred_sample, axis=0)

    # Find out the predicted y, for later use in a confusion matrix.
    predicted_class_y_fair = utils.multinomial_class(y_pred_expectation)

    # *** NAIVE INFERENCE ***
    predicted_class_y_unfair = utils.multinomial_class(node_y.distribution)

    return predicted_class_y_fair, predicted_class_y_unfair

  return forward_fn, fair_inference_fn


def _loss_fn(
    forward_fn,
    beta: float,
    mmd_sample_size: int,
    constraint_multiplier: float,
    constraint_ratio: float,
    params: hk.Params,
    rng: jnp.ndarray,
    inputs: jnp.ndarray,
) -> jnp.ndarray:
  """Loss function definition."""
  outputs = forward_fn(params, rng, inputs)
  loss = _loss_klqp(outputs, beta)

  # Create the MMD constraint penalty and add it to the overall loss term.
  # `constraint_ratio` is zero until the constraint is switched on (see
  # `Updater.update`), so the penalty contributes nothing before that step.
  constraint_loss = 0.
  for distribution in outputs.q_hidden_obs:
    constraint_loss += (constraint_ratio * constraint_multiplier *
                        utils.mmd_loss(distribution,
                                       outputs.is_male,
                                       mmd_sample_size,
                                       rng))
  loss += constraint_loss

  return loss


def _evaluate(
    fair_inference_fn,
    params: hk.Params,
    rng: jnp.ndarray,
    inputs: jnp.ndarray,
    batch_size: int,
    num_prediction_samples: int,
):
  """Perform evaluation of fair inference."""
  output = fair_inference_fn(params, rng, inputs, batch_size,
                             num_prediction_samples)
  return output


def _loss_klqp(outputs: CausalNetOutput, beta: float) -> jnp.ndarray:
  """Compute the loss on data wrt params."""
  expected_log_q_hidden_obs = sum(
      jnp.sum(q_hidden_obs.log_prob(hidden_sample), axis=1)
      for q_hidden_obs, hidden_sample in zip(outputs.q_hidden_obs,
                                             outputs.hidden_samples))
  assert expected_log_q_hidden_obs.ndim == 1

  # For log probabilities computed from distributions, we need to sum along
  # the last axis, which takes the product of distributions for
  # multi-dimensional hidden variables.
  log_p_hidden = sum(
      jnp.sum(p_hidden.log_prob(hidden_sample), axis=1)
      for p_hidden, hidden_sample in zip(outputs.p_hidden,
                                         outputs.hidden_samples))

  assert outputs.log_p_obs_hidden.ndim == 1

  kl_divergence = (
      beta * (expected_log_q_hidden_obs - log_p_hidden) -
      outputs.log_p_obs_hidden)
  return jnp.mean(kl_divergence)


class Updater:
  """A stateless abstraction around an init_fn/update_fn pair.

  This extracts some common boilerplate from the training loop.
""" def __init__(self, net_init, loss_fn, eval_fn, optimizer: optax.GradientTransformation, constraint_turn_on_step): self._net_init = net_init self._loss_fn = loss_fn self._eval_fn = eval_fn self._opt = optimizer self._constraint_turn_on_step = constraint_turn_on_step @functools.partial(jax.jit, static_argnums=0) def init(self, init_rng, data): """Initializes state of the updater.""" params = self._net_init(init_rng, data) opt_state = self._opt.init(params) out = dict( step=np.array(0), rng=init_rng, opt_state=opt_state, params=params, ) return out @functools.partial(jax.jit, static_argnums=0) def update(self, state: Mapping[str, Any], data: jnp.ndarray): """Updates the state using some data and returns metrics.""" rng = state['rng'] params = state['params'] constraint_ratio = (state['step'] > self._constraint_turn_on_step).astype( float) loss, g = jax.value_and_grad(self._loss_fn, argnums=1)( constraint_ratio, params, rng, data) updates, opt_state = self._opt.update(g, state['opt_state']) params = optax.apply_updates(params, updates) new_state = { 'step': state['step'] + 1, 'rng': rng, 'opt_state': opt_state, 'params': params, } new_metrics = { 'step': state['step'], 'loss': loss, } return new_state, new_metrics @functools.partial(jax.jit, static_argnums=(0, 3, 4)) def evaluate(self, state: Mapping[str, Any], inputs: jnp.ndarray, batch_size: int, num_prediction_samples: int): """Evaluate fair inference.""" rng = state['rng'] params = state['params'] fair_pred, unfair_pred = self._eval_fn(params, rng, inputs, batch_size, num_prediction_samples) return fair_pred, unfair_pred def main(_): flags_config = FLAGS.config # Create the dataset. train_data, test_data = adult.read_all_data(FLAGS.dataset_dir) column_names = list(train_data.columns) train_input = build_input(train_data, flags_config.batch_size, flags_config.num_steps) # Set up the model, loss, and updater. forward_fn, fair_inference_fn = build_forward_fn( train_data, column_names, flags_config.likelihood_multiplier) forward_fn = hk.transform(forward_fn) fair_inference_fn = hk.transform(fair_inference_fn) loss_fn = functools.partial(_loss_fn, forward_fn.apply, flags_config.beta, flags_config.mmd_sample_size, flags_config.constraint_multiplier) eval_fn = functools.partial(_evaluate, fair_inference_fn.apply) optimizer = optax.adam(flags_config.learning_rate) updater = Updater(forward_fn.init, loss_fn, eval_fn, optimizer, flags_config.constraint_turn_on_step) # Initialize parameters. logging.info('Initializing parameters...') rng = jax.random.PRNGKey(42) train_data = next(train_input) state = updater.init(rng, train_data) # Training loop. logging.info('Starting train loop...') prev_time = time.time() for step in range(flags_config.num_steps): train_data = next(train_input) state, stats = updater.update(state, train_data) if step % LOG_EVERY == 0: steps_per_sec = LOG_EVERY / (time.time() - prev_time) prev_time = time.time() stats.update({'steps_per_sec': steps_per_sec}) logging.info({k: float(v) for k, v in stats.items()}) # Evaluate. logging.info('Starting evaluation...') test_input = build_input(test_data, flags_config.batch_size, training_steps=0, shuffle_size=0) predicted_test_y = [] corrected_test_y = [] while True: try: eval_data = next(test_input) # Now run the fair prediction; this projects the input to the latent space # and then performs sampling. 
      predicted_class_y_fair, predicted_class_y_unfair = updater.evaluate(
          state, eval_data, flags_config.batch_size,
          flags_config.num_prediction_samples)
      predicted_test_y.append(predicted_class_y_unfair)
      corrected_test_y.append(predicted_class_y_fair)
    except StopIteration:
      logging.info('Finished evaluation')
      break

  # Join together the predictions from each batch.
  test_y = np.concatenate(predicted_test_y, axis=0)
  tweaked_test_y = np.concatenate(corrected_test_y, axis=0)

  # Note the true values for computing accuracy and confusion matrices.
  y_true = test_data['income'].cat.codes
  # Truncate y_true to match the predictions; the final partial batch is
  # dropped by the input pipeline (drop_remainder=True).
  y_true = y_true[:len(test_y)]

  test_accuracy = metrics.accuracy_score(y_true, test_y)
  tweaked_test_accuracy = metrics.accuracy_score(
      y_true, tweaked_test_y)

  # Print out accuracy and confusion matrices.
  logging.info('Accuracy (full model): %f', test_accuracy)
  logging.info('Confusion matrix:')
  logging.info(metrics.confusion_matrix(y_true, test_y))
  logging.info('')
  logging.info('Accuracy (tweaked with baseline: Male): %f',
               tweaked_test_accuracy)
  logging.info('Confusion matrix:')
  logging.info(metrics.confusion_matrix(y_true, tweaked_test_y))


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
counterfactual_fairness/adult_pscf.py
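A standalone sketch (editor's addition) of the shape logic in `_loss_klqp` above, reproduced with toy Normal q and p distributions over a 2-D latent; the likelihood term is stubbed to zero. All values are illustrative.

import jax
import jax.numpy as jnp
from tensorflow_probability.substrates import jax as tfp

tfd = tfp.distributions

batch_size, latent_dim = 4, 2
q = tfd.Normal(jnp.zeros((batch_size, latent_dim)),
               jnp.ones((batch_size, latent_dim)))
p = tfd.Normal(jnp.zeros((batch_size, latent_dim)),
               2. * jnp.ones((batch_size, latent_dim)))
h = q.sample(seed=jax.random.PRNGKey(0))  # (batch_size, latent_dim)
log_q = jnp.sum(q.log_prob(h), axis=1)    # (batch_size,)
log_p = jnp.sum(p.log_prob(h), axis=1)    # (batch_size,)
log_p_obs_h = jnp.zeros(batch_size)       # stand-in for the likelihood term
beta = 1.0
loss = jnp.mean(beta * (log_q - log_p) - log_p_obs_h)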
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Common utilities.""" from typing import Optional, Union from jax import random import jax.numpy as jnp import pandas as pd import tensorflow as tf from tensorflow_probability.substrates import jax as tfp tfd = tfp.distributions def get_dataset(dataset: pd.DataFrame, batch_size: int, shuffle_size: int = 10000, num_epochs: Optional[int] = None) -> tf.data.Dataset: """Makes a tf.Dataset with correct preprocessing.""" dataset_copy = dataset.copy() for column in dataset.columns: if dataset[column].dtype.name == 'category': dataset_copy.loc[:, column] = dataset[column].cat.codes ds = tf.data.Dataset.from_tensor_slices(dataset_copy.values) if shuffle_size > 0: ds = ds.shuffle(shuffle_size, reshuffle_each_iteration=True) ds = ds.repeat(num_epochs) return ds.batch(batch_size, drop_remainder=True) def multinomial_mode( distribution_or_probs: Union[tfd.Distribution, jnp.DeviceArray] ) -> jnp.DeviceArray: """Calculates the (one-hot) mode of a multinomial distribution. Args: distribution_or_probs: `tfp.distributions.Distribution` | List[tensors]. If the former, it is assumed that it has a `probs` property, and represents a distribution over categories. If the latter, these are taken to be the probabilities of categories directly. In either case, it is assumed that `probs` will be shape (batch_size, dim). Returns: `DeviceArray`, float32, (batch_size, dim). The mode of the distribution - this will be in one-hot form, but contain multiple non-zero entries in the event that more than one probability is joint-highest. """ if isinstance(distribution_or_probs, tfd.Distribution): probs = distribution_or_probs.probs_parameter() else: probs = distribution_or_probs max_prob = jnp.max(probs, axis=1, keepdims=True) mode = jnp.int32(jnp.equal(probs, max_prob)) return jnp.float32(mode / jnp.sum(mode, axis=1, keepdims=True)) def multinomial_class( distribution_or_probs: Union[tfd.Distribution, jnp.DeviceArray] ) -> jnp.DeviceArray: """Computes the mode class of a multinomial distribution. Args: distribution_or_probs: `tfp.distributions.Distribution` | DeviceArray. As for `multinomial_mode`. Returns: `DeviceArray`, float32, (batch_size,). For each element in the batch, the index of the class with highest probability. """ if isinstance(distribution_or_probs, tfd.Distribution): return jnp.argmax(distribution_or_probs.logits_parameter(), axis=1) return jnp.argmax(distribution_or_probs, axis=1) def multinomial_mode_ndarray(probs: jnp.DeviceArray) -> jnp.DeviceArray: """Calculates the (one-hot) mode from an ndarray of class probabilities. Equivalent to `multinomial_mode` above, but implemented for numpy ndarrays rather than Tensors. Args: probs: `DeviceArray`, (batch_size, dim). Probabilities for each class, for each element in a batch. Returns: `DeviceArray`, (batch_size, dim). 
""" max_prob = jnp.amax(probs, axis=1, keepdims=True) mode = jnp.equal(probs, max_prob).astype(jnp.int32) return (mode / jnp.sum(mode, axis=1, keepdims=True)).astype(jnp.float32) def multinomial_accuracy(distribution_or_probs: tfd.Distribution, data: jnp.DeviceArray) -> jnp.DeviceArray: """Compute the accuracy, averaged over a batch of data. Args: distribution_or_probs: `tfp.distributions.Distribution` | List[tensors]. As for functions above. data: `DeviceArray`. Reference data, of shape (batch_size, dim). Returns: `DeviceArray`, float32, (). Overall scalar accuracy. """ return jnp.mean( jnp.sum(multinomial_mode(distribution_or_probs) * data, axis=1)) def softmax_ndarray(logits: jnp.DeviceArray) -> jnp.DeviceArray: """Softmax function, implemented for numpy ndarrays.""" assert len(logits.shape) == 2 # Normalise for better stability. s = jnp.max(logits, axis=1, keepdims=True) e_x = jnp.exp(logits - s) return e_x / jnp.sum(e_x, axis=1, keepdims=True) def get_samples(distribution, num_samples, seed=None): """Given a batched distribution, compute samples and reshape along batch. That is, we have a distribution of shape (batch_size, ...), where each element of the tensor is independent. We then draw num_samples from each component, to give a tensor of shape: (num_samples, batch_size, ...) Args: distribution: `tfp.distributions.Distribution`. The distribution from which to sample. num_samples: `Integral` | `DeviceArray`, int32, (). The number of samples. seed: `Integral` | `None`. The seed that will be forwarded to the call to distribution.sample. Defaults to `None`. Returns: `DeviceArray`, float32, (batch_size * num_samples, ...). Samples for each element of the batch. """ # Obtain the sample from the distribution, which will be of shape # [num_samples] + batch_shape + event_shape. sample = distribution.sample(num_samples, seed=seed) sample = sample.reshape((-1, sample.shape[-1])) # Combine the first two dimensions through a reshape, so the result will # be of shape (num_samples * batch_size,) + shape_tail. return sample def mmd_loss(distribution: tfd.Distribution, is_a: jnp.DeviceArray, num_samples: int, rng: jnp.ndarray, num_random_features: int = 50, gamma: float = 1.): """Given two distributions, compute the Maximum Mean Discrepancy (MMD). More exactly, this uses the 'FastMMD' approximation, a.k.a. 'Random Fourier Features'. See the description, for example, in sections 2.3.1 and 2.4 of https://arxiv.org/pdf/1511.00830.pdf. Args: distribution: Distribution whose `sample()` method will return a DeviceArray of shape (batch_size, dim). is_a: A boolean array indicating which elements of the batch correspond to class A (the remaining indices correspond to class B). num_samples: The number of samples to draw from `distribution`. rng: Random seed provided by the user. num_random_features: The number of random fourier features used in the expansion. gamma: The value of gamma in the Gaussian MMD kernel. Returns: `DeviceArray`, shape (). The scalar MMD value for samples taken from the given distributions. """ if distribution.event_shape == (): # pylint: disable=g-explicit-bool-comparison dim_x = distribution.batch_shape[1] else: dim_x, = distribution.event_shape # Obtain samples from the distribution, which will be of shape # [num_samples] + batch_shape + event_shape. 
samples = distribution.sample(num_samples, seed=rng) w = random.normal(rng, shape=((dim_x, num_random_features))) b = random.uniform(rng, shape=(num_random_features,), minval=0, maxval=2*jnp.pi) def features(x): """Compute the kitchen sink feature.""" # We need to contract last axis of x with first of W - do this with # tensordot. The result has shape: # (?, ?, num_random_features) return jnp.sqrt(2 / num_random_features) * jnp.cos( jnp.sqrt(2 / gamma) * jnp.tensordot(x, w, axes=1) + b) # Compute the expected values of the given features. # The first axis represents the samples from the distribution, # second axis represents the batch_size. # Each of these now has shape (num_random_features,) exp_features = features(samples) # Swap axes so that batch_size is the last dimension to be compatible # with is_a and is_b shape at the next step exp_features_reshaped = jnp.swapaxes(exp_features, 1, 2) # Current dimensions [num_samples, num_random_features, batch_size] exp_features_reshaped_a = jnp.where(is_a, exp_features_reshaped, 0) exp_features_reshaped_b = jnp.where(is_a, 0, exp_features_reshaped) exp_features_a = jnp.mean(exp_features_reshaped_a, axis=(0, 2)) exp_features_b = jnp.mean(exp_features_reshaped_b, axis=(0, 2)) assert exp_features_a.shape == (num_random_features,) difference = exp_features_a - exp_features_b # Compute the squared norm. Shape (). return jnp.tensordot(difference, difference, axes=1) def mmd_loss_exact(distribution_a, distribution_b, num_samples, gamma=1.): """Exact estimate of MMD.""" assert distribution_a.event_shape == distribution_b.event_shape assert distribution_a.batch_shape[1:] == distribution_b.batch_shape[1:] # shape (num_samples * batch_size_a, dim_x) samples_a = get_samples(distribution_a, num_samples) # shape (num_samples * batch_size_b, dim_x) samples_b = get_samples(distribution_b, num_samples) # Make matrices of shape # (size_b, size_a, dim_x) # where: # size_a = num_samples * batch_size_a # size_b = num_samples * batch_size_b size_a = samples_a.shape[0] size_b = samples_b.shape[0] x_a = jnp.expand_dims(samples_a, axis=0) x_a = jnp.tile(x_a, (size_b, 1, 1)) x_b = jnp.expand_dims(samples_b, axis=1) x_b = jnp.tile(x_b, (1, size_a, 1)) def kernel_mean(x, y): """Gaussian kernel mean.""" diff = x - y # Contract over dim_x. exponent = - jnp.einsum('ijk,ijk->ij', diff, diff) / gamma # This has shape (size_b, size_a). kernel_matrix = jnp.exp(exponent) # Shape (). return jnp.mean(kernel_matrix) # Equation 7 from arxiv 1511.00830 return ( kernel_mean(x_a, x_a) + kernel_mean(x_b, x_b) - 2 * kernel_mean(x_a, x_b)) def scalar_log_prob(distribution, val): """Compute the log_prob per batch entry. It is conceptually similar to: jnp.sum(distribution.log_prob(val), axis=1) However, classes like `tfp.distributions.Multinomial` have a log_prob which returns a tensor of shape (batch_size,), which will cause the above incantation to fail. In these cases we fall back to returning just: distribution.log_prob(val) Args: distribution: `tfp.distributions.Distribution` which implements log_prob. val: `DeviceArray`, (batch_size, dim). Returns: `DeviceArray`, (batch_size,). If the result of log_prob has a trailing dimension, we perform a reduce_sum over it. Raises: ValueError: If distribution.log_prob(val) has an unsupported shape. """ log_prob_val = distribution.log_prob(val) if len(log_prob_val.shape) == 1: return log_prob_val elif len(log_prob_val.shape) > 2: raise ValueError('log_prob_val has unexpected shape {}.'.format( log_prob_val.shape)) return jnp.sum(log_prob_val, axis=1)
deepmind-research-master
counterfactual_fairness/utils.py
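A quick check (editor's addition) of the tie-breaking behaviour of `multinomial_mode` above: classes that share the maximum probability split the mass of the returned one-hot vector. The probabilities are illustrative.

import jax.numpy as jnp

probs = jnp.array([[0.2, 0.4, 0.4],
                   [0.7, 0.2, 0.1]])
max_prob = jnp.max(probs, axis=1, keepdims=True)
mode = jnp.int32(jnp.equal(probs, max_prob))
mode = jnp.float32(mode / jnp.sum(mode, axis=1, keepdims=True))
# mode == [[0. , 0.5, 0.5],
#          [1. , 0. , 0. ]]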
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Config for adult PSCF experiment.""" import ml_collections def get_config(): """Return the default configuration.""" config = ml_collections.ConfigDict() config.num_steps = 10000 # Number of training steps to perform. config.batch_size = 128 # Batch size. config.learning_rate = 0.01 # Learning rate # Number of samples to draw for prediction. config.num_prediction_samples = 500 # Batch size to use for prediction. Ideally as big as possible, but may need # to be reduced for memory reasons depending on the value of # `num_prediction_samples`. config.prediction_batch_size = 500 # Multiplier for the likelihood term in the loss config.likelihood_multiplier = 5. # Multiplier for the MMD constraint term in the loss config.constraint_multiplier = 0. # Scaling factor to use in KL term. config.beta = 1.0 # The number of samples we draw from each latent variable distribution. config.mmd_sample_size = 100 # Directory into which results should be placed. By default it is the empty # string, in which case no saving will occur. The directory specified will be # created if it does not exist. config.output_dir = '' # The index of the step at which to turn on the constraint multiplier. For # steps prior to this the multiplier will be zero. config.constraint_turn_on_step = 0 # The random seed for tensorflow that is applied to the graph iff the value is # non-negative. By default the seed is not constrained. config.seed = -1 # When doing fair inference, don't sample when given a sample for the baseline # gender. config.baseline_passthrough = False return config
deepmind-research-master
counterfactual_fairness/adult_pscf_config.py
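A usage sketch (editor's addition; the import path is an assumption): ml_collections configs can also be overridden programmatically, mirroring command-line overrides such as --config.constraint_multiplier=1.0.

from counterfactual_fairness import adult_pscf_config

config = adult_pscf_config.get_config()
config.constraint_multiplier = 1.0  # Turn the MMD constraint penalty on.
config.num_steps = 1000             # Shorter run for smoke-testing.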
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Functions and classes for performing variational inference.""" from typing import Callable, Iterable, Optional import haiku as hk import jax.numpy as jnp from tensorflow_probability.substrates import jax as tfp tfd = tfp.distributions class Variational(hk.Module): """A module representing the variational distribution q(H | *O). H is assumed to be a continuous variable. """ def __init__(self, common_layer_sizes: Iterable[int], activation: Callable[[jnp.DeviceArray], jnp.DeviceArray] = jnp.tanh, output_dim: int = 1, name: Optional[str] = None): """Initialises a `Variational` instance. Args: common_layer_sizes: The number of hidden units in the shared dense network layers. activation: Nonlinearity function to apply to each of the common layers. output_dim: The dimensionality of `H`. name: A name to assign to the module instance. """ super().__init__(name=name) self._common_layer_sizes = common_layer_sizes self._activation = activation self._output_dim = output_dim self._linear_layers = [ hk.Linear(layer_size) for layer_size in self._common_layer_sizes ] self._mean_output = hk.Linear(self._output_dim) self._log_var_output = hk.Linear(self._output_dim) def __call__(self, *args) -> tfd.Distribution: """Create a distribution for q(H | *O). Args: *args: `List[DeviceArray]`. Corresponds to the values of whatever variables are in the conditional set *O. Returns: `tfp.distributions.NormalDistribution` instance. """ # Stack all inputs, ensuring that shapes are consistent and that they are # all of dtype float32. input_ = [hk.Flatten()(arg) for arg in args] input_ = jnp.concatenate(input_, axis=1) # Create a common set of layers, then final layer separates mean & log_var for layer in self._linear_layers: input_ = layer(input_) input_ = self._activation(input_) # input_ now represents a tensor of shape (batch_size, final_layer_size). # This is now put through two final layers, one for the computation of each # of the mean and standard deviation of the resultant distribution. mean = self._mean_output(input_) log_var = self._log_var_output(input_) std = jnp.sqrt(jnp.exp(log_var)) return tfd.Normal(mean, std)
deepmind-research-master
counterfactual_fairness/variational.py
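A usage sketch (editor's addition; the import path is an assumption): building q(H | O1, O2) with the Variational module above and drawing one posterior sample inside an hk.transform. Shapes are illustrative.

import haiku as hk
import jax
import jax.numpy as jnp

from counterfactual_fairness import variational


def _q_sample(o1, o2):
  q = variational.Variational(common_layer_sizes=(20, 20), output_dim=2)
  return q(o1, o2).sample(seed=hk.next_rng_key())

init_fn, apply_fn = hk.transform(_q_sample)
rng = jax.random.PRNGKey(0)
o1 = jnp.ones((8, 3))
o2 = jnp.ones((8, 5))
params = init_fn(rng, o1, o2)
sample = apply_fn(params, rng, o1, o2)  # Shape (8, 2).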
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Ensemble k-fold predictions and generate final submission file.""" import collections import os from absl import app from absl import flags from absl import logging import dill import jax import numpy as np from ogb import lsc # pylint: disable=g-bad-import-order import data_utils import losses _NUM_KFOLD_SPLITS = 10 FLAGS = flags.FLAGS _DATA_ROOT = flags.DEFINE_string('data_root', None, 'Path to the data root') _SPLIT = flags.DEFINE_enum('split', None, ['valid', 'test'], 'Data split') _PREDICTIONS_PATH = flags.DEFINE_string( 'predictions_path', None, 'Path with the output of the k-fold models.') _OUTPUT_PATH = flags.DEFINE_string('output_path', None, 'Output path.') def _np_one_hot(targets: np.ndarray, nb_classes: int): res = np.zeros(targets.shape + (nb_classes,), dtype=np.float32) np.put_along_axis(res, targets.astype(np.int32)[..., None], 1.0, axis=-1) return res def ensemble_predictions( node_idx_to_logits_list, all_labels, node_indices, use_mode_break_tie_by_mean: bool = True, ): """Ensemble together predictions for each node and generate final predictions.""" # First, assert that each node has the same number of predictions to ensemble. num_predictions_per_node = [ len(x) for x in node_idx_to_logits_list.values() ] num_models = np.unique(num_predictions_per_node) assert num_models.shape[0] == 1 num_models = num_models[0] # Gather all logits, shape should be [num_nodes, num_models, num_classes]. all_logits = np.stack( [np.stack(node_idx_to_logits_list[idx]) for idx in node_indices]) assert all_logits.shape == (node_indices.shape[0], num_models, data_utils.NUM_CLASSES) # Softmax on the final axis. all_probs = jax.nn.softmax(all_logits, axis=-1) # Take average across models axis to get probabilities. mean_probs = np.mean(all_probs, axis=1) # Assert there are no 2 equal logits for different classes. max_logit_value = np.max(all_logits, axis=-1) num_classes_with_max_value = ( all_logits == max_logit_value[..., None]).sum(axis=-1) num_logit_ties = (num_classes_with_max_value > 1).sum() if num_logit_ties: logging.warn( 'Found %d models with the exact same logits for two of the classes. ' '`argmax` will choose the first.', num_logit_ties) # Each model votes on one class per type. all_votes = np.argmax(all_logits, axis=-1) assert all_votes.shape == (node_indices.shape[0], num_models) all_votes_one_hot = _np_one_hot(all_votes, data_utils.NUM_CLASSES) assert all_votes_one_hot.shape == (node_indices.shape[0], num_models, data_utils.NUM_CLASSES) num_votes_per_class = np.sum(all_votes_one_hot, axis=1) assert num_votes_per_class.shape == ( node_indices.shape[0], data_utils.NUM_CLASSES) if use_mode_break_tie_by_mean: # Slight hack, give high weight to votes (any number > 1 works really) # and add probabilities between [0, 1] per class to tie-break only within # classes with equal votes. total_score = 10 * num_votes_per_class + mean_probs else: # Just take mean. 
    total_score = mean_probs
  ensembled_logits = np.log(total_score)
  return losses.Predictions(
      node_indices=node_indices,
      labels=all_labels,
      logits=ensembled_logits,
      predictions=np.argmax(ensembled_logits, axis=-1),
  )


def load_predictions(predictions_path, split):
  """Load the set of predictions made by each of the k-fold models."""
  # Generate list of predictions per node.
  # Note for validation each validation index is only present in exactly 1
  # model of the k-fold, however for test it is present in all of them.
  node_idx_to_logits_list = collections.defaultdict(list)

  # For the 10 models in the ensemble.
  for i in range(_NUM_KFOLD_SPLITS):
    path = os.path.join(predictions_path, str(i))

    # Find subdirectories.
    # Directories will be something like:
    # os.path.join(path, "step_104899_2021-06-14T18:20:05", "(test|valid).dill")
    # So we make sure there is only one.
    candidates = []
    for date_str in os.listdir(path):
      candidate_path = os.path.join(path, date_str, f'{split}.dill')
      if os.path.exists(candidate_path):
        candidates.append(candidate_path)
    if not candidates:
      raise ValueError(f'No {split} predictions found at {path}')
    elif len(candidates) > 1:
      raise ValueError(f'Found more than one {split} predictions: {candidates}')
    path_for_kth_model_predictions = candidates[0]
    with open(path_for_kth_model_predictions, 'rb') as f:
      results = dill.load(f)
    logging.info('Loaded %s', path_for_kth_model_predictions)

    for (node_idx, logits) in zip(results.node_indices, results.logits):
      node_idx_to_logits_list[node_idx].append(logits)

  return node_idx_to_logits_list


def generate_ensembled_predictions(
    data_root: str, predictions_path: str, split: str) -> losses.Predictions:
  """Ensembles predictions from all k-fold models into a final submission."""

  array_dict = data_utils.get_arrays(
      data_root=data_root,
      return_pca_embeddings=False,
      return_adjacencies=False)

  # Load all valid and test predictions.
  node_idx_to_logits_list = load_predictions(predictions_path, split)

  # Assert that the indices loaded are as expected.
  expected_idx = array_dict[f'{split}_indices']
  idx_found = np.array(list(node_idx_to_logits_list.keys()))
  assert np.all(np.sort(idx_found) == expected_idx)

  if split == 'valid':
    true_labels = array_dict['paper_label'][expected_idx.astype(np.int32)]
  else:
    # Don't know the test labels.
    true_labels = np.full(expected_idx.shape, np.nan)

  # Ensemble together all predictions.
  return ensemble_predictions(
      node_idx_to_logits_list, true_labels, expected_idx)


def evaluate_validation(valid_predictions):

  evaluator = lsc.MAG240MEvaluator()

  evaluator_output = evaluator.eval(
      dict(y_pred=valid_predictions.predictions.astype(np.float64),
           y_true=valid_predictions.labels))
  logging.info(
      'Validation accuracy as reported by MAG240MEvaluator: %s',
      evaluator_output)


def save_test_submission_file(test_predictions, output_dir):
  evaluator = lsc.MAG240MEvaluator()
  evaluator.save_test_submission(
      dict(y_pred=test_predictions.predictions.astype(np.float64)),
      output_dir)
  logging.info('Test submission file generated at %s', output_dir)


def main(argv):
  del argv

  split = _SPLIT.value

  ensembled_predictions = generate_ensembled_predictions(
      data_root=_DATA_ROOT.value,
      predictions_path=_PREDICTIONS_PATH.value,
      split=split)

  output_dir = _OUTPUT_PATH.value
  os.makedirs(output_dir, exist_ok=True)

  if split == 'valid':
    evaluate_validation(ensembled_predictions)
  elif split == 'test':
    save_test_submission_file(ensembled_predictions, output_dir)

  ensembled_predictions_path = os.path.join(output_dir, f'{split}.dill')
  assert not os.path.exists(ensembled_predictions_path)
  with open(ensembled_predictions_path, 'wb') as f:
    dill.dump(ensembled_predictions, f)
  logging.info(
      '%s predictions stored at %s', split, ensembled_predictions_path)


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
ogb_lsc/mag/ensemble_predictions.py
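A toy illustration (editor's addition) of the `use_mode_break_tie_by_mean` scoring above: votes dominate the score, and mean probabilities only break ties between equally-voted classes. The logits are illustrative.

import jax
import numpy as np

all_logits = np.array([[[2.0, 1.0, 0.0],    # model 1 votes class 0
                        [0.0, 2.0, 1.0]]])  # model 2 votes class 1
all_probs = jax.nn.softmax(all_logits, axis=-1)
mean_probs = np.mean(all_probs, axis=1)  # (1, 3)
votes = np.argmax(all_logits, axis=-1)   # (1, 2)
num_votes = np.stack([np.bincount(v, minlength=3) for v in votes])
total_score = 10 * num_votes + mean_probs
# Classes 0 and 1 tie at one vote each; the mean probability decides.
assert np.argmax(total_score, axis=-1)[0] == 1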
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates the k-fold validation splits.""" import os from absl import app from absl import flags import data_utils _DATA_ROOT = flags.DEFINE_string( 'data_root', None, required=True, help='Path containing the downloaded data.') _OUTPUT_DIR = flags.DEFINE_string( 'output_dir', None, required=True, help='Output directory to write the splits to') def main(argv): del argv array_dict = data_utils.get_arrays( data_root=_DATA_ROOT.value, return_pca_embeddings=False, return_adjacencies=False) os.makedirs(_OUTPUT_DIR.value, exist_ok=True) data_utils.generate_k_fold_splits( train_idx=array_dict['train_indices'], valid_idx=array_dict['valid_indices'], output_path=_OUTPUT_DIR.value, num_splits=data_utils.NUM_K_FOLD_SPLITS) if __name__ == '__main__': app.run(main)
deepmind-research-master
ogb_lsc/mag/generate_validation_splits.py
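A hedged sketch (editor's addition): the real split logic lives in data_utils.generate_k_fold_splits, which is not shown in this file; a plain-NumPy version of a k-fold partition over the training indices would look roughly like this.

import numpy as np

train_idx = np.arange(100)
num_splits = 10
perm = np.random.RandomState(42).permutation(train_idx)
folds = np.array_split(perm, num_splits)
for k in range(num_splits):
  valid_k = folds[k]  # fold k held out for validation
  train_k = np.concatenate([f for i, f in enumerate(folds) if i != k])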
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Split and save the train/valid/test indices.

Usage:

python3 split_and_save_indices.py --data_root="mag_data"
"""

import pathlib

from absl import app
from absl import flags
import numpy as np
import torch

Path = pathlib.Path


FLAGS = flags.FLAGS
flags.DEFINE_string('data_root', None, 'Data root directory')


def main(argv) -> None:
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  mag_directory = Path(FLAGS.data_root) / 'mag240m_kddcup2021'
  raw_directory = mag_directory / 'raw'
  # Create the `raw` directory itself (not just its parent), since the
  # index files below are written into it.
  raw_directory.mkdir(parents=True, exist_ok=True)

  splits_dict = torch.load(str(mag_directory / 'split_dict.pt'))
  for key, indices in splits_dict.items():
    np.save(str(raw_directory / f'{key}_idx.npy'), indices)


if __name__ == '__main__':
  # The flag is named `data_root`, so that is the name to mark as required.
  flags.mark_flag_as_required('data_root')
  app.run(main)
deepmind-research-master
ogb_lsc/mag/split_and_save_indices.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Apply PCA to the papers' BERT features.

Compute papers' PCA features.
Recompute author and institution features from the paper PCA features.
"""

import pathlib
import time

from absl import app
from absl import flags
from absl import logging
import numpy as np

# pylint: disable=g-bad-import-order
import data_utils

Path = pathlib.Path


_NUMBER_OF_PAPERS_TO_ESTIMATE_PCA_ON = 1000000  # None indicates all.

FLAGS = flags.FLAGS
flags.DEFINE_string('data_root', None, 'Data root directory')


def _sample_vectors(vectors, num_samples, seed=0):
  """Randomly sample some vectors."""
  rand = np.random.RandomState(seed=seed)
  indices = rand.choice(vectors.shape[0], size=num_samples, replace=False)
  return vectors[indices]


def _pca(feat):
  """Returns evals (variances), evecs (rows are principal components)."""
  cov = np.cov(feat.T)
  _, evals, evecs = np.linalg.svd(cov, full_matrices=True)
  return evals, evecs


def _read_raw_paper_features():
  """Load raw paper features."""
  path = Path(FLAGS.data_root) / data_utils.RAW_NODE_FEATURES_FILENAME
  try:  # Use mmap if possible.
    features = np.load(path, mmap_mode='r')
  except FileNotFoundError:
    with open(path, 'rb') as fid:
      features = np.load(fid)
  return features


def _get_principal_components(features,
                              num_principal_components=129,
                              num_samples=10000,
                              seed=2,
                              dtype='f4'):
  """Estimate PCA features."""
  sample = _sample_vectors(
      features[:_NUMBER_OF_PAPERS_TO_ESTIMATE_PCA_ON], num_samples, seed=seed)
  # Compute PCA basis.
  _, evecs = _pca(sample)
  return evecs[:num_principal_components].T.astype(dtype)


def _project_features_onto_principal_components(features,
                                                principal_components,
                                                block_size=1000000):
  """Apply PCA iteratively."""
  num_principal_components = principal_components.shape[1]
  dtype = principal_components.dtype
  num_vectors = features.shape[0]
  # Blocks partition the rows (vectors), so the block count is derived from
  # `num_vectors`.
  num_blocks = (num_vectors - 1) // block_size + 1
  pca_features = np.empty([num_vectors, num_principal_components],
                          dtype=dtype)

  # Loop through in blocks.
  start_time = time.time()
  for i in range(num_blocks):
    i_start = i * block_size
    i_end = (i + 1) * block_size
    f = np.array(features[i_start:i_end].copy())
    pca_features[i_start:i_end] = np.dot(f, principal_components).astype(dtype)
    del f
    elapsed_time = time.time() - start_time
    time_left = elapsed_time / (i + 1) * (num_blocks - i - 1)
    logging.info('Features %d / %d. Elapsed time %.1f. Time left: %.1f',
                 i_end, num_vectors, elapsed_time, time_left)
  return pca_features


def _read_adjacency_indices():
  # Get adjacencies.
return data_utils.get_arrays( data_root=FLAGS.data_root, use_fused_node_labels=False, use_fused_node_adjacencies=False, return_pca_embeddings=False, ) def _compute_author_pca_features(paper_pca_features, index_arrays): return data_utils.paper_features_to_author_features( index_arrays['author_paper_index'], paper_pca_features) def _compute_institution_pca_features(author_pca_features, index_arrays): return data_utils.author_features_to_institution_features( index_arrays['institution_author_index'], author_pca_features) def _write_array(path, array): path.parent.mkdir(parents=True, exist_ok=True) with open(path, 'wb') as fid: np.save(fid, array) def main(unused_argv): data_root = Path(FLAGS.data_root) raw_paper_features = _read_raw_paper_features() principal_components = _get_principal_components(raw_paper_features) paper_pca_features = _project_features_onto_principal_components( raw_paper_features, principal_components) del raw_paper_features del principal_components paper_pca_path = data_root / data_utils.PCA_PAPER_FEATURES_FILENAME author_pca_path = data_root / data_utils.PCA_AUTHOR_FEATURES_FILENAME institution_pca_path = ( data_root / data_utils.PCA_INSTITUTION_FEATURES_FILENAME) merged_pca_path = data_root / data_utils.PCA_MERGED_FEATURES_FILENAME _write_array(paper_pca_path, paper_pca_features) # Compute author and institution features from paper PCA features. index_arrays = _read_adjacency_indices() author_pca_features = _compute_author_pca_features(paper_pca_features, index_arrays) _write_array(author_pca_path, author_pca_features) institution_pca_features = _compute_institution_pca_features( author_pca_features, index_arrays) _write_array(institution_pca_path, institution_pca_features) merged_pca_features = np.concatenate( [paper_pca_features, author_pca_features, institution_pca_features], axis=0) del author_pca_features del institution_pca_features _write_array(merged_pca_path, merged_pca_features) if __name__ == '__main__': flags.mark_flag_as_required('data_root') app.run(main)
deepmind-research-master
ogb_lsc/mag/pca_builder.py
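A toy check (editor's addition, data illustrative) of the `_pca` convention above: rows of `evecs` are principal directions and `evals` their variances, and projection keeps the top components.

import numpy as np

rand = np.random.RandomState(0)
feat = rand.normal(size=(1000, 5)) * np.array([3., 2., 1., .5, .1])
cov = np.cov(feat.T)
_, evals, evecs = np.linalg.svd(cov, full_matrices=True)
components = evecs[:2].T.astype('f4')  # Keep top-2; shape (5, 2).
projected = feat @ components          # Shape (1000, 2).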
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Experiment config for MAG240M-LSC entry.""" from jaxline import base_config from ml_collections import config_dict def get_config(debug: bool = False) -> config_dict.ConfigDict: """Get Jaxline experiment config.""" config = base_config.get_base_config() config.random_seed = 42 # E.g. '/data/pretrained_models/k0_seed100' (and set k_fold_split_id=0, below) config.restore_path = config_dict.placeholder(str) config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( debug=debug, predictions_dir=config_dict.placeholder(str), # 5 for model selection and early stopping, 50 for final eval. num_eval_iterations_to_ensemble=5, dataset_kwargs=dict( data_root='/data/', online_subsampling_kwargs=dict( max_nb_neighbours_per_type=[ [[40, 20, 0, 40], [0, 0, 0, 0], [0, 0, 0, 0]], [[40, 20, 0, 40], [40, 0, 10, 0], [0, 0, 0, 0]], ], remove_future_nodes=True, deduplicate_nodes=True, ), ratio_unlabeled_data_to_labeled_data=10.0, k_fold_split_id=config_dict.placeholder(int), use_all_labels_when_not_training=False, use_dummy_adjacencies=debug, ), optimizer=dict( name='adamw', kwargs=dict(weight_decay=1e-5, b1=0.9, b2=0.999), learning_rate_schedule=dict( use_schedule=True, base_learning_rate=1e-2, warmup_steps=50000, total_steps=config.get_ref('training_steps'), ), ), model_config=dict( mlp_hidden_sizes=[32] if debug else [512], latent_size=32 if debug else 256, num_message_passing_steps=2 if debug else 4, activation='relu', dropout_rate=0.3, dropedge_rate=0.25, disable_edge_updates=True, use_sent_edges=True, normalization_type='layer_norm', aggregation_function='sum', ), training=dict( loss_config=dict( bgrl_loss_config=dict( stop_gradient_for_supervised_loss=False, bgrl_loss_scale=1.0, symmetrize=True, first_graph_corruption_config=dict( feature_drop_prob=0.4, edge_drop_prob=0.2, ), second_graph_corruption_config=dict( feature_drop_prob=0.4, edge_drop_prob=0.2, ), ), ), # GPU memory may require reducing the `256`s below to `48`. dynamic_batch_size_config=dict( n_node=256 if debug else 340 * 256, n_edge=512 if debug else 720 * 256, n_graph=4 if debug else 256, ), ), eval=dict( split='valid', ema_annealing_schedule=dict( use_schedule=True, base_rate=0.999, total_steps=config.get_ref('training_steps')), dynamic_batch_size_config=dict( n_node=256 if debug else 340 * 128, n_edge=512 if debug else 720 * 128, n_graph=4 if debug else 128, ), )))) ## Training loop config. config.training_steps = 500000 config.checkpoint_dir = '/tmp/checkpoint/mag/' config.train_checkpoint_all_hosts = False config.log_train_data_interval = 10 config.log_tensors_interval = 10 config.save_checkpoint_interval = 30 config.best_model_eval_metric = 'accuracy' config.best_model_eval_metric_higher_is_better = True return config
deepmind-research-master
ogb_lsc/mag/config.py
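A usage sketch (editor's addition; the bare `config` import name assumes the script is run from ogb_lsc/mag/): the config above shrinks the model when instantiated in debug mode.

import config as mag_config  # i.e. ogb_lsc/mag/config.py

cfg = mag_config.get_config(debug=True)
model_cfg = cfg.experiment_kwargs.config.model_config
assert model_cfg.latent_size == 32  # 256 in the full (non-debug) setting.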
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Builds CSR matrices which store the MAG graphs.""" import pathlib from absl import app from absl import flags from absl import logging import numpy as np import scipy.sparse # pylint: disable=g-bad-import-order import data_utils Path = pathlib.Path FLAGS = flags.FLAGS _DATA_FILES_AND_PARAMETERS = { 'author_affiliated_with_institution_edges.npy': { 'content_names': ('author', 'institution'), 'use_boolean': False }, 'author_writes_paper_edges.npy': { 'content_names': ('author', 'paper'), 'use_boolean': False }, 'paper_cites_paper_edges.npy': { 'content_names': ('paper', 'paper'), 'use_boolean': True }, } flags.DEFINE_string('data_root', None, 'Data root directory') flags.DEFINE_boolean('skip_existing', True, 'Skips existing CSR files') flags.mark_flags_as_required(['data_root']) def _read_edge_data(path): try: return np.load(path, mmap_mode='r') except FileNotFoundError: # If the file path can't be found by np.load, use the file handle w/o mmap. with path.open('rb') as fid: return np.load(fid) def _build_coo(edges_data, use_boolean=False): if use_boolean: mat_coo = scipy.sparse.coo_matrix( (np.ones_like(edges_data[1, :], dtype=bool), (edges_data[0, :], edges_data[1, :]))) else: mat_coo = scipy.sparse.coo_matrix( (edges_data[1, :], (edges_data[0, :], edges_data[1, :]))) return mat_coo def _get_output_paths(directory, content_names, use_boolean): boolean_str = '_b' if use_boolean else '' transpose_str = '_t' if len(set(content_names)) == 1 else '' output_prefix = '_'.join(content_names) output_prefix_t = '_'.join(content_names[::-1]) output_filename = f'{output_prefix}{boolean_str}.npz' output_filename_t = f'{output_prefix_t}{boolean_str}{transpose_str}.npz' output_path = directory / output_filename output_path_t = directory / output_filename_t return output_path, output_path_t def _write_csr(path, csr): path.parent.mkdir(parents=True, exist_ok=True) with path.open('wb') as fid: scipy.sparse.save_npz(fid, csr) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') raw_data_dir = Path(FLAGS.data_root) / data_utils.RAW_DIR preprocessed_dir = Path(FLAGS.data_root) / data_utils.PREPROCESSED_DIR for input_filename, parameters in _DATA_FILES_AND_PARAMETERS.items(): input_path = raw_data_dir / input_filename output_path, output_path_t = _get_output_paths(preprocessed_dir, **parameters) if FLAGS.skip_existing and output_path.exists() and output_path_t.exists(): # If both files exist, skip. When only one exists, that's handled below. logging.info( '%s and %s exist: skipping. Use flag `--skip_existing=False`' 'to force overwrite existing.', output_path, output_path_t) continue logging.info('Reading edge data from: %s', input_path) edge_data = _read_edge_data(input_path) logging.info('Building CSR matrices') mat_coo = _build_coo(edge_data, use_boolean=parameters['use_boolean']) # Convert matrices to CSR and write to disk. 
    if not FLAGS.skip_existing or not output_path.exists():
      logging.info('Writing CSR matrix to: %s', output_path)
      mat_csr = mat_coo.tocsr()
      _write_csr(output_path, mat_csr)
      del mat_csr  # Free up memory asap.
    else:
      logging.info(
          '%s exists: skipping. Use flag `--skip_existing=False` '
          'to force overwrite existing.', output_path)

    if not FLAGS.skip_existing or not output_path_t.exists():
      logging.info('Writing (transposed) CSR matrix to: %s', output_path_t)
      mat_csr_t = mat_coo.transpose().tocsr()
      _write_csr(output_path_t, mat_csr_t)
      del mat_csr_t  # Free up memory asap.
    else:
      logging.info(
          '%s exists: skipping. Use flag `--skip_existing=False` '
          'to force overwrite existing.', output_path_t)

    del mat_coo  # Free up memory asap.


if __name__ == '__main__':
  app.run(main)
deepmind-research-master
ogb_lsc/mag/csr_builder.py
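To make the conversion in `_build_coo` and `main` concrete, here is a tiny self-contained example (no MAG data files needed) that builds a boolean COO matrix from a 2xN edge array and materialises both the CSR matrix and its transpose, mirroring what the script does for `paper_cites_paper_edges.npy`.

# Small, self-contained illustration of the COO -> CSR conversion used above.
import numpy as np
import scipy.sparse

# Row 0 holds senders, row 1 holds receivers: edges 0->1, 0->2, 2->1.
edges = np.array([[0, 0, 2],
                  [1, 2, 1]])

coo = scipy.sparse.coo_matrix(
    (np.ones_like(edges[1, :], dtype=bool), (edges[0, :], edges[1, :])))

csr = coo.tocsr()                # Fast row slicing: outgoing edges per node.
csr_t = coo.transpose().tocsr()  # Fast access to incoming edges.

print(csr.getrow(0).indices)    # -> [1 2]
print(csr_t.getrow(1).indices)  # -> [0 2]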
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MAG240M-LSC models.""" from typing import Callable, NamedTuple, Sequence import haiku as hk import jax import jax.numpy as jnp import jraph _REDUCER_NAMES = { 'sum': jax.ops.segment_sum, 'mean': jraph.segment_mean, 'softmax': jraph.segment_softmax, } class ModelOutput(NamedTuple): node_embeddings: jnp.ndarray node_embedding_projections: jnp.ndarray node_projection_predictions: jnp.ndarray node_logits: jnp.ndarray def build_update_fn( name: str, output_sizes: Sequence[int], activation: Callable[[jnp.ndarray], jnp.ndarray], normalization_type: str, is_training: bool, ): """Builds update function.""" def single_mlp(inner_name: str): """Creates a single MLP performing the update.""" mlp = hk.nets.MLP( output_sizes=output_sizes, name=inner_name, activation=activation) mlp = jraph.concatenated_args(mlp) if normalization_type == 'layer_norm': norm = hk.LayerNorm( axis=-1, create_scale=True, create_offset=True, name=name + '_layer_norm') elif normalization_type == 'batch_norm': batch_norm = hk.BatchNorm( create_scale=True, create_offset=True, decay_rate=0.9, name=f'{inner_name}_batch_norm', cross_replica_axis=None if hk.running_init() else 'i', ) norm = lambda x: batch_norm(x, is_training) elif normalization_type == 'none': return mlp else: raise ValueError(f'Unknown normalization type {normalization_type}') return jraph.concatenated_args(hk.Sequential([mlp, norm])) return single_mlp(f'{name}_homogeneous') def build_gn( output_sizes: Sequence[int], activation: Callable[[jnp.ndarray], jnp.ndarray], suffix: str, use_sent_edges: bool, is_training: bool, dropedge_rate: float, normalization_type: str, aggregation_function: str, ): """Builds an InteractionNetwork with MLP update functions.""" node_update_fn = build_update_fn( f'node_processor_{suffix}', output_sizes, activation=activation, normalization_type=normalization_type, is_training=is_training, ) edge_update_fn = build_update_fn( f'edge_processor_{suffix}', output_sizes, activation=activation, normalization_type=normalization_type, is_training=is_training, ) def maybe_dropedge(x): """Dropout on edge messages.""" if not is_training: return x return x * hk.dropout( hk.next_rng_key(), dropedge_rate, jnp.ones([x.shape[0], 1]), ) dropped_edge_update_fn = lambda *args: maybe_dropedge(edge_update_fn(*args)) return jraph.InteractionNetwork( update_edge_fn=dropped_edge_update_fn, update_node_fn=node_update_fn, aggregate_edges_for_nodes_fn=_REDUCER_NAMES[aggregation_function], include_sent_messages_in_node_update=use_sent_edges, ) def _get_activation_fn(name: str) -> Callable[[jnp.ndarray], jnp.ndarray]: if name == 'identity': return lambda x: x if hasattr(jax.nn, name): return getattr(jax.nn, name) raise ValueError('Unknown activation function %s specified. 
'
                   'See https://jax.readthedocs.io/en/latest/jax.nn.html '
                   'for the list of supported function names.' % name)


class NodePropertyEncodeProcessDecode(hk.Module):
  """Node Property Prediction Encode Process Decode Model."""

  def __init__(
      self,
      mlp_hidden_sizes: Sequence[int],
      latent_size: int,
      num_classes: int,
      num_message_passing_steps: int = 2,
      activation: str = 'relu',
      dropout_rate: float = 0.0,
      dropedge_rate: float = 0.0,
      use_sent_edges: bool = False,
      disable_edge_updates: bool = False,
      normalization_type: str = 'layer_norm',
      aggregation_function: str = 'sum',
      name='NodePropertyEncodeProcessDecode',
  ):
    super().__init__(name=name)
    self._num_classes = num_classes
    self._latent_size = latent_size
    self._output_sizes = list(mlp_hidden_sizes) + [latent_size]
    self._num_message_passing_steps = num_message_passing_steps
    self._activation = _get_activation_fn(activation)
    self._dropout_rate = dropout_rate
    self._dropedge_rate = dropedge_rate
    self._use_sent_edges = use_sent_edges
    self._disable_edge_updates = disable_edge_updates
    self._normalization_type = normalization_type
    self._aggregation_function = aggregation_function

  def _dropout_graph(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple:
    node_key, edge_key = hk.next_rng_keys(2)
    nodes = hk.dropout(node_key, self._dropout_rate, graph.nodes)
    edges = graph.edges
    if not self._disable_edge_updates:
      edges = hk.dropout(edge_key, self._dropout_rate, edges)
    return graph._replace(nodes=nodes, edges=edges)

  def _encode(
      self,
      graph: jraph.GraphsTuple,
      is_training: bool,
  ) -> jraph.GraphsTuple:
    node_embed_fn = build_update_fn(
        'node_encoder',
        self._output_sizes,
        activation=self._activation,
        normalization_type=self._normalization_type,
        is_training=is_training,
    )
    edge_embed_fn = build_update_fn(
        'edge_encoder',
        self._output_sizes,
        activation=self._activation,
        normalization_type=self._normalization_type,
        is_training=is_training,
    )
    gn = jraph.GraphMapFeatures(edge_embed_fn, node_embed_fn)
    graph = gn(graph)
    if is_training:
      graph = self._dropout_graph(graph)
    return graph

  def _process(
      self,
      graph: jraph.GraphsTuple,
      is_training: bool,
  ) -> jraph.GraphsTuple:
    for idx in range(self._num_message_passing_steps):
      net = build_gn(
          output_sizes=self._output_sizes,
          activation=self._activation,
          suffix=str(idx),
          use_sent_edges=self._use_sent_edges,
          is_training=is_training,
          dropedge_rate=self._dropedge_rate,
          normalization_type=self._normalization_type,
          aggregation_function=self._aggregation_function)
      residual_graph = net(graph)
      graph = graph._replace(nodes=graph.nodes + residual_graph.nodes)
      if not self._disable_edge_updates:
        graph = graph._replace(edges=graph.edges + residual_graph.edges)
      if is_training:
        graph = self._dropout_graph(graph)
    return graph

  def _node_mlp(
      self,
      graph: jraph.GraphsTuple,
      is_training: bool,
      output_size: int,
      name: str,
  ) -> jnp.ndarray:
    decoder_sizes = list(self._output_sizes[:-1]) + [output_size]
    net = build_update_fn(
        name,
        decoder_sizes,
        self._activation,
        normalization_type=self._normalization_type,
        is_training=is_training,
    )
    return net(graph.nodes)

  def __call__(
      self,
      graph: jraph.GraphsTuple,
      is_training: bool,
      stop_gradient_embedding_to_logits: bool = False,
  ) -> ModelOutput:
    # Note that these update configs may need to change if
    # we switch back to GraphNetwork rather than InteractionNetwork.
graph = self._encode(graph, is_training) graph = self._process(graph, is_training) node_embeddings = graph.nodes node_projections = self._node_mlp(graph, is_training, self._latent_size, 'projector') node_predictions = self._node_mlp( graph._replace(nodes=node_projections), is_training, self._latent_size, 'predictor', ) if stop_gradient_embedding_to_logits: graph = jax.tree_map(jax.lax.stop_gradient, graph) node_logits = self._node_mlp(graph, is_training, self._num_classes, 'logits_decoder') return ModelOutput( node_embeddings=node_embeddings, node_logits=node_logits, node_embedding_projections=node_projections, node_projection_predictions=node_predictions, )
deepmind-research-master
ogb_lsc/mag/models.py
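A toy forward pass through the model above. The graph, feature sizes and module import are made up for illustration (assuming this file is importable as `models`), so treat it as a sketch rather than the training setup.

# Sketch: drive NodePropertyEncodeProcessDecode on a made-up graph.
import haiku as hk
import jax
import jax.numpy as jnp
import jraph

from models import NodePropertyEncodeProcessDecode  # Module name assumed.

def forward(graph, is_training):
  model = NodePropertyEncodeProcessDecode(
      mlp_hidden_sizes=[16], latent_size=8, num_classes=3,
      num_message_passing_steps=1)
  return model(graph, is_training)

net = hk.transform_with_state(forward)

# A made-up graph: 4 nodes with 5 features, 6 edges with 7 features.
graph = jraph.GraphsTuple(
    nodes=jnp.ones([4, 5]), edges=jnp.ones([6, 7]),
    senders=jnp.array([0, 1, 2, 3, 0, 2]),
    receivers=jnp.array([1, 2, 3, 0, 2, 1]),
    globals=None, n_node=jnp.array([4]), n_edge=jnp.array([6]))

params, state = net.init(jax.random.PRNGKey(0), graph, is_training=True)
out, _ = net.apply(params, state, jax.random.PRNGKey(1), graph,
                   is_training=True)
print(out.node_logits.shape)  # (4, 3): one logit vector per node.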
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for subsampling the MAG dataset.""" import collections import jraph import numpy as np def get_or_sample_row(node_id: int, nb_neighbours: int, csr_matrix, remove_duplicates: bool): """Either obtain entire row or a subsampled set of neighbours.""" if node_id + 1 >= csr_matrix.indptr.shape[0]: lo = 0 hi = 0 else: lo = csr_matrix.indptr[node_id] hi = csr_matrix.indptr[node_id + 1] if lo == hi: # Skip empty neighbourhoods neighbours = None elif hi - lo <= nb_neighbours: neighbours = csr_matrix.indices[lo:hi] elif hi - lo < 5 * nb_neighbours: # For small surroundings, sample directly nb_neighbours = min(nb_neighbours, hi - lo) inds = lo + np.random.choice(hi - lo, size=(nb_neighbours,), replace=False) neighbours = csr_matrix.indices[inds] else: # Otherwise, do not slice -- sample indices instead # To extend GraphSAGE ("uniform w/ replacement"), modify this call inds = np.random.randint(lo, hi, size=(nb_neighbours,)) if remove_duplicates: inds = np.unique(inds) neighbours = csr_matrix.indices[inds] return neighbours def get_neighbours(node_id: int, node_type: int, neighbour_type: int, nb_neighbours: int, remove_duplicates: bool, author_institution_csr, institution_author_csr, author_paper_csr, paper_author_csr, paper_paper_csr, paper_paper_transpose_csr): """Fetch the edge indices from one node to corresponding neighbour type.""" if node_type == 0 and neighbour_type == 0: csr = paper_paper_transpose_csr # Citing elif node_type == 0 and neighbour_type == 1: csr = paper_author_csr elif node_type == 0 and neighbour_type == 3: csr = paper_paper_csr # Cited elif node_type == 1 and neighbour_type == 0: csr = author_paper_csr elif node_type == 1 and neighbour_type == 2: csr = author_institution_csr elif node_type == 2 and neighbour_type == 1: csr = institution_author_csr else: raise ValueError('Non-existent edge type requested') return get_or_sample_row(node_id, nb_neighbours, csr, remove_duplicates) def get_senders(neighbour_type: int, sender_index, paper_features): """Get the sender features from given neighbours.""" if neighbour_type == 0 or neighbour_type == 3: sender_features = paper_features[sender_index] elif neighbour_type == 1 or neighbour_type == 2: sender_features = np.zeros((sender_index.shape[0], paper_features.shape[1])) # Consider averages else: raise ValueError('Non-existent node type requested') return sender_features def make_edge_type_feature(node_type: int, neighbour_type: int): edge_feats = np.zeros(7) edge_feats[node_type] = 1.0 edge_feats[neighbour_type + 3] = 1.0 return edge_feats def subsample_graph(paper_id: int, author_institution_csr, institution_author_csr, author_paper_csr, paper_author_csr, paper_paper_csr, paper_paper_transpose_csr, max_nb_neighbours_per_type, max_nodes=None, max_edges=None, paper_years=None, remove_future_nodes=False, deduplicate_nodes=False) -> jraph.GraphsTuple: """Subsample a graph around given paper ID.""" if paper_years is not None: root_paper_year = 
paper_years[paper_id] else: root_paper_year = None # Add the center node as "node-zero" sub_nodes = [paper_id] num_nodes_in_subgraph = 1 num_edges_in_subgraph = 0 reached_node_budget = False reached_edge_budget = False node_and_type_to_index_in_subgraph = dict() node_and_type_to_index_in_subgraph[(paper_id, 0)] = 0 # Store all (integer) depths as an additional feature depths = [0] types = [0] sub_edges = [] sub_senders = [] sub_receivers = [] # Store all unprocessed neighbours # Each neighbour is stored as a 4-tuple (node_index in original graph, # node_index in subsampled graph, type, number of hops away from source). # TYPES: 0: paper, 1: author, 2: institution, 3: paper (for bidirectional) neighbour_deque = collections.deque([(paper_id, 0, 0, 0)]) max_depth = len(max_nb_neighbours_per_type) while neighbour_deque and not reached_edge_budget: left_entry = neighbour_deque.popleft() node_index, node_index_in_sampled_graph, node_type, node_depth = left_entry # Expand from this node, to a node of related type for neighbour_type in range(4): if reached_edge_budget: break # Budget may have been reached in previous type; break here. nb_neighbours = max_nb_neighbours_per_type[node_depth][node_type][neighbour_type] # pylint:disable=line-too-long # Only extend if we want to sample further in this edge type if nb_neighbours > 0: sampled_neighbors = get_neighbours( node_index, node_type, neighbour_type, nb_neighbours, deduplicate_nodes, author_institution_csr, institution_author_csr, author_paper_csr, paper_author_csr, paper_paper_csr, paper_paper_transpose_csr, ) if sampled_neighbors is not None: if remove_future_nodes and root_paper_year is not None: if neighbour_type in [0, 3]: sampled_neighbors = [ x for x in sampled_neighbors if paper_years[x] <= root_paper_year ] if not sampled_neighbors: continue nb_neighbours = len(sampled_neighbors) edge_feature = make_edge_type_feature(node_type, neighbour_type) for neighbor_original_idx in sampled_neighbors: # Key into dict of existing nodes using both node id and type. neighbor_key = (neighbor_original_idx, neighbour_type % 3) # Get existing idx in subgraph if it exists. neighbor_subgraph_idx = node_and_type_to_index_in_subgraph.get( neighbor_key, None) if (not reached_node_budget and (not deduplicate_nodes or neighbor_subgraph_idx is None)): # If it does not exist already, or we are not deduplicating, # just create a new node and update the dict. neighbor_subgraph_idx = num_nodes_in_subgraph node_and_type_to_index_in_subgraph[neighbor_key] = ( neighbor_subgraph_idx) num_nodes_in_subgraph += 1 sub_nodes.append(neighbor_original_idx) types.append(neighbour_type % 3) depths.append(node_depth + 1) if max_nodes is not None and num_nodes_in_subgraph >= max_nodes: reached_node_budget = True continue # Move to next neighbor which might already exist. if node_depth < max_depth - 1: # If the neighbours are to be further expanded, enqueue them. # Expand only if the nodes did not already exist. neighbour_deque.append( (neighbor_original_idx, neighbor_subgraph_idx, neighbour_type % 3, node_depth + 1)) # The neighbor id within graph is now fixed; just add edges. if neighbor_subgraph_idx is not None: # Either node existed before or was successfully added. 
sub_senders.append(neighbor_subgraph_idx) sub_receivers.append(node_index_in_sampled_graph) sub_edges.append(edge_feature) num_edges_in_subgraph += 1 if max_edges is not None and num_edges_in_subgraph >= max_edges: reached_edge_budget = True break # Break out of adding edges for this neighbor type # Stitch the graph together sub_nodes = np.array(sub_nodes, dtype=np.int32) if sub_senders: sub_senders = np.array(sub_senders, dtype=np.int32) sub_receivers = np.array(sub_receivers, dtype=np.int32) sub_edges = np.stack(sub_edges, axis=0) else: # Use empty arrays. sub_senders = np.zeros([0], dtype=np.int32) sub_receivers = np.zeros([0], dtype=np.int32) sub_edges = np.zeros([0, 7]) # Finally, derive the sizes sub_n_node = np.array([sub_nodes.shape[0]]) sub_n_edge = np.array([sub_senders.shape[0]]) assert sub_nodes.shape[0] == num_nodes_in_subgraph assert sub_edges.shape[0] == num_edges_in_subgraph if max_nodes is not None: assert num_nodes_in_subgraph <= max_nodes if max_edges is not None: assert num_edges_in_subgraph <= max_edges types = np.array(types) depths = np.array(depths) sub_nodes = { 'index': sub_nodes.astype(np.int32), 'type': types.astype(np.int16), 'depth': depths.astype(np.int16), } return jraph.GraphsTuple(nodes=sub_nodes, edges=sub_edges.astype(np.float16), senders=sub_senders.astype(np.int32), receivers=sub_receivers.astype(np.int32), globals=np.array([0], dtype=np.int16), n_node=sub_n_node.astype(dtype=np.int32), n_edge=sub_n_edge.astype(dtype=np.int32))
deepmind-research-master
ogb_lsc/mag/sub_sampler.py
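A minimal invocation of `subsample_graph` on hand-built adjacency matrices. All sizes here are toy stand-ins (3 papers, 2 authors, 1 institution), and the import assumes the module layout of this directory.

# Sketch: one-hop subsampling around paper 0 with tiny stand-in matrices.
import numpy as np
import scipy.sparse as sp

from sub_sampler import subsample_graph  # Module name as in this directory.

# 3 papers citing each other, 2 authors, 1 institution.
paper_paper = sp.csr_matrix(np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]]))
paper_paper_t = paper_paper.T.tocsr()
author_paper = sp.csr_matrix(np.array([[1, 1, 0], [0, 1, 1]]))
paper_author = author_paper.T.tocsr()
author_institution = sp.csr_matrix(np.array([[1], [1]]))
institution_author = author_institution.T.tocsr()

# Depth 0 only: from the root paper take up to 2 citing papers, 2 authors
# and 2 cited papers (types 0, 1 and 3 respectively).
max_nb = [[[2, 2, 0, 2], [0, 0, 0, 0], [0, 0, 0, 0]]]

graph = subsample_graph(
    paper_id=0,
    author_institution_csr=author_institution,
    institution_author_csr=institution_author,
    author_paper_csr=author_paper,
    paper_author_csr=paper_author,
    paper_paper_csr=paper_paper,
    paper_paper_transpose_csr=paper_paper_t,
    max_nb_neighbours_per_type=max_nb,
    deduplicate_nodes=True)
print(graph.n_node, graph.n_edge)  # -> [4] [3]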
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """MAG240M-LSC datasets.""" import threading from typing import NamedTuple, Optional import jax import jraph from ml_collections import config_dict import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds # pylint: disable=g-bad-import-order # pytype: disable=import-error import batching_utils import data_utils # We only want to load these arrays once for all threads. # `get_arrays` uses an LRU cache which is not thread safe. LOADING_RAW_ARRAYS_LOCK = threading.Lock() NUM_CLASSES = data_utils.NUM_CLASSES _MAX_DEPTH_IN_SUBGRAPH = 3 class Batch(NamedTuple): """NamedTuple to represent batches of data.""" graph: jraph.GraphsTuple node_labels: np.ndarray label_mask: np.ndarray central_node_mask: np.ndarray node_indices: np.ndarray absolute_node_indices: np.ndarray def build_dataset_iterator( data_root: str, split: str, dynamic_batch_size_config: config_dict.ConfigDict, online_subsampling_kwargs: dict, # pylint: disable=g-bare-generic debug: bool = False, is_training: bool = True, k_fold_split_id: Optional[int] = None, ratio_unlabeled_data_to_labeled_data: float = 0.0, use_all_labels_when_not_training: bool = False, use_dummy_adjacencies: bool = False, ): """Returns an iterator over Batches from the dataset.""" if split == 'test': use_all_labels_when_not_training = True if not is_training: ratio_unlabeled_data_to_labeled_data = 0.0 # Load the master data arrays. with LOADING_RAW_ARRAYS_LOCK: array_dict = data_utils.get_arrays( data_root, k_fold_split_id=k_fold_split_id, use_dummy_adjacencies=use_dummy_adjacencies) node_labels = array_dict['paper_label'].reshape(-1) train_indices = array_dict['train_indices'].astype(np.int32) is_train_index = np.zeros(node_labels.shape[0], dtype=np.int32) is_train_index[train_indices] = 1 valid_indices = array_dict['valid_indices'].astype(np.int32) is_valid_index = np.zeros(node_labels.shape[0], dtype=np.int32) is_valid_index[valid_indices] = 1 is_train_or_valid_index = is_train_index + is_valid_index def sstable_to_intermediate_graph(graph): indices = tf.cast(graph.nodes['index'], tf.int32) first_index = indices[..., 0] # Add an additional absolute index, but adding offsets to authors, and # institution indices. 
absolute_index = graph.nodes['index'] is_author = graph.nodes['type'] == 1 absolute_index = tf.where( is_author, absolute_index + data_utils.NUM_PAPERS, absolute_index) is_institution = graph.nodes['type'] == 2 absolute_index = tf.where( is_institution, absolute_index + data_utils.NUM_PAPERS + data_utils.NUM_AUTHORS, absolute_index) is_same_as_central_node = tf.math.equal(indices, first_index) input_nodes = graph.nodes graph = graph._replace( nodes={ 'one_hot_type': tf.one_hot(tf.cast(input_nodes['type'], tf.int32), 3), 'one_hot_depth': tf.one_hot( tf.cast(input_nodes['depth'], tf.int32), _MAX_DEPTH_IN_SUBGRAPH), 'year': tf.expand_dims(input_nodes['year'], axis=-1), 'label': tf.one_hot( tf.cast(input_nodes['label'], tf.int32), NUM_CLASSES), 'is_same_as_central_node': is_same_as_central_node, # Only first node in graph has a valid label. 'is_central_node': tf.one_hot(0, tf.shape(input_nodes['label'])[0]), 'index': input_nodes['index'], 'absolute_index': absolute_index, }, globals=tf.expand_dims(graph.globals, axis=-1), ) return graph ds = data_utils.get_graph_subsampling_dataset( split, array_dict, shuffle_indices=is_training, ratio_unlabeled_data_to_labeled_data=ratio_unlabeled_data_to_labeled_data, max_nodes=dynamic_batch_size_config.n_node - 1, # Keep space for pads. max_edges=dynamic_batch_size_config.n_edge, **online_subsampling_kwargs) if debug: ds = ds.take(50) ds = ds.map( sstable_to_intermediate_graph, num_parallel_calls=tf.data.experimental.AUTOTUNE) if is_training: ds = ds.shard(jax.process_count(), jax.process_index()) ds = ds.shuffle(buffer_size=1 if debug else 128) ds = ds.repeat() ds = ds.prefetch(1 if debug else tf.data.experimental.AUTOTUNE) np_ds = iter(tfds.as_numpy(ds)) batched_np_ds = batching_utils.dynamically_batch( np_ds, **dynamic_batch_size_config, ) def intermediate_graph_to_batch(graph): central_node_mask = graph.nodes['is_central_node'] label = graph.nodes['label'] node_indices = graph.nodes['index'] absolute_indices = graph.nodes['absolute_index'] ### Construct label as a feature for non-central nodes. # First do a lookup with node indices, with a np.minimum to ensure we do not # index out of bounds due to num_authors being larger than num_papers. is_same_as_central_node = graph.nodes['is_same_as_central_node'] capped_indices = np.minimum(node_indices, node_labels.shape[0] - 1) label_as_feature = node_labels[capped_indices] # Nodes which are not in train set should get `num_classes` label. # Nodes in test set or non-arXiv nodes have -1 or nan labels. # Mask out invalid labels and non-papers. use_label_as_feature = np.logical_and(label_as_feature >= 0, graph.nodes['one_hot_type'][..., 0]) if split == 'train' or not use_all_labels_when_not_training: # Mask out validation papers and non-arxiv papers who # got labels from fusing with arxiv papers. use_label_as_feature = np.logical_and(is_train_index[capped_indices], use_label_as_feature) label_as_feature = np.where(use_label_as_feature, label_as_feature, NUM_CLASSES) # Mask out central node label in case it appears again. label_as_feature = np.where(is_same_as_central_node, NUM_CLASSES, label_as_feature) # Nodes which are not papers get `NUM_CLASSES+1` label. 
label_as_feature = np.where(graph.nodes['one_hot_type'][..., 0], label_as_feature, NUM_CLASSES+1) nodes = { 'label_as_feature': label_as_feature, 'year': graph.nodes['year'], 'bitstring_year': _get_bitstring_year_representation( graph.nodes['year']), 'one_hot_type': graph.nodes['one_hot_type'], 'one_hot_depth': graph.nodes['one_hot_depth'], } graph = graph._replace( nodes=nodes, globals={}, ) is_train_or_valid_node = np.logical_and( is_train_or_valid_index[capped_indices], graph.nodes['one_hot_type'][..., 0]) if is_training: label_mask = np.logical_and(central_node_mask, is_train_or_valid_node) else: # `label_mask` is used to index into valid central nodes by prediction # calculator. Since that computation is only done when not training, and # at that time we are guaranteed all central nodes have valid labels, # we just set label_mask = central_node_mask when not training. label_mask = central_node_mask batch = Batch( graph=graph, node_labels=label, central_node_mask=central_node_mask, label_mask=label_mask, node_indices=node_indices, absolute_node_indices=absolute_indices) # Transform integers into one-hots. batch = _add_one_hot_features_to_batch(batch) # Gather PCA features. return _add_embeddings_to_batch(batch, array_dict['bert_pca_129']) batch_list = [] for batch in batched_np_ds: with jax.profiler.StepTraceAnnotation('batch_postprocessing'): batch = intermediate_graph_to_batch(batch) if is_training: batch_list.append(batch) if len(batch_list) == jax.local_device_count(): yield jax.device_put_sharded(batch_list, jax.local_devices()) batch_list = [] else: yield batch def _get_bitstring_year_representation(year: np.ndarray): """Return year as bitstring.""" min_year = 1900 max_training_year = 2018 offseted_year = np.minimum(year, max_training_year) - min_year return np.unpackbits(offseted_year.astype(np.uint8), axis=-1) def _np_one_hot(targets: np.ndarray, nb_classes: int): res = np.zeros(targets.shape + (nb_classes,), dtype=np.float16) np.put_along_axis(res, targets.astype(np.int32)[..., None], 1.0, axis=-1) return res def _get_one_hot_year_representation( year: np.ndarray, one_hot_type: np.ndarray, ): """Returns good representation for year.""" # Bucket edges found based on quantiles to bucket into 20 equal sized buckets. bucket_edges = np.array([ 1964, 1975, 1983, 1989, 1994, 1998, 2001, 2004, 2006, 2008, 2009, 2011, 2012, 2013, 2014, 2016, 2017, # 2018, 2019, 2020 contain last-year-of-train, eval, test nodes ]) year = np.squeeze(year, axis=-1) year_id = np.searchsorted(bucket_edges, year) is_paper = one_hot_type[..., 0] bucket_id_for_non_paper = len(bucket_edges) + 1 bucket_id = np.where(is_paper, year_id, bucket_id_for_non_paper) one_hot_year = _np_one_hot(bucket_id, len(bucket_edges) + 2) return one_hot_year def _add_one_hot_features_to_batch(batch: Batch) -> Batch: """Transforms integer features into one-hot features.""" nodes = batch.graph.nodes.copy() nodes['one_hot_year'] = _get_one_hot_year_representation( nodes['year'], nodes['one_hot_type']) del nodes['year'] # NUM_CLASSES plus one category for papers for which a class is not provided # and another for nodes that are not papers. 
nodes['one_hot_label_as_feature'] = _np_one_hot( nodes['label_as_feature'], NUM_CLASSES + 2) del nodes['label_as_feature'] return batch._replace(graph=batch.graph._replace(nodes=nodes)) def _add_embeddings_to_batch(batch: Batch, embeddings: np.ndarray) -> Batch: nodes = batch.graph.nodes.copy() nodes['features'] = embeddings[batch.absolute_node_indices] graph = batch.graph._replace(nodes=nodes) return batch._replace(graph=graph)
deepmind-research-master
ogb_lsc/mag/datasets.py
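The year featurisation above is easy to sanity-check in isolation. The snippet below replicates `_get_bitstring_year_representation` inline (to avoid the module's TensorFlow dependencies), with the constants copied from that function.

# Standalone check of the bitstring year featurisation used above.
import numpy as np

year = np.array([[1950], [2018], [2020]], dtype=np.int32)
offset = np.minimum(year, 2018) - 1900  # Clip eval/test years to 2018.
bits = np.unpackbits(offset.astype(np.uint8), axis=-1)
print(bits[1])  # 118 -> [0 1 1 1 0 1 1 0]
print(bits[2])  # 2020 is clipped to 2018, so same bit pattern.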
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=line-too-long
r"""MAG240M-LSC Jaxline experiment.

Usage:

```
# A path pointing to the data root.
DATA_ROOT=/tmp/mag/data
# A path for checkpoints.
CHECKPOINT_DIR=/tmp/checkpoint/
# A path for output predictions.
OUTPUT_DIR=/tmp/predictions/
# Whether we are training one model of a k-fold of models (None for no k-fold).
K_FOLD_INDEX=0
```

Some reusable arguments:

```
SHARED_ARGUMENTS="--config=ogb_lsc/mag/config.py \
--config.experiment_kwargs.config.dataset_kwargs.data_root=${DATA_ROOT} \
--config.experiment_kwargs.config.dataset_kwargs.k_fold_split_id=${K_FOLD_INDEX} \
--config.checkpoint_dir=${CHECKPOINT_DIR}"
```

Train only:

```
python -m ogb_lsc.mag.experiment \
  ${SHARED_ARGUMENTS} --jaxline_mode="train"
RESTORE_PATH=${CHECKPOINT_DIR}/models/latest/step_${STEP}_${TIMESTAMP}
```

Train with early stopping on a separate eval thread:

```
python -m ogb_lsc.mag.experiment \
  ${SHARED_ARGUMENTS} --jaxline_mode="train_eval_multithreaded"
RESTORE_PATH=${CHECKPOINT_DIR}/models/best/step_${STEP}_${TIMESTAMP}
```

Produce predictions with a pretrained model:

```
SPLIT="valid"  # Or "test"
EPOCHS_TO_ENSEMBLE=50  # We used this in the submission.
python -m ogb_lsc.mag.experiment \
  ${SHARED_ARGUMENTS} --jaxline_mode="eval" \
  --config.one_off_evaluate=True \
  --config.experiment_kwargs.config.num_eval_iterations_to_ensemble=${EPOCHS_TO_ENSEMBLE} \
  --config.restore_path=${RESTORE_PATH} \
  --config.experiment_kwargs.config.predictions_dir=${OUTPUT_DIR} \
  --config.experiment_kwargs.config.eval.split=${SPLIT}
```

Note it is also possible to pass a `restore_path` with `--jaxline_mode="train"`
and training will continue where it left off. In the case of
`--jaxline_mode="train_eval_multithreaded"` this will also work, but early
stopping will not take into account any past best performance up to that
restored model.

Other useful options:

To reduce the training batch size in case of OOM, for example to a batch size
of approximately 48 on average:

```
SHARED_ARGUMENTS="${SHARED_ARGUMENTS} \
--config.experiment_kwargs.config.training.dynamic_batch_size_config.n_node=16320 \
--config.experiment_kwargs.config.training.dynamic_batch_size_config.n_edge=34560 \
--config.experiment_kwargs.config.training.dynamic_batch_size_config.n_graph=48"
```

To reduce lead time by using dummy adjacency matrices, instead of loading the
full ones into memory:
``` SHARED_ARGUMENTS="${SHARED_ARGUMENTS} \ --config.experiment_kwargs.config.dataset_kwargs.use_dummy_adjacencies=True" ``` """ # pylint: enable=line-too-long import datetime import functools import os import signal import threading from typing import Tuple from absl import app from absl import flags from absl import logging import chex import dill import haiku as hk import jax from jax.config import config as jax_config import jax.numpy as jnp from jaxline import experiment from jaxline import platform from jaxline import utils import jraph from ml_collections import config_dict import numpy as np import optax import tensorflow.compat.v2 as tf # pylint: disable=g-bad-import-order import datasets import losses import models import schedules FLAGS = flags.FLAGS class Experiment(experiment.AbstractExperiment): """MAG240M-LSC Jaxline experiment.""" CHECKPOINT_ATTRS = { '_params': 'params', '_opt_state': 'opt_state', '_network_state': 'network_state', '_ema_network_state': 'ema_network_state', '_ema_params': 'ema_params', } def __init__( self, mode: str, init_rng: jnp.ndarray, config: config_dict.ConfigDict, ): """Initializes experiment.""" super(Experiment, self).__init__(mode=mode, init_rng=init_rng) tf.config.experimental.set_visible_devices([], device_type='GPU') tf.config.experimental.set_visible_devices([], device_type='TPU') if mode not in ('train', 'eval', 'train_eval_multithreaded'): raise ValueError(f'Invalid mode {mode}.') self.mode = mode self.config = config self.init_rng = init_rng self.forward = hk.transform_with_state(self._forward_fn) self._predictions = None # Needed for checkpoint restore. self._params = None self._ema_params = None self._network_state = None self._ema_network_state = None self._opt_state = None # Track what has started. self._training = False self._evaluating = False def _train_init(self): iterator = self._build_numpy_dataset_iterator('train', is_training=True) self._train_input = utils.py_prefetch(lambda: iterator) dummy_batch = next(self._train_input) if self._params is None: self._initialize_experiment_state(self.init_rng, dummy_batch) self._update_func = jax.pmap( self._update_func, axis_name='i', donate_argnums=3, ) self._training = True def _eval_init(self): split = self.config.eval.split # Will build the iterator at each evaluation. self._make_eval_dataset_iterator = functools.partial( utils.py_prefetch, lambda: self._build_numpy_dataset_iterator(split, is_training=False)) self.eval_forward = jax.jit( functools.partial(self.forward.apply, is_training=False)) self._evaluating = True # _ _ # | |_ _ __ __ _(_)_ __ # | __| '__/ _` | | '_ \ # | |_| | | (_| | | | | | # \__|_| \__,_|_|_| |_| # def step( self, global_step: jnp.ndarray, rng: jnp.ndarray, **unused_args, ) -> losses.LogsDict: """See Jaxline base class.""" if not self._training: self._train_init() with jax.profiler.StepTraceAnnotation('next_train_input'): batch = next(self._train_input) with jax.profiler.StepTraceAnnotation('update_step'): (self._params, self._ema_params, self._network_state, self._ema_network_state, self._opt_state, stats) = self._update_func( self._params, self._ema_params, self._network_state, self._ema_network_state, self._opt_state, global_step, rng, batch, ) del batch # Buffers donated to _update_func. 
with jax.profiler.StepTraceAnnotation('get_stats'): stats = utils.get_first(stats) return stats def _build_numpy_dataset_iterator(self, split: str, is_training: bool): if is_training: dynamic_batch_size_config = self.config.training.dynamic_batch_size_config else: dynamic_batch_size_config = self.config.eval.dynamic_batch_size_config return datasets.build_dataset_iterator( split=split, dynamic_batch_size_config=dynamic_batch_size_config, debug=self.config.debug, is_training=is_training, **self.config.dataset_kwargs) def _initialize_experiment_state( self, init_rng: jnp.ndarray, dummy_batch: datasets.Batch, ): """Initialize parameters and opt state if not restoring from checkpoint.""" dummy_graph = dummy_batch.graph # Cast features to float32 so that parameters are as appropriate. dummy_graph = dummy_graph._replace( nodes=jax.tree_map(lambda x: x.astype(np.float32), dummy_graph.nodes), edges=jax.tree_map(lambda x: x.astype(np.float32), dummy_graph.edges), ) init_key = utils.bcast_local_devices(init_rng) p_init = jax.pmap(functools.partial(self.forward.init, is_training=True)) params, network_state = p_init(init_key, dummy_graph) opt_init, _ = self._optimizer( utils.bcast_local_devices(jnp.zeros([], jnp.int32))) opt_state = jax.pmap(opt_init)(params) # For EMA decay to work correctly, params/state must be floats. chex.assert_type(jax.tree_leaves(params), jnp.floating) chex.assert_type(jax.tree_leaves(network_state), jnp.floating) self._params = params self._ema_params = params self._network_state = network_state self._ema_network_state = network_state self._opt_state = opt_state def _get_learning_rate(self, global_step: jnp.ndarray) -> jnp.ndarray: return schedules.learning_schedule( global_step, **self.config.optimizer.learning_rate_schedule, ) def _optimizer( self, learning_rate: jnp.ndarray, ) -> optax.GradientTransformation: optimizer_fn = getattr(optax, self.config.optimizer.name) return optimizer_fn( learning_rate=learning_rate, **self.config.optimizer.kwargs, ) def _forward_fn( self, input_graph: jraph.GraphsTuple, is_training: bool, stop_gradient_embedding_to_logits: bool = False, ): model = models.NodePropertyEncodeProcessDecode( num_classes=datasets.NUM_CLASSES, **self.config.model_config, ) return model(input_graph, is_training, stop_gradient_embedding_to_logits) def _bgrl_loss( self, params: hk.Params, ema_params: hk.Params, network_state: hk.State, ema_network_state: hk.State, rng: jnp.ndarray, batch: datasets.Batch, ) -> Tuple[jnp.ndarray, Tuple[losses.LogsDict, hk.State]]: """Computes fully supervised loss.""" # First compute 2 graph corrupted views. first_corruption_key, second_corruption_key, rng = jax.random.split(rng, 3) (first_model_key, first_model_key_ema, second_model_key, second_model_key_ema, rng) = jax.random.split(rng, 5) first_corrupted_graph = losses.get_corrupted_view( batch.graph, rng_key=first_corruption_key, **self.config.training.loss_config.bgrl_loss_config.first_graph_corruption_config, # pylint:disable=line-too-long ) second_corrupted_graph = losses.get_corrupted_view( batch.graph, rng_key=second_corruption_key, **self.config.training.loss_config.bgrl_loss_config.second_graph_corruption_config, # pylint:disable=line-too-long ) # Then run the model on both. 
first_corrupted_output, _ = self.forward.apply( params, network_state, first_model_key, first_corrupted_graph, is_training=True, stop_gradient_embedding_to_logits=True, ) second_corrupted_output, _ = self.forward.apply( params, network_state, second_model_key, second_corrupted_graph, is_training=True, stop_gradient_embedding_to_logits=True, ) first_corrupted_output_ema, _ = self.forward.apply( ema_params, ema_network_state, first_model_key_ema, first_corrupted_graph, is_training=True, stop_gradient_embedding_to_logits=True, ) second_corrupted_output_ema, _ = self.forward.apply( ema_params, ema_network_state, second_model_key_ema, second_corrupted_graph, is_training=True, stop_gradient_embedding_to_logits=True, ) # These also contain projections for non-central nodes; remove them. num_nodes_per_graph = batch.graph.n_node node_central_indices = jnp.concatenate( [jnp.array([0]), jnp.cumsum(num_nodes_per_graph[:-1])]) bgrl_loss, bgrl_stats = losses.bgrl_loss( first_online_predictions=first_corrupted_output .node_projection_predictions[node_central_indices], second_target_projections=second_corrupted_output_ema .node_embedding_projections[node_central_indices], second_online_predictions=second_corrupted_output .node_projection_predictions[node_central_indices], first_target_projections=first_corrupted_output_ema .node_embedding_projections[node_central_indices], symmetrize=self.config.training.loss_config.bgrl_loss_config.symmetrize, valid_mask=batch.central_node_mask[node_central_indices], ) # Finally train decoder on original graph with optional stop gradient. stop_gradient = ( self.config.training.loss_config.bgrl_loss_config .stop_gradient_for_supervised_loss) model_output, new_network_state = self.forward.apply( params, network_state, rng, batch.graph, is_training=True, stop_gradient_embedding_to_logits=stop_gradient, ) supervised_loss, supervised_stats = losses.node_classification_loss( model_output.node_logits, batch, ) stats = dict(**supervised_stats, **bgrl_stats) total_loss = ( supervised_loss + self.config.training.loss_config.bgrl_loss_config.bgrl_loss_scale * bgrl_loss) return total_loss, (stats, new_network_state) def _loss( self, params: hk.Params, ema_params: hk.Params, network_state: hk.State, ema_network_state: hk.State, rng: jnp.ndarray, batch: datasets.Batch, ) -> Tuple[jnp.ndarray, Tuple[losses.LogsDict, hk.State]]: """Compute loss from params and batch.""" # Cast to float32 since some losses are unstable with float16. graph = batch.graph._replace( nodes=jax.tree_map(lambda x: x.astype(jnp.float32), batch.graph.nodes), edges=jax.tree_map(lambda x: x.astype(jnp.float32), batch.graph.edges), ) batch = batch._replace(graph=graph) return self._bgrl_loss(params, ema_params, network_state, ema_network_state, rng, batch) def _update_func( self, params: hk.Params, ema_params: hk.Params, network_state: hk.State, ema_network_state: hk.State, opt_state: optax.OptState, global_step: jnp.ndarray, rng: jnp.ndarray, batch: datasets.Batch, ) -> Tuple[hk.Params, hk.Params, hk.State, hk.State, optax.OptState, losses.LogsDict]: """Updates parameters.""" grad_fn = jax.value_and_grad(self._loss, has_aux=True) (_, (stats, new_network_state)), grads = grad_fn( params, ema_params, network_state, ema_network_state, rng, batch) learning_rate = self._get_learning_rate(global_step) _, opt_apply = self._optimizer(learning_rate) grad = jax.lax.pmean(grads, axis_name='i') updates, opt_state = opt_apply(grad, opt_state, params) params = optax.apply_updates(params, updates) # Stats and logging. 
param_norm = optax.global_norm(params) grad_norm = optax.global_norm(grad) ema_rate = schedules.ema_decay_schedule( step=global_step, **self.config.eval.ema_annealing_schedule) num_non_padded_nodes = ( batch.graph.n_node.sum() - jraph.get_number_of_padding_with_graphs_nodes(batch.graph)) num_non_padded_edges = ( batch.graph.n_edge.sum() - jraph.get_number_of_padding_with_graphs_edges(batch.graph)) num_non_padded_graphs = ( batch.graph.n_node.shape[0] - jraph.get_number_of_padding_with_graphs_graphs(batch.graph)) avg_num_nodes = num_non_padded_nodes / num_non_padded_graphs avg_num_edges = num_non_padded_edges / num_non_padded_graphs stats.update( dict( global_step=global_step, grad_norm=grad_norm, param_norm=param_norm, learning_rate=learning_rate, ema_rate=ema_rate, avg_num_nodes=avg_num_nodes, avg_num_edges=avg_num_edges, )) ema_fn = (lambda x, y: # pylint:disable=g-long-lambda schedules.apply_ema_decay(x, y, ema_rate)) ema_params = jax.tree_multimap(ema_fn, ema_params, params) ema_network_state = jax.tree_multimap( ema_fn, ema_network_state, network_state, ) return (params, ema_params, new_network_state, ema_network_state, opt_state, stats) # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # def evaluate(self, global_step, rng, **unused_kwargs): """See base class.""" if not self._evaluating: self._eval_init() global_step = np.array(utils.get_first(global_step)) ema_params = utils.get_first(self._ema_params) ema_network_state = utils.get_first(self._ema_network_state) rng = utils.get_first(rng) # Evaluate using the ema params. results, predictions = self._evaluate_with_ensemble(ema_params, ema_network_state, rng) results['global_step'] = global_step # Store predictions if we got a path. self._maybe_save_predictions(predictions, global_step) return results def _evaluate_with_ensemble( self, params: hk.Params, state: hk.State, rng: jnp.ndarray, ): predictions_for_ensemble = [] num_iterations = self.config.num_eval_iterations_to_ensemble for iteration in range(num_iterations): results, predictions = self._evaluate_params(params, state, rng) self._log_results(f'Eval iteration {iteration}/{num_iterations}', results) predictions_for_ensemble.append(predictions) if len(predictions_for_ensemble) > 1: predictions = losses.ensemble_predictions_by_probability_average( predictions_for_ensemble) results = losses.get_accuracy_dict(predictions) self._log_results(f'Ensembled {num_iterations} iterations', results) return results, predictions def _maybe_save_predictions(self, predictions, global_step): if not self.config.predictions_dir: return split = self.config.eval.split output_dir = os.path.join( self.config.predictions_dir, _get_step_date_label(global_step)) os.makedirs(output_dir, exist_ok=True) output_path = os.path.join(output_dir, split + '.dill') with open(output_path, 'wb') as f: dill.dump(predictions, f) logging.info('Saved %s predictions at: %s', split, output_path) def _evaluate_params( self, params: hk.Params, state: hk.State, rng: jnp.ndarray, ): """Evaluate given set of parameters.""" num_valid = 0 predictions_list = [] labels_list = [] logits_list = [] indices_list = [] for i, batch in enumerate(self._make_eval_dataset_iterator()): model_output, _ = self.eval_forward( params, state, rng, batch.graph, ) (masked_indices, masked_predictions, masked_labels, masked_logits) = losses.get_predictions_labels_and_logits( model_output.node_logits, batch) predictions_list.append(masked_predictions) indices_list.append(masked_indices) 
      labels_list.append(masked_labels)
      logits_list.append(masked_logits)
      num_valid += jnp.sum(batch.label_mask)
      if i % 10 == 0:
        logging.info('Generated predictions for %d batches so far', i + 1)

    predictions = losses.Predictions(
        np.concatenate(indices_list, axis=0),
        np.concatenate(labels_list, axis=0),
        np.concatenate(predictions_list, axis=0),
        np.concatenate(logits_list, axis=0))

    if self.config.eval.split == 'test':
      results = dict(num_valid=num_valid, accuracy=np.nan)
    else:
      results = losses.get_accuracy_dict(predictions)

    return results, predictions

  def _log_results(self, prefix, results):
    logging_str = ', '.join(
        ['{}={:.4f}'.format(k, float(results[k]))
         for k in sorted(results.keys())])
    logging.info('%s: %s', prefix, logging_str)


def _restore_state_to_in_memory_checkpointer(restore_path):
  """Initializes experiment state from a checkpoint."""

  # Load pretrained experiment state.
  python_state_path = os.path.join(restore_path, 'checkpoint.dill')
  with open(python_state_path, 'rb') as f:
    pretrained_state = dill.load(f)
  logging.info('Restored checkpoint from %s', python_state_path)

  # Assign state to a dummy experiment instance for the in-memory checkpointer,
  # broadcasting to devices.
  dummy_experiment = Experiment(
      mode='train', init_rng=0, config=FLAGS.config.experiment_kwargs.config)
  for attribute, key in Experiment.CHECKPOINT_ATTRS.items():
    setattr(dummy_experiment, attribute,
            utils.bcast_local_devices(pretrained_state[key]))

  jaxline_state = dict(
      global_step=pretrained_state['global_step'],
      experiment_module=dummy_experiment)
  snapshot = utils.SnapshotNT(0, jaxline_state)

  # Finally, seed the jaxline `utils.InMemoryCheckpointer` global dict.
  utils.GLOBAL_CHECKPOINT_DICT['latest'] = utils.CheckpointNT(
      threading.local(), [snapshot])


def _get_step_date_label(global_step):
  # Date removing microseconds.
  date_str = datetime.datetime.now().isoformat().split('.')[0]
  return f'step_{global_step}_{date_str}'


def _save_state_from_in_memory_checkpointer(
    save_path, experiment_class: experiment.AbstractExperiment):
  """Saves experiment state to a checkpoint."""
  logging.info('Saving model.')
  for checkpoint_name, checkpoint in utils.GLOBAL_CHECKPOINT_DICT.items():
    if not checkpoint.history:
      logging.info('Nothing to save in "%s"', checkpoint_name)
      continue
    pickle_nest = checkpoint.history[-1].pickle_nest
    global_step = pickle_nest['global_step']

    state_dict = {'global_step': global_step}
    for attribute, key in experiment_class.CHECKPOINT_ATTRS.items():
      state_dict[key] = utils.get_first(
          getattr(pickle_nest['experiment_module'], attribute))
    save_dir = os.path.join(
        save_path, checkpoint_name, _get_step_date_label(global_step))
    python_state_path = os.path.join(save_dir, 'checkpoint.dill')
    os.makedirs(save_dir, exist_ok=True)
    with open(python_state_path, 'wb') as f:
      dill.dump(state_dict, f)
    logging.info(
        'Saved "%s" checkpoint to %s', checkpoint_name, python_state_path)


def _setup_signals(save_model_fn):
  """Sets up a signal for model saving."""

  # Save a model on Ctrl+C.
  def sigint_handler(unused_sig, unused_frame):
    # Ideally, rather than saving immediately, we would then "wait" for a good
    # time to save. In practice this reads from an in-memory checkpoint that
    # only saves every 30 seconds or so, so chances of race conditions are very
    # small.
    save_model_fn()
    logging.info(r'Use `Ctrl+\` to save and exit.')

  # Exit on `Ctrl+\`, saving a model.
prev_sigquit_handler = signal.getsignal(signal.SIGQUIT) def sigquit_handler(unused_sig, unused_frame): # Restore previous handler early, just in case something goes wrong in the # next lines, so it is possible to press again and exit. signal.signal(signal.SIGQUIT, prev_sigquit_handler) save_model_fn() logging.info(r'Exiting on `Ctrl+\`') # Re-raise for clean exit. os.kill(os.getpid(), signal.SIGQUIT) signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGQUIT, sigquit_handler) def main(argv, experiment_class: experiment.AbstractExperiment): # Maybe restore a model. restore_path = FLAGS.config.restore_path if restore_path: _restore_state_to_in_memory_checkpointer(restore_path) # Maybe save a model. save_dir = os.path.join(FLAGS.config.checkpoint_dir, 'models') if FLAGS.config.one_off_evaluate: save_model_fn = lambda: None # No need to save checkpoint in this case. else: save_model_fn = functools.partial( _save_state_from_in_memory_checkpointer, save_dir, experiment_class) _setup_signals(save_model_fn) # Save on Ctrl+C (continue) or Ctrl+\ (exit). try: platform.main(experiment_class, argv) finally: save_model_fn() # Save at the end of training or in case of exception. if __name__ == '__main__': jax_config.update('jax_debug_nans', False) flags.mark_flag_as_required('config') app.run(lambda argv: main(argv, Experiment))
deepmind-research-master
ogb_lsc/mag/experiment.py
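The EMA bookkeeping at the end of `_update_func` is compact, so this sketch spells it out with a stand-in `apply_ema_decay`. The convex-blend semantics are an assumption here; the real implementation lives in the `schedules` module, which is not shown in this dump.

# Sketch of the EMA parameter update performed in _update_func.
import jax
import jax.numpy as jnp

def apply_ema_decay(ema_value, current_value, ema_rate):
  # Assumed semantics: keep `ema_rate` of the old average.
  return ema_value * ema_rate + current_value * (1.0 - ema_rate)

params = {'w': jnp.array([1.0, 2.0])}
ema_params = {'w': jnp.zeros(2)}
ema_rate = 0.999  # In the experiment this comes from ema_decay_schedule.

ema_params = jax.tree_map(
    lambda e, p: apply_ema_decay(e, p, ema_rate), ema_params, params)
print(ema_params['w'])  # -> [0.001 0.002]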
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dataset utilities."""

import functools
import pathlib
from typing import Dict, Tuple

from absl import logging
from graph_nets import graphs as tf_graphs
from graph_nets import utils_tf
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
import tqdm

# pylint: disable=g-bad-import-order
import sub_sampler

Path = pathlib.Path


NUM_PAPERS = 121751666
NUM_AUTHORS = 122383112
NUM_INSTITUTIONS = 25721
EMBEDDING_SIZE = 768
NUM_CLASSES = 153

NUM_NODES = NUM_PAPERS + NUM_AUTHORS + NUM_INSTITUTIONS

NUM_EDGES = 1_728_364_232
assert NUM_NODES == 244_160_499

NUM_K_FOLD_SPLITS = 10

OFFSETS = {
    "paper": 0,
    "author": NUM_PAPERS,
    "institution": NUM_PAPERS + NUM_AUTHORS,
}

SIZES = {
    "paper": NUM_PAPERS,
    "author": NUM_AUTHORS,
    "institution": NUM_INSTITUTIONS
}

RAW_DIR = Path("raw")
PREPROCESSED_DIR = Path("preprocessed")

RAW_NODE_FEATURES_FILENAME = RAW_DIR / "node_feat.npy"
RAW_NODE_LABELS_FILENAME = RAW_DIR / "node_label.npy"
RAW_NODE_YEAR_FILENAME = RAW_DIR / "node_year.npy"

TRAIN_INDEX_FILENAME = RAW_DIR / "train_idx.npy"
VALID_INDEX_FILENAME = RAW_DIR / "valid_idx.npy"
TEST_INDEX_FILENAME = RAW_DIR / "test_idx.npy"

EDGES_PAPER_PAPER_B = PREPROCESSED_DIR / "paper_paper_b.npz"
EDGES_PAPER_PAPER_B_T = PREPROCESSED_DIR / "paper_paper_b_t.npz"
EDGES_AUTHOR_INSTITUTION = PREPROCESSED_DIR / "author_institution.npz"
EDGES_INSTITUTION_AUTHOR = PREPROCESSED_DIR / "institution_author.npz"
EDGES_AUTHOR_PAPER = PREPROCESSED_DIR / "author_paper.npz"
EDGES_PAPER_AUTHOR = PREPROCESSED_DIR / "paper_author.npz"

PCA_PAPER_FEATURES_FILENAME = PREPROCESSED_DIR / "paper_feat_pca_129.npy"
PCA_AUTHOR_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "author_feat_from_paper_feat_pca_129.npy")
PCA_INSTITUTION_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "institution_feat_from_paper_feat_pca_129.npy")
PCA_MERGED_FEATURES_FILENAME = (
    PREPROCESSED_DIR / "merged_feat_from_paper_feat_pca_129.npy")

NEIGHBOR_INDICES_FILENAME = PREPROCESSED_DIR / "neighbor_indices.npy"
NEIGHBOR_DISTANCES_FILENAME = PREPROCESSED_DIR / "neighbor_distances.npy"

FUSED_NODE_LABELS_FILENAME = PREPROCESSED_DIR / "fused_node_labels.npy"
FUSED_PAPER_EDGES_FILENAME = PREPROCESSED_DIR / "fused_paper_edges.npz"
FUSED_PAPER_EDGES_T_FILENAME = PREPROCESSED_DIR / "fused_paper_edges_t.npz"

K_FOLD_SPLITS_DIR = Path("k_fold_splits")


def get_raw_directory(data_root):
  return Path(data_root) / "raw"


def get_preprocessed_directory(data_root):
  return Path(data_root) / "preprocessed"


def _log_path_decorator(fn):
  def _decorated_fn(path, **kwargs):
    logging.info("Loading %s", path)
    output = fn(path, **kwargs)
    logging.info("Finished loading %s", path)
    return output
  return _decorated_fn


@_log_path_decorator
def load_csr(path, debug=False):
  if debug:
    # Dummy matrix for debugging.
return sp.csr_matrix(np.zeros([10, 10])) return sp.load_npz(str(path)) @_log_path_decorator def load_npy(path): return np.load(str(path)) @functools.lru_cache() def get_arrays(data_root="/data/", use_fused_node_labels=True, use_fused_node_adjacencies=True, return_pca_embeddings=True, k_fold_split_id=None, return_adjacencies=True, use_dummy_adjacencies=False): """Returns all arrays needed for training.""" logging.info("Starting to get files") data_root = Path(data_root) array_dict = {} array_dict["paper_year"] = load_npy(data_root / RAW_NODE_YEAR_FILENAME) if k_fold_split_id is None: train_indices = load_npy(data_root / TRAIN_INDEX_FILENAME) valid_indices = load_npy(data_root / VALID_INDEX_FILENAME) else: train_indices, valid_indices = get_train_and_valid_idx_for_split( k_fold_split_id, num_splits=NUM_K_FOLD_SPLITS, root_path=data_root / K_FOLD_SPLITS_DIR) array_dict["train_indices"] = train_indices array_dict["valid_indices"] = valid_indices array_dict["test_indices"] = load_npy(data_root / TEST_INDEX_FILENAME) if use_fused_node_labels: array_dict["paper_label"] = load_npy(data_root / FUSED_NODE_LABELS_FILENAME) else: array_dict["paper_label"] = load_npy(data_root / RAW_NODE_LABELS_FILENAME) if return_adjacencies: logging.info("Starting to get adjacencies.") if use_fused_node_adjacencies: paper_paper_index = load_csr( data_root / FUSED_PAPER_EDGES_FILENAME, debug=use_dummy_adjacencies) paper_paper_index_t = load_csr( data_root / FUSED_PAPER_EDGES_T_FILENAME, debug=use_dummy_adjacencies) else: paper_paper_index = load_csr( data_root / EDGES_PAPER_PAPER_B, debug=use_dummy_adjacencies) paper_paper_index_t = load_csr( data_root / EDGES_PAPER_PAPER_B_T, debug=use_dummy_adjacencies) array_dict.update( dict( author_institution_index=load_csr( data_root / EDGES_AUTHOR_INSTITUTION, debug=use_dummy_adjacencies), institution_author_index=load_csr( data_root / EDGES_INSTITUTION_AUTHOR, debug=use_dummy_adjacencies), author_paper_index=load_csr( data_root / EDGES_AUTHOR_PAPER, debug=use_dummy_adjacencies), paper_author_index=load_csr( data_root / EDGES_PAPER_AUTHOR, debug=use_dummy_adjacencies), paper_paper_index=paper_paper_index, paper_paper_index_t=paper_paper_index_t, )) if return_pca_embeddings: array_dict["bert_pca_129"] = np.load( data_root / PCA_MERGED_FEATURES_FILENAME, mmap_mode="r") assert array_dict["bert_pca_129"].shape == (NUM_NODES, 129) logging.info("Finish getting files") # pytype: disable=attribute-error assert array_dict["paper_year"].shape[0] == NUM_PAPERS assert array_dict["paper_label"].shape[0] == NUM_PAPERS if return_adjacencies and not use_dummy_adjacencies: array_dict = _fix_adjacency_shapes(array_dict) assert array_dict["paper_author_index"].shape == (NUM_PAPERS, NUM_AUTHORS) assert array_dict["author_paper_index"].shape == (NUM_AUTHORS, NUM_PAPERS) assert array_dict["paper_paper_index"].shape == (NUM_PAPERS, NUM_PAPERS) assert array_dict["paper_paper_index_t"].shape == (NUM_PAPERS, NUM_PAPERS) assert array_dict["institution_author_index"].shape == ( NUM_INSTITUTIONS, NUM_AUTHORS) assert array_dict["author_institution_index"].shape == ( NUM_AUTHORS, NUM_INSTITUTIONS) # pytype: enable=attribute-error return array_dict def add_nodes_year(graph, paper_year): nodes = graph.nodes.copy() indices = nodes["index"] year = paper_year[np.minimum(indices, paper_year.shape[0] - 1)].copy() year[nodes["type"] != 0] = 1900 nodes["year"] = year return graph._replace(nodes=nodes) def add_nodes_label(graph, paper_label): nodes = graph.nodes.copy() indices = nodes["index"] label = 
paper_label[np.minimum(indices, paper_label.shape[0] - 1)]
  label[nodes["type"] != 0] = 0
  nodes["label"] = label
  return graph._replace(nodes=nodes)


def add_nodes_embedding_from_array(graph, array):
  """Adds embeddings from the given array for the node indices."""
  nodes = graph.nodes.copy()
  indices = nodes["index"]
  embedding_indices = indices.copy()
  embedding_indices[nodes["type"] == 1] += NUM_PAPERS
  embedding_indices[nodes["type"] == 2] += NUM_PAPERS + NUM_AUTHORS
  # Gather the embeddings for the indices.
  nodes["features"] = array[embedding_indices]
  return graph._replace(nodes=nodes)


def get_graph_subsampling_dataset(
    prefix, arrays, shuffle_indices, ratio_unlabeled_data_to_labeled_data,
    max_nodes, max_edges, **subsampler_kwargs):
  """Returns tf_dataset for online sampling."""

  def generator():
    labeled_indices = arrays[f"{prefix}_indices"]
    if ratio_unlabeled_data_to_labeled_data > 0:
      num_unlabeled_data_to_add = int(ratio_unlabeled_data_to_labeled_data *
                                      labeled_indices.shape[0])
      unlabeled_indices = np.random.choice(
          NUM_PAPERS, size=num_unlabeled_data_to_add, replace=False)
      root_node_indices = np.concatenate([labeled_indices, unlabeled_indices])
    else:
      root_node_indices = labeled_indices
    if shuffle_indices:
      root_node_indices = root_node_indices.copy()
      np.random.shuffle(root_node_indices)

    for index in root_node_indices:
      graph = sub_sampler.subsample_graph(
          index,
          arrays["author_institution_index"],
          arrays["institution_author_index"],
          arrays["author_paper_index"],
          arrays["paper_author_index"],
          arrays["paper_paper_index"],
          arrays["paper_paper_index_t"],
          paper_years=arrays["paper_year"],
          max_nodes=max_nodes,
          max_edges=max_edges,
          **subsampler_kwargs)

      graph = add_nodes_label(graph, arrays["paper_label"])
      graph = add_nodes_year(graph, arrays["paper_year"])
      graph = tf_graphs.GraphsTuple(*graph)
      yield graph

  sample_graph = next(generator())

  return tf.data.Dataset.from_generator(
      generator,
      output_signature=utils_tf.specs_from_graphs_tuple(sample_graph))


def paper_features_to_author_features(
    author_paper_index, paper_features):
  """Averages paper features to authors."""
  assert paper_features.shape[0] == NUM_PAPERS
  assert author_paper_index.shape[0] == NUM_AUTHORS
  author_features = np.zeros(
      [NUM_AUTHORS, paper_features.shape[1]], dtype=paper_features.dtype)

  for author_i in range(NUM_AUTHORS):
    paper_indices = author_paper_index[author_i].indices
    author_features[author_i] = paper_features[paper_indices].mean(
        axis=0, dtype=np.float32)
    if author_i % 10000 == 0:
      logging.info("%d/%d", author_i, NUM_AUTHORS)
  return author_features


def author_features_to_institution_features(
    institution_author_index, author_features):
  """Averages author features to institutions."""
  assert author_features.shape[0] == NUM_AUTHORS
  assert institution_author_index.shape[0] == NUM_INSTITUTIONS

  institution_features = np.zeros(
      [NUM_INSTITUTIONS, author_features.shape[1]],
      dtype=author_features.dtype)

  for institution_i in range(NUM_INSTITUTIONS):
    author_indices = institution_author_index[institution_i].indices
    institution_features[institution_i] = author_features[
        author_indices].mean(axis=0, dtype=np.float32)
    if institution_i % 10000 == 0:
      logging.info("%d/%d", institution_i, NUM_INSTITUTIONS)
  return institution_features


def generate_fused_paper_adjacency_matrix(neighbor_indices,
                                          neighbor_distances,
                                          paper_paper_csr):
  """Generates fused adjacency matrix for identical nodes."""
  # First construct set of identical node indices.
  # NOTE: Since we take only top K=26 identical pairs for each node, this is
  # not actually exhaustive. Also, if A and B are equal, and B and C are
  # equal, this method would not necessarily detect A and C being equal.
  # However, this should capture almost all cases.
  logging.info("Generating fused paper adjacency matrix")
  eps = 0.0
  mask = ((neighbor_indices != np.mgrid[:neighbor_indices.shape[0], :1]) &
          (neighbor_distances <= eps))
  identical_pairs = list(map(tuple, np.nonzero(mask)))
  del mask

  # Have a csc version for fast column access.
  paper_paper_csc = paper_paper_csr.tocsc()

  # Construct new matrix as coo, starting off with original rows/cols.
  paper_paper_coo = paper_paper_csr.tocoo()
  new_rows = [paper_paper_coo.row]
  new_cols = [paper_paper_coo.col]

  for pair in tqdm.tqdm(identical_pairs):
    # STEP ONE: First merge papers being cited by the pair.
    # Add edges from second paper, to all papers cited by first paper.
    cited_by_first = paper_paper_csr.getrow(pair[0]).nonzero()[1]
    if cited_by_first.shape[0] > 0:
      new_rows.append(pair[1] * np.ones_like(cited_by_first))
      new_cols.append(cited_by_first)

    # Add edges from first paper, to all papers cited by second paper.
    cited_by_second = paper_paper_csr.getrow(pair[1]).nonzero()[1]
    if cited_by_second.shape[0] > 0:
      new_rows.append(pair[0] * np.ones_like(cited_by_second))
      new_cols.append(cited_by_second)

    # STEP TWO: Then merge papers that cite the pair.
    # Add edges to second paper, from all papers citing the first paper.
    citing_first = paper_paper_csc.getcol(pair[0]).nonzero()[0]
    if citing_first.shape[0] > 0:
      new_rows.append(citing_first)
      new_cols.append(pair[1] * np.ones_like(citing_first))

    # Add edges to first paper, from all papers citing the second paper.
    citing_second = paper_paper_csc.getcol(pair[1]).nonzero()[0]
    if citing_second.shape[0] > 0:
      new_rows.append(citing_second)
      new_cols.append(pair[0] * np.ones_like(citing_second))

  logging.info("Done with adjacency loop")
  paper_paper_coo_shape = paper_paper_coo.shape
  del paper_paper_csr
  del paper_paper_csc
  del paper_paper_coo

  # All done; now concatenate everything together and form new matrix.
  new_rows = np.concatenate(new_rows)
  new_cols = np.concatenate(new_cols)
  return sp.coo_matrix(
      (np.ones_like(new_rows, dtype=bool), (new_rows, new_cols)),
      shape=paper_paper_coo_shape).tocsr()


def generate_k_fold_splits(
    train_idx, valid_idx, output_path, num_splits=NUM_K_FOLD_SPLITS):
  """Generates splits adding fractions of the validation split to training."""
  output_path = Path(output_path)
  np.random.seed(42)
  valid_idx = np.random.permutation(valid_idx)
  # Split into `num_splits` (almost) identically sized arrays.
  valid_idx_parts = np.array_split(valid_idx, num_splits)

  for i in range(num_splits):
    # Add all but the i'th subpart to the training set.
    new_train_idx = np.concatenate(
        [train_idx, *valid_idx_parts[:i], *valid_idx_parts[i+1:]])
    # The i'th subpart is the validation set.
    new_valid_idx = valid_idx_parts[i]
    train_path = output_path / f"train_idx_{i}_{num_splits}.npy"
    valid_path = output_path / f"valid_idx_{i}_{num_splits}.npy"
    np.save(train_path, new_train_idx)
    np.save(valid_path, new_valid_idx)
    logging.info("Saved: %s", train_path)
    logging.info("Saved: %s", valid_path)


def get_train_and_valid_idx_for_split(
    split_id: int,
    num_splits: int,
    root_path: str,
) -> Tuple[np.ndarray, np.ndarray]:
  """Returns train and valid indices for given split."""
  new_train_idx = load_npy(f"{root_path}/train_idx_{split_id}_{num_splits}.npy")
  new_valid_idx = load_npy(f"{root_path}/valid_idx_{split_id}_{num_splits}.npy")
  return new_train_idx, new_valid_idx


def generate_fused_node_labels(neighbor_indices, neighbor_distances,
                               node_labels, train_indices, valid_indices,
                               test_indices):
  """Propagates labels from training nodes to identical non-eval nodes."""
  logging.info("Generating fused node labels")

  valid_indices = set(valid_indices.tolist())
  test_indices = set(test_indices.tolist())
  valid_or_test_indices = valid_indices | test_indices
  train_indices = train_indices[train_indices < neighbor_indices.shape[0]]

  # Go through list of all pairs where one node is in training set, and
  for i in tqdm.tqdm(train_indices):
    for j in range(neighbor_indices.shape[1]):
      other_index = neighbor_indices[i][j]
      # if the other is not a validation or test node,
      if other_index in valid_or_test_indices:
        continue
      # and they are identical,
      if neighbor_distances[i][j] == 0:
        # assign the label of the training node to the other node.
        node_labels[other_index] = node_labels[i]

  return node_labels


def _pad_to_shape(
    sparse_csr_matrix: sp.csr_matrix,
    output_shape: Tuple[int, int]) -> sp.csr_matrix:
  """Pads a csr sparse matrix to the given shape."""

  # We should not try to expand anything smaller.
  assert np.all(sparse_csr_matrix.shape <= output_shape)

  # Maybe it already has the right shape.
  if sparse_csr_matrix.shape == output_shape:
    return sparse_csr_matrix

  # Append as many indptr elements as we need to match the leading size.
  # This is achieved by just padding with copies of the last indptr element.
  required_padding = output_shape[0] - sparse_csr_matrix.shape[0]
  updated_indptr = np.concatenate(
      [sparse_csr_matrix.indptr] +
      [sparse_csr_matrix.indptr[-1:]] * required_padding,
      axis=0)

  # The change in trailing size does not have structural implications, it just
  # determines the highest possible value for the indices, so it is sufficient
  # to just pass the new output shape, with the correct trailing size.
  return sp.csr_matrix(
      (sparse_csr_matrix.data, sparse_csr_matrix.indices, updated_indptr),
      shape=output_shape)


def _fix_adjacency_shapes(
    arrays: Dict[str, sp.csr_matrix],
) -> Dict[str, sp.csr_matrix]:
  """Fixes the shapes of the adjacency matrices."""
  arrays = arrays.copy()
  for key in [
      "author_institution_index",
      "author_paper_index",
      "paper_paper_index",
      "institution_author_index",
      "paper_author_index",
      "paper_paper_index_t",
  ]:
    type_sender = key.split("_")[0]
    type_receiver = key.split("_")[1]
    arrays[key] = _pad_to_shape(
        arrays[key],
        output_shape=(SIZES[type_sender], SIZES[type_receiver]))
  return arrays
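# Illustrative sketch (editorial addition, not part of the original module):
# a hand-checkable use of `_pad_to_shape` above, showing the indptr-padding
# trick. Assumes numpy/scipy are imported as `np`/`sp`, as in this file.
def _pad_to_shape_example():
  """Hypothetical demo; not used anywhere in the pipeline."""
  small = sp.csr_matrix(np.array([[1, 0, 2], [0, 3, 0]]))  # Shape (2, 3).
  padded = _pad_to_shape(small, output_shape=(4, 5))
  assert padded.shape == (4, 5)
  # The original entries survive; all padded rows/columns are empty.
  np.testing.assert_array_equal(padded.toarray()[:2, :3], small.toarray())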
deepmind-research-master
ogb_lsc/mag/data_utils.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Losses and related utilities.""" from typing import Mapping, Tuple, Sequence, NamedTuple, Dict, Optional import jax import jax.numpy as jnp import jraph import numpy as np # pylint: disable=g-bad-import-order import datasets LogsDict = Mapping[str, jnp.ndarray] class Predictions(NamedTuple): node_indices: np.ndarray labels: np.ndarray predictions: np.ndarray logits: np.ndarray def node_classification_loss( logits: jnp.ndarray, batch: datasets.Batch, extra_stats: bool = False, ) -> Tuple[jnp.ndarray, LogsDict]: """Gets node-wise classification loss and statistics.""" log_probs = jax.nn.log_softmax(logits) loss = -jnp.sum(log_probs * batch.node_labels, axis=-1) num_valid = jnp.sum(batch.label_mask) labels = jnp.argmax(batch.node_labels, axis=-1) is_correct = (jnp.argmax(log_probs, axis=-1) == labels) num_correct = jnp.sum(is_correct * batch.label_mask) loss = jnp.sum(loss * batch.label_mask) / (num_valid + 1e-8) accuracy = num_correct / (num_valid + 1e-8) entropy = -jnp.mean(jnp.sum(jax.nn.softmax(logits) * log_probs, axis=-1)) stats = { 'classification_loss': loss, 'prediction_entropy': entropy, 'accuracy': accuracy, 'num_valid': num_valid, 'num_correct': num_correct, } if extra_stats: for k in range(1, 6): stats[f'top_{k}_correct'] = topk_correct(logits, labels, batch.label_mask, k) return loss, stats def get_predictions_labels_and_logits( logits: jnp.ndarray, batch: datasets.Batch, ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]: """Gets prediction labels and logits.""" mask = batch.label_mask > 0. 
indices = batch.node_indices[mask] logits = logits[mask] predictions = jnp.argmax(logits, axis=-1) labels = jnp.argmax(batch.node_labels[mask], axis=-1) return indices, predictions, labels, logits def topk_correct( logits: jnp.ndarray, labels: jnp.ndarray, valid_mask: jnp.ndarray, topk: int, ) -> jnp.ndarray: """Calculates top-k accuracy.""" pred_ranking = jnp.argsort(logits, axis=1)[:, ::-1] pred_ranking = pred_ranking[:, :topk] is_correct = jnp.any(pred_ranking == labels[:, jnp.newaxis], axis=1) return (is_correct * valid_mask).sum() def ensemble_predictions_by_probability_average( predictions_list: Sequence[Predictions]) -> Predictions: """Ensemble predictions by ensembling the probabilities.""" _assert_consistent_predictions(predictions_list) all_probs = np.stack([ jax.nn.softmax(predictions.logits, axis=-1) for predictions in predictions_list ], axis=0) ensembled_logits = np.log(all_probs.mean(0)) return predictions_list[0]._replace( logits=ensembled_logits, predictions=np.argmax(ensembled_logits, axis=-1)) def get_accuracy_dict(predictions: Predictions) -> Dict[str, float]: """Returns the accuracy dict.""" output_dict = {} output_dict['num_valid'] = predictions.predictions.shape[0] matches = (predictions.labels == predictions.predictions) output_dict['accuracy'] = matches.mean() pred_ranking = jnp.argsort(predictions.logits, axis=1)[:, ::-1] for k in range(1, 6): matches = jnp.any( pred_ranking[:, :k] == predictions.labels[:, None], axis=1) output_dict[f'top_{k}_correct'] = matches.mean() return output_dict def bgrl_loss( first_online_predictions: jnp.ndarray, second_target_projections: jnp.ndarray, second_online_predictions: jnp.ndarray, first_target_projections: jnp.ndarray, symmetrize: bool, valid_mask: jnp.ndarray, ) -> Tuple[jnp.ndarray, LogsDict]: """Implements BGRL loss.""" first_side_node_loss = jnp.sum( jnp.square( _l2_normalize(first_online_predictions, axis=-1) - _l2_normalize(second_target_projections, axis=-1)), axis=-1) if symmetrize: second_side_node_loss = jnp.sum( jnp.square( _l2_normalize(second_online_predictions, axis=-1) - _l2_normalize(first_target_projections, axis=-1)), axis=-1) node_loss = first_side_node_loss + second_side_node_loss else: node_loss = first_side_node_loss loss = (node_loss * valid_mask).sum() / (valid_mask.sum() + 1e-6) return loss, dict(bgrl_loss=loss) def get_corrupted_view( graph: jraph.GraphsTuple, feature_drop_prob: float, edge_drop_prob: float, rng_key: jnp.ndarray, ) -> jraph.GraphsTuple: """Returns corrupted graph view.""" node_key, edge_key = jax.random.split(rng_key) def mask_feature(x): mask = jax.random.bernoulli(node_key, 1 - feature_drop_prob, x.shape) return x * mask # Randomly mask features with fixed probability. nodes = jax.tree_map(mask_feature, graph.nodes) # Simulate dropping of edges by changing genuine edges to self-loops on # the padded node. num_edges = graph.senders.shape[0] last_node_idx = graph.n_node.sum() - 1 edge_mask = jax.random.bernoulli(edge_key, 1 - edge_drop_prob, [num_edges]) senders = jnp.where(edge_mask, graph.senders, last_node_idx) receivers = jnp.where(edge_mask, graph.receivers, last_node_idx) # Note that n_edge will now be invalid since edges in the middle of the list # will correspond to the final graph. Set n_edge to None to ensure we do not # accidentally use this. 
return graph._replace( nodes=nodes, senders=senders, receivers=receivers, n_edge=None, ) def _assert_consistent_predictions(predictions_list: Sequence[Predictions]): first_predictions = predictions_list[0] for predictions in predictions_list: assert np.all(predictions.node_indices == first_predictions.node_indices) assert np.all(predictions.labels == first_predictions.labels) assert np.all( predictions.predictions == np.argmax(predictions.logits, axis=-1)) def _l2_normalize( x: jnp.ndarray, axis: Optional[int] = None, epsilon: float = 1e-6, ) -> jnp.ndarray: return x * jax.lax.rsqrt( jnp.sum(jnp.square(x), axis=axis, keepdims=True) + epsilon)
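# Illustrative sketch (editorial addition, not part of the original module):
# a hand-checkable call to `topk_correct` with two examples and three classes.
def _topk_correct_example():
  logits = jnp.array([[0.1, 0.9, 0.0],
                      [0.8, 0.1, 0.1]])
  labels = jnp.array([2, 0])
  valid_mask = jnp.ones([2])
  # Top-1 predictions are [1, 0], so only the second row is correct.
  assert int(topk_correct(logits, labels, valid_mask, topk=1)) == 1
  # With k=3 every label is inside the top-k set.
  assert int(topk_correct(logits, labels, valid_mask, topk=3)) == 2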
deepmind-research-master
ogb_lsc/mag/losses.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Find neighborhoods around paper feature embeddings.""" import pathlib from absl import app from absl import flags from absl import logging import annoy import numpy as np import scipy.sparse as sp # pylint: disable=g-bad-import-order import data_utils Path = pathlib.Path _PAPER_PAPER_B_PATH = 'ogb_mag_adjacencies/paper_paper_b.npz' FLAGS = flags.FLAGS flags.DEFINE_string('data_root', None, 'Data root directory') def _read_paper_pca_features(): data_root = Path(FLAGS.data_root) path = data_root / data_utils.PCA_PAPER_FEATURES_FILENAME with open(path, 'rb') as fid: return np.load(fid) def _read_adjacency_indices(): # Get adjacencies. return data_utils.get_arrays( data_root=FLAGS.data_root, use_fused_node_labels=False, use_fused_node_adjacencies=False, return_pca_embeddings=False, ) def build_annoy_index(features): """Build the Annoy index.""" logging.info('Building annoy index') num_vectors, vector_size = features.shape annoy_index = annoy.AnnoyIndex(vector_size, 'euclidean') for i, x in enumerate(features): annoy_index.add_item(i, x) if i % 1000000 == 0: logging.info('Adding: %d / %d (%.3g %%)', i, num_vectors, 100 * i / num_vectors) n_trees = 10 _ = annoy_index.build(n_trees) return annoy_index def _get_annoy_index_path(): return Path(FLAGS.data_root) / data_utils.PREPROCESSED_DIR / 'annoy_index.ann' def save_annoy_index(annoy_index): logging.info('Saving annoy index') index_path = _get_annoy_index_path() index_path.parent.mkdir(parents=True, exist_ok=True) annoy_index.save(str(index_path)) def read_annoy_index(features): index_path = _get_annoy_index_path() vector_size = features.shape[1] annoy_index = annoy.AnnoyIndex(vector_size, 'euclidean') annoy_index.load(str(index_path)) return annoy_index def compute_neighbor_indices_and_distances(features): """Use the pre-built Annoy index to compute neighbor indices and distances.""" logging.info('Computing neighbors and distances') annoy_index = read_annoy_index(features) num_vectors = features.shape[0] k = 20 pad_k = 5 search_k = -1 neighbor_indices = np.zeros([num_vectors, k + pad_k + 1], dtype=np.int32) neighbor_distances = np.zeros([num_vectors, k + pad_k + 1], dtype=np.float32) for i in range(num_vectors): neighbor_indices[i], neighbor_distances[i] = annoy_index.get_nns_by_item( i, k + pad_k + 1, search_k=search_k, include_distances=True) if i % 10000 == 0: logging.info('Finding neighbors %d / %d', i, num_vectors) return neighbor_indices, neighbor_distances def _write_neighbors(neighbor_indices, neighbor_distances): """Write neighbor indices and distances.""" logging.info('Writing neighbors') indices_path = Path(FLAGS.data_root) / data_utils.NEIGHBOR_INDICES_FILENAME distances_path = ( Path(FLAGS.data_root) / data_utils.NEIGHBOR_DISTANCES_FILENAME) indices_path.parent.mkdir(parents=True, exist_ok=True) distances_path.parent.mkdir(parents=True, exist_ok=True) with open(indices_path, 'wb') as fid: np.save(fid, neighbor_indices) with open(distances_path, 'wb') 
as fid: np.save(fid, neighbor_distances) def _write_fused_edges(fused_paper_adjacency_matrix): """Write fused edges.""" data_root = Path(FLAGS.data_root) edges_path = data_root / data_utils.FUSED_PAPER_EDGES_FILENAME edges_t_path = data_root / data_utils.FUSED_PAPER_EDGES_T_FILENAME edges_path.parent.mkdir(parents=True, exist_ok=True) edges_t_path.parent.mkdir(parents=True, exist_ok=True) with open(edges_path, 'wb') as fid: sp.save_npz(fid, fused_paper_adjacency_matrix) with open(edges_t_path, 'wb') as fid: sp.save_npz(fid, fused_paper_adjacency_matrix.T) def _write_fused_nodes(fused_node_labels): """Write fused nodes.""" labels_path = Path(FLAGS.data_root) / data_utils.FUSED_NODE_LABELS_FILENAME labels_path.parent.mkdir(parents=True, exist_ok=True) with open(labels_path, 'wb') as fid: np.save(fid, fused_node_labels) def main(unused_argv): paper_pca_features = _read_paper_pca_features() # Find neighbors. annoy_index = build_annoy_index(paper_pca_features) save_annoy_index(annoy_index) neighbor_indices, neighbor_distances = compute_neighbor_indices_and_distances( paper_pca_features) del paper_pca_features _write_neighbors(neighbor_indices, neighbor_distances) data = _read_adjacency_indices() paper_paper_csr = data['paper_paper_index'] paper_label = data['paper_label'] train_indices = data['train_indices'] valid_indices = data['valid_indices'] test_indices = data['test_indices'] del data fused_paper_adjacency_matrix = data_utils.generate_fused_paper_adjacency_matrix( neighbor_indices, neighbor_distances, paper_paper_csr) _write_fused_edges(fused_paper_adjacency_matrix) del fused_paper_adjacency_matrix del paper_paper_csr fused_node_labels = data_utils.generate_fused_node_labels( neighbor_indices, neighbor_distances, paper_label, train_indices, valid_indices, test_indices) _write_fused_nodes(fused_node_labels) if __name__ == '__main__': flags.mark_flag_as_required('data_root') app.run(main)
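# Illustrative mini-example (editorial addition, not part of the original
# script): the Annoy calls used above, exercised on a two-point index.
def _tiny_annoy_example():
  index = annoy.AnnoyIndex(2, 'euclidean')
  index.add_item(0, [0.0, 0.0])
  index.add_item(1, [0.0, 1.0])
  index.build(1)  # One tree is enough for two points.
  ids, dists = index.get_nns_by_item(0, 2, include_distances=True)
  # Item 0 is its own nearest neighbour (distance 0); item 1 follows at 1.0.
  assert ids == [0, 1] and abs(dists[1] - 1.0) < 1e-6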
deepmind-research-master
ogb_lsc/mag/neighbor_builder.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dynamic batching utilities."""

from typing import Generator, Iterable, Iterator, Sequence, Tuple

import jax.tree_util as tree
import jraph
import numpy as np

_NUMBER_FIELDS = ("n_node", "n_edge", "n_graph")


def dynamically_batch(graphs_tuple_iterator: Iterator[jraph.GraphsTuple],
                      n_node: int, n_edge: int,
                      n_graph: int) -> Generator[jraph.GraphsTuple, None, None]:
  """Dynamically batches trees with `jraph.GraphsTuples` up to the given sizes.

  Elements of the `graphs_tuple_iterator` will be incrementally added to a
  batch until the limits defined by `n_node`, `n_edge` and `n_graph` are
  reached. This means each element yielded by this generator is a batch padded
  up to exactly these sizes.

  For situations where you have variable sized data, it's useful to be able to
  have variable sized batches. This is especially the case if you have a loss
  defined on the variable shaped element (for example, nodes in a graph).

  Args:
    graphs_tuple_iterator: An iterator of `jraph.GraphsTuples`.
    n_node: The maximum number of nodes in a batch.
    n_edge: The maximum number of edges in a batch.
    n_graph: The maximum number of graphs in a batch.

  Yields:
    A `jraph.GraphsTuple` batch of graphs.

  Raises:
    ValueError: if the number of graphs is < 2.
    RuntimeError: if the `graphs_tuple_iterator` contains elements which are
      not `jraph.GraphsTuple`s.
    RuntimeError: if a graph is found which is larger than the batch size.
  """
  if n_graph < 2:
    raise ValueError("The number of graphs in a batch size must be greater or "
                     f"equal to `2` for padding with graphs, got {n_graph}.")
  valid_batch_size = (n_node - 1, n_edge, n_graph - 1)
  accumulated_graphs = []
  num_accumulated_nodes = 0
  num_accumulated_edges = 0
  num_accumulated_graphs = 0
  for element in graphs_tuple_iterator:
    element_nodes, element_edges, element_graphs = _get_graph_size(element)
    if _is_over_batch_size(element, valid_batch_size):
      graph_size = element_nodes, element_edges, element_graphs
      graph_size = {k: v for k, v in zip(_NUMBER_FIELDS, graph_size)}
      batch_size = {k: v for k, v in zip(_NUMBER_FIELDS, valid_batch_size)}
      raise RuntimeError("Found graph bigger than batch size. Valid Batch "
                         f"Size: {batch_size}, Graph Size: {graph_size}")

    if not accumulated_graphs:
      # If this is the first element of the batch, set it and continue.
      accumulated_graphs = [element]
      num_accumulated_nodes = element_nodes
      num_accumulated_edges = element_edges
      num_accumulated_graphs = element_graphs
      continue
    else:
      # Otherwise check if there is space for the graph in the batch:
      if ((num_accumulated_graphs + element_graphs > n_graph - 1) or
          (num_accumulated_nodes + element_nodes > n_node - 1) or
          (num_accumulated_edges + element_edges > n_edge)):
        # If not, yield the accumulated batch padded to the requested sizes,
        # and start a new batch with this element.
        batched_graph = _batch_np(accumulated_graphs)
        yield jraph.pad_with_graphs(batched_graph, n_node, n_edge, n_graph)
        accumulated_graphs = [element]
        num_accumulated_nodes = element_nodes
        num_accumulated_edges = element_edges
        num_accumulated_graphs = element_graphs
      else:
        # Otherwise, add the element to the current batch.
        accumulated_graphs.append(element)
        num_accumulated_nodes += element_nodes
        num_accumulated_edges += element_edges
        num_accumulated_graphs += element_graphs

  # We may still have data in batched graph.
  if accumulated_graphs:
    batched_graph = _batch_np(accumulated_graphs)
    yield jraph.pad_with_graphs(batched_graph, n_node, n_edge, n_graph)


def _batch_np(graphs: Sequence[jraph.GraphsTuple]) -> jraph.GraphsTuple:
  # Calculates offsets for sender and receiver arrays, caused by concatenating
  # the nodes arrays.
  offsets = np.cumsum(np.array([0] + [np.sum(g.n_node) for g in graphs[:-1]]))

  def _map_concat(nests):
    concat = lambda *args: np.concatenate(args)
    return tree.tree_map(concat, *nests)

  return jraph.GraphsTuple(
      n_node=np.concatenate([g.n_node for g in graphs]),
      n_edge=np.concatenate([g.n_edge for g in graphs]),
      nodes=_map_concat([g.nodes for g in graphs]),
      edges=_map_concat([g.edges for g in graphs]),
      globals=_map_concat([g.globals for g in graphs]),
      senders=np.concatenate([g.senders + o for g, o in zip(graphs, offsets)]),
      receivers=np.concatenate(
          [g.receivers + o for g, o in zip(graphs, offsets)]))


def _get_graph_size(graph: jraph.GraphsTuple) -> Tuple[int, int, int]:
  n_node = np.sum(graph.n_node)
  n_edge = len(graph.senders)
  n_graph = len(graph.n_node)
  return n_node, n_edge, n_graph


def _is_over_batch_size(
    graph: jraph.GraphsTuple,
    graph_batch_size: Iterable[int],
) -> bool:
  graph_size = _get_graph_size(graph)
  return any([x > y for x, y in zip(graph_size, graph_batch_size)])
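# Illustrative sketch (editorial addition, not part of the original module):
# batching two tiny graphs. With budgets n_node=8, n_edge=8, n_graph=3, both
# graphs (3 nodes / 2 edges each) fit into a single padded batch.
def _dynamic_batching_example():
  g = jraph.GraphsTuple(
      nodes=np.ones((3, 2), dtype=np.float32),
      edges=np.ones((2, 1), dtype=np.float32),
      senders=np.array([0, 1]),
      receivers=np.array([1, 2]),
      n_node=np.array([3]),
      n_edge=np.array([2]),
      globals=np.zeros((1, 1), dtype=np.float32))
  batches = list(
      dynamically_batch(iter([g, g]), n_node=8, n_edge=8, n_graph=3))
  assert len(batches) == 1
  # Padding brings the totals exactly up to the requested budget.
  assert batches[0].n_node.sum() == 8 and len(batches[0].n_node) == 3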
deepmind-research-master
ogb_lsc/mag/batching_utils.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Scheduling utilities.""" import jax.numpy as jnp def apply_ema_decay( ema_value: jnp.ndarray, current_value: jnp.ndarray, decay: jnp.ndarray, ) -> jnp.ndarray: """Implements EMA.""" return ema_value * decay + current_value * (1 - decay) def ema_decay_schedule( base_rate: jnp.ndarray, step: jnp.ndarray, total_steps: jnp.ndarray, use_schedule: bool, ) -> jnp.ndarray: """Anneals decay rate to 1 with cosine schedule.""" if not use_schedule: return base_rate multiplier = _cosine_decay(step, total_steps, 1.) return 1. - (1. - base_rate) * multiplier def _cosine_decay( global_step: jnp.ndarray, max_steps: int, initial_value: float, ) -> jnp.ndarray: """Simple implementation of cosine decay from TF1.""" global_step = jnp.minimum(global_step, max_steps).astype(jnp.float32) cosine_decay_value = 0.5 * (1 + jnp.cos(jnp.pi * global_step / max_steps)) decayed_learning_rate = initial_value * cosine_decay_value return decayed_learning_rate def learning_schedule( global_step: jnp.ndarray, base_learning_rate: float, total_steps: int, warmup_steps: int, use_schedule: bool, ) -> float: """Cosine learning rate scheduler.""" # Compute LR & Scaled LR if not use_schedule: return base_learning_rate warmup_learning_rate = ( global_step.astype(jnp.float32) / int(warmup_steps) * base_learning_rate if warmup_steps > 0 else base_learning_rate) # Cosine schedule after warmup. decay_learning_rate = _cosine_decay(global_step - warmup_steps, total_steps - warmup_steps, base_learning_rate) return jnp.where(global_step < warmup_steps, warmup_learning_rate, decay_learning_rate)
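# Illustrative checks (editorial addition, not part of the original module):
# endpoint values of the cosine decay and the linear warmup.
def _schedule_example():
  # The cosine multiplier starts at the initial value and decays to zero.
  assert abs(float(_cosine_decay(jnp.array(0), 100, 1.0)) - 1.0) < 1e-6
  assert abs(float(_cosine_decay(jnp.array(100), 100, 1.0))) < 1e-6
  # Halfway through warmup, the learning rate is half the base rate.
  lr = learning_schedule(
      jnp.array(50), base_learning_rate=1e-3, total_steps=1000,
      warmup_steps=100, use_schedule=True)
  assert abs(float(lr) - 5e-4) < 1e-6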
deepmind-research-master
ogb_lsc/mag/schedules.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Download data required for training and evaluating models.""" import pathlib from absl import app from absl import flags from absl import logging from google.cloud import storage # pylint: disable=g-bad-import-order import data_utils Path = pathlib.Path _BUCKET_NAME = 'deepmind-ogb-lsc' _MAX_DOWNLOAD_ATTEMPTS = 5 FLAGS = flags.FLAGS flags.DEFINE_enum('payload', None, ['data', 'models'], 'Download "data" or "models"?') flags.DEFINE_string('task_root', None, 'Local task root directory') DATA_RELATIVE_PATHS = ( data_utils.RAW_NODE_YEAR_FILENAME, data_utils.TRAIN_INDEX_FILENAME, data_utils.VALID_INDEX_FILENAME, data_utils.TEST_INDEX_FILENAME, data_utils.K_FOLD_SPLITS_DIR, data_utils.FUSED_NODE_LABELS_FILENAME, data_utils.FUSED_PAPER_EDGES_FILENAME, data_utils.FUSED_PAPER_EDGES_T_FILENAME, data_utils.EDGES_AUTHOR_INSTITUTION, data_utils.EDGES_INSTITUTION_AUTHOR, data_utils.EDGES_AUTHOR_PAPER, data_utils.EDGES_PAPER_AUTHOR, data_utils.PCA_MERGED_FEATURES_FILENAME, ) class DataCorruptionError(Exception): pass def _get_gcs_root(): return Path('mag') / FLAGS.payload def _get_gcs_bucket(): storage_client = storage.Client.create_anonymous_client() return storage_client.bucket(_BUCKET_NAME) def _write_blob_to_destination(blob, task_root, ignore_existing=True): """Write the blob.""" logging.info("Copying blob: '%s'", blob.name) destination_path = Path(task_root) / Path(*Path(blob.name).parts[1:]) logging.info(" ... to: '%s'", str(destination_path)) if ignore_existing and destination_path.exists(): return destination_path.parent.mkdir(parents=True, exist_ok=True) checksum = 'crc32c' for attempt in range(_MAX_DOWNLOAD_ATTEMPTS): try: blob.download_to_filename(destination_path.as_posix(), checksum=checksum) except storage.client.resumable_media.common.DataCorruption: pass else: break else: raise DataCorruptionError(f"Checksum ('{checksum}') for {blob.name} failed " f'after {attempt + 1} attempts') def main(unused_argv): bucket = _get_gcs_bucket() if FLAGS.payload == 'data': relative_paths = DATA_RELATIVE_PATHS else: relative_paths = (None,) for relative_path in relative_paths: if relative_path is None: relative_path = str(_get_gcs_root()) else: relative_path = str(_get_gcs_root() / relative_path) logging.info("Copying relative path: '%s'", relative_path) blobs = bucket.list_blobs(prefix=relative_path) for blob in blobs: _write_blob_to_destination(blob, FLAGS.task_root) if __name__ == '__main__': flags.mark_flag_as_required('payload') flags.mark_flag_as_required('task_root') app.run(main)
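# Typical invocations (illustrative; the task root is a placeholder path):
#   python download_mag.py --payload=data --task_root=/tmp/mag
#   python download_mag.py --payload=models --task_root=/tmp/mag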
deepmind-research-master
ogb_lsc/mag/download_mag.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to generate ensembled PCQ test predictions.""" import collections import os import pathlib from typing import List, NamedTuple from absl import app from absl import flags from absl import logging import dill import numpy as np from ogb import lsc # pylint: disable=g-bad-import-order # pytype: disable=import-error import datasets _NUM_SEEDS = 2 _CLIP_VALUE = 20. _NUM_KFOLD_SPLITS = 10 _SEED_START = flags.DEFINE_integer( 'seed_start', 42, 'Initial seed for the list of ensemble models.') _CONFORMER_PATH = flags.DEFINE_string( 'conformer_path', None, 'Path to conformer predictions.', required=True) _NON_CONFORMER_PATH = flags.DEFINE_string( 'non_conformer_path', None, 'Path to non-conformer predictions.', required=True) _OUTPUT_PATH = flags.DEFINE_string('output_path', None, 'Output path.') _SPLIT = flags.DEFINE_enum('split', 'test', ['test', 'valid'], 'Split: valid or test.') class _Predictions(NamedTuple): predictions: np.ndarray indices: np.ndarray def _load_dill(fname) -> bytes: with open(fname, 'rb') as f: return dill.load(f) def _sort_by_indices(predictions: _Predictions) -> _Predictions: order = np.argsort(predictions.indices) return _Predictions( predictions=predictions.predictions[order], indices=predictions.indices[order]) def load_predictions(path: str, split: str) -> _Predictions: """Load written prediction file.""" if len(os.listdir(path)) != 1: raise ValueError('Prediction directory must have exactly ' 'one prediction sub-directory: %s' % path) prediction_subdir = os.listdir(path)[0] return _Predictions(*_load_dill(f'{path}/{prediction_subdir}/{split}.dill')) def mean_mae_distance(x, y): return np.abs(x - y).mean() def _load_valid_labels() -> np.ndarray: labels = [label for _, label in datasets.load_smile_strings(with_labels=True)] return np.array([labels[i] for i in datasets.load_splits()['valid']]) def evaluate_valid_predictions(ensembled_predictions: _Predictions): """Evaluates the predictions on the validation set.""" ensembled_predictions = _sort_by_indices(ensembled_predictions) evaluator = lsc.PCQM4MEvaluator() results = evaluator.eval( dict( y_pred=ensembled_predictions.predictions, y_true=_load_valid_labels())) logging.info('MAE on validation dataset: %f', results['mae']) def clip_predictions(predictions: _Predictions) -> _Predictions: return predictions._replace( predictions=np.clip(predictions.predictions, 0., _CLIP_VALUE)) def _generate_test_prediction_file(test_predictions: np.ndarray, output_path: pathlib.Path) -> pathlib.Path: """Generates the final file for submission.""" # Check that predictions are not nuts. assert test_predictions.dtype in [np.float64, np.float32] assert not np.any(np.isnan(test_predictions)) assert np.all(np.isfinite(test_predictions)) assert test_predictions.min() >= 0. assert test_predictions.max() <= 40. # Too risky to overwrite. 
if output_path.exists():
    raise ValueError(f'{output_path} already exists')

  # Write to a local directory, and copy to final path (possibly CNS).
  # It is not possible to write directly to CNS.
  evaluator = lsc.PCQM4MEvaluator()
  evaluator.save_test_submission(dict(y_pred=test_predictions), output_path)
  return output_path


def merge_complementary_results(split: str, results_a: _Predictions,
                                results_b: _Predictions) -> _Predictions:
  """Merges two prediction results with no overlap."""
  indices_a = set(results_a.indices)
  indices_b = set(results_b.indices)
  assert not indices_a.intersection(indices_b)

  if split == 'test':
    merged_indices = list(sorted(indices_a | indices_b))
    expected_indices = datasets.load_splits()[split]
    assert np.all(expected_indices == merged_indices)

  predictions = np.concatenate([results_a.predictions, results_b.predictions])
  indices = np.concatenate([results_a.indices, results_b.indices])
  predictions = _sort_by_indices(
      _Predictions(indices=indices, predictions=predictions))
  return predictions


def ensemble_valid_predictions(
    predictions_list: List[_Predictions]) -> _Predictions:
  """Ensembles a list of predictions."""
  index_to_predictions = collections.defaultdict(list)
  for predictions in predictions_list:
    for idx, pred in zip(predictions.indices, predictions.predictions):
      index_to_predictions[idx].append(pred)

  for idx, ensemble_list in index_to_predictions.items():
    if len(ensemble_list) != _NUM_SEEDS:
      raise RuntimeError(
          'Graph index in the validation set received wrong number of '
          'predictions to ensemble.')

  index_to_predictions = {
      k: np.median(pred_list, axis=0)
      for k, pred_list in index_to_predictions.items()
  }
  return _sort_by_indices(
      _Predictions(
          indices=np.array(list(index_to_predictions.keys())),
          predictions=np.array(list(index_to_predictions.values()))))


def ensemble_test_predictions(
    predictions_list: List[_Predictions]) -> _Predictions:
  """Ensembles a list of predictions."""
  predictions = np.median([pred.predictions for pred in predictions_list],
                          axis=0)
  common_indices = predictions_list[0].indices
  for preds in predictions_list[1:]:
    assert np.all(preds.indices == common_indices)
  return _Predictions(predictions=predictions, indices=common_indices)


def create_submission_from_predictions(
    output_path: pathlib.Path, test_predictions: _Predictions) -> pathlib.Path:
  """Creates a submission for predictions on a path."""
  assert _SPLIT.value == 'test'

  output_path = _generate_test_prediction_file(
      test_predictions.predictions,
      output_path=output_path / 'submission_files')
  return output_path / 'y_pred_pcqm4m.npz'


def merge_predictions(split: str) -> List[_Predictions]:
  """Generates features merged from conformer and non-conformer predictions."""
  merged_predictions: List[_Predictions] = []
  seed = _SEED_START.value

  # Load conformer and non-conformer predictions.
  for unused_seed_group in (0, 1):
    for k in range(_NUM_KFOLD_SPLITS):
      conformer_predictions: _Predictions = load_predictions(
          f'{_CONFORMER_PATH.value}/k{k}_seed{seed}', split)

      non_conformer_predictions: _Predictions = load_predictions(
          f'{_NON_CONFORMER_PATH.value}/k{k}_seed{seed}', split)

      merged_predictions.append(
          merge_complementary_results(_SPLIT.value, conformer_predictions,
                                      non_conformer_predictions))

      seed += 1

  return merged_predictions


def main(_):
  split: str = _SPLIT.value

  # Merge conformer and non-conformer predictions.
  merged_predictions = merge_predictions(split)

  # Clip before ensembling.
  clipped_predictions = list(map(clip_predictions, merged_predictions))

  # Ensemble predictions.
if split == 'valid': ensembled_predictions = ensemble_valid_predictions(clipped_predictions) else: assert split == 'test' ensembled_predictions = ensemble_test_predictions(clipped_predictions) # Clip after ensembling. ensembled_predictions = clip_predictions(ensembled_predictions) ensembled_predictions_path = pathlib.Path(_OUTPUT_PATH.value) ensembled_predictions_path.mkdir(parents=True, exist_ok=True) with open(ensembled_predictions_path / f'{split}_predictions.dill', 'wb') as f: dill.dump(ensembled_predictions, f) if split == 'valid': evaluate_valid_predictions(ensembled_predictions) else: assert split == 'test' output_path = create_submission_from_predictions(ensembled_predictions_path, ensembled_predictions) logging.info('Submission files written to %s', output_path) if __name__ == '__main__': app.run(main)
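# Illustrative sketch (editorial addition, not part of the original script):
# the median ensembling performed by `ensemble_test_predictions`, on three
# aligned prediction sets.
def _median_ensemble_example():
  make = lambda values: _Predictions(
      predictions=np.array(values), indices=np.array([0, 1]))
  ens = ensemble_test_predictions(
      [make([1., 4.]), make([2., 5.]), make([9., 6.])])
  # Element-wise medians across the three runs.
  np.testing.assert_array_equal(ens.predictions, np.array([2., 5.]))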
deepmind-research-master
ogb_lsc/pcq/ensemble_predictions.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates the k-fold validation splits.""" import os import pickle from absl import app from absl import flags from absl import logging import numpy as np # pylint: disable=g-bad-import-order import datasets _OUTPUT_DIR = flags.DEFINE_string( 'output_dir', None, required=True, help='Output directory to write the splits to') K = 10 def main(argv): del argv valid_indices = datasets.load_splits()['valid'] k_splits = np.split(valid_indices, K) os.makedirs(_OUTPUT_DIR.value, exist_ok=True) for k_i, split in enumerate(k_splits): fname = os.path.join(_OUTPUT_DIR.value, f'{k_i}.pkl') with open(fname, 'wb') as f: pickle.dump(split, f) logging.info('Saved: %s', fname) if __name__ == '__main__': app.run(main)
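# Editorial note on `main` above: `np.split` requires the validation set size
# to divide evenly by K. PCQM4M's 380,670 validation samples split into 10
# folds of 38,067 each, so the call succeeds without a remainder error.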
deepmind-research-master
ogb_lsc/pcq/generate_validation_splits.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Experiment config for PCQM4M-LSC entry.""" from jaxline import base_config from ml_collections import config_dict def get_config(debug: bool = False) -> config_dict.ConfigDict: """Get Jaxline experiment config.""" config = base_config.get_base_config() # E.g. '/data/pretrained_models/k0_seed100' (and set k_fold_split_id=0, below) config.restore_path = config_dict.placeholder(str) training_batch_size = 64 eval_batch_size = 64 ## Experiment config. loss_config_name = 'RegressionLossConfig' loss_kwargs = dict( exponent=1., # 2 for l2 loss, 1 for l1 loss, etc... ) dataset_config = dict( data_root=config_dict.placeholder(str), augment_with_random_mirror_symmetry=True, k_fold_split_id=config_dict.placeholder(int), num_k_fold_splits=config_dict.placeholder(int), # Options: "in" or "out". # Filter=in would keep the samples with nans in the conformer features. # Filter=out would keep the samples with no NaNs anywhere in the conformer # features. filter_in_or_out_samples_with_nans_in_conformers=( config_dict.placeholder(str)), cached_conformers_file=config_dict.placeholder(str)) model_config = dict( mlp_hidden_size=512, mlp_layers=2, latent_size=512, use_layer_norm=False, num_message_passing_steps=32, shared_message_passing_weights=False, mask_padding_graph_at_every_step=True, loss_config_name=loss_config_name, loss_kwargs=loss_kwargs, processor_mode='resnet', global_reducer='sum', node_reducer='sum', dropedge_rate=0.1, dropnode_rate=0.1, aux_multiplier=0.1, add_relative_distance=True, add_relative_displacement=True, add_absolute_positions=False, position_normalization=2., relative_displacement_normalization=1., ignore_globals=False, ignore_globals_from_final_layer_for_predictions=True, ) if debug: # Make network smaller. model_config.update(dict( mlp_hidden_size=32, mlp_layers=1, latent_size=32, num_message_passing_steps=1)) config.experiment_kwargs = config_dict.ConfigDict( dict( config=dict( debug=debug, predictions_dir=config_dict.placeholder(str), ema=True, ema_decay=0.9999, sample_random=0.05, optimizer=dict( name='adam', optimizer_kwargs=dict(b1=.9, b2=.95), lr_schedule=dict( warmup_steps=int(5e4), decay_steps=int(5e5), init_value=1e-5, peak_value=1e-4, end_value=0., ), ), model=model_config, dataset_config=dataset_config, # As a rule of thumb, use the following statistics: # Avg. # nodes in graph: 16. # Avg. # edges in graph: 40. training=dict( dynamic_batch_size={ 'n_node': 256 if debug else 16 * training_batch_size, 'n_edge': 512 if debug else 40 * training_batch_size, 'n_graph': 2 if debug else training_batch_size, },), evaluation=dict( split='valid', dynamic_batch_size=dict( n_node=256 if debug else 16 * eval_batch_size, n_edge=512 if debug else 40 * eval_batch_size, n_graph=2 if debug else eval_batch_size, ))))) ## Training loop config. 
config.training_steps = int(5e6) config.checkpoint_dir = '/tmp/checkpoint/pcq/' config.train_checkpoint_all_hosts = False config.save_checkpoint_interval = 300 config.log_train_data_interval = 60 config.log_tensors_interval = 60 config.best_model_eval_metric = 'mae' config.best_model_eval_metric_higher_is_better = False return config
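# Illustrative usage (editorial addition, not part of the original module):
#   cfg = get_config(debug=True)
#   cfg.experiment_kwargs.config.model.latent_size  # -> 32 in debug mode.
#   cfg.training_steps                              # -> 5_000_000.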
deepmind-research-master
ogb_lsc/pcq/config.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Download data required for training and evaluating models.""" import pathlib from absl import app from absl import flags from absl import logging from google.cloud import storage Path = pathlib.Path _BUCKET_NAME = 'deepmind-ogb-lsc' _MAX_DOWNLOAD_ATTEMPTS = 5 FLAGS = flags.FLAGS flags.DEFINE_enum('payload', None, ['data', 'models'], 'Download "data" or "models"?') flags.DEFINE_string('task_root', None, 'Local task root directory') # GCS_DATA_ROOT = Path('pcq/data') # GCS_MODEL_ROOT = Path('pcq/models') DATA_RELATIVE_PATHS = [ 'raw/data.csv.gz', 'preprocessed/smile_to_conformer.pkl', 'k_fold_splits' ] class DataCorruptionError(Exception): pass def _get_gcs_root(): return Path('pcq') / FLAGS.payload def _get_gcs_bucket(): storage_client = storage.Client.create_anonymous_client() return storage_client.bucket(_BUCKET_NAME) def _write_blob_to_destination(blob, task_root, ignore_existing=True): """Write the blob.""" logging.info("Copying blob: '%s'", blob.name) destination_path = Path(task_root) / Path(*Path(blob.name).parts[1:]) logging.info(" ... to: '%s'", str(destination_path)) if ignore_existing and destination_path.exists(): return destination_path.parent.mkdir(parents=True, exist_ok=True) checksum = 'crc32c' for attempt in range(_MAX_DOWNLOAD_ATTEMPTS): try: blob.download_to_filename(destination_path.as_posix(), checksum=checksum) except storage.client.resumable_media.common.DataCorruption: pass else: break else: raise DataCorruptionError(f"Checksum ('{checksum}') for {blob.name} failed " f'after {attempt + 1} attempts') def main(unused_argv): bucket = _get_gcs_bucket() if FLAGS.payload == 'data': relative_paths = DATA_RELATIVE_PATHS else: relative_paths = (None,) for relative_path in relative_paths: if relative_path is None: relative_path = str(_get_gcs_root()) else: relative_path = str(_get_gcs_root() / relative_path) logging.info("Copying relative path: '%s'", relative_path) blobs = bucket.list_blobs(prefix=relative_path) for blob in blobs: _write_blob_to_destination(blob, FLAGS.task_root) if __name__ == '__main__': flags.mark_flag_as_required('payload') flags.mark_flag_as_required('task_root') app.run(main)
deepmind-research-master
ogb_lsc/pcq/download_pcq.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PCQM4M-LSC datasets.""" import functools import pickle from typing import Dict, List, Tuple, Union import numpy as np from ogb import lsc NUM_VALID_SAMPLES = 380_670 NUM_TEST_SAMPLES = 377_423 NORMALIZE_TARGET_MEAN = 5.690944545356371 NORMALIZE_TARGET_STD = 1.1561347795107815 def load_splits() -> Dict[str, List[int]]: """Loads dataset splits.""" dataset = _get_pcq_dataset(only_smiles=True) return dataset.get_idx_split() def load_kth_fold_indices(data_root: str, k_fold_split_id: int) -> List[int]: """Loads k-th fold indices.""" fname = f"{data_root}/k_fold_splits/{k_fold_split_id}.pkl" return list(map(int, _load_pickle(fname))) def load_all_except_kth_fold_indices(data_root: str, k_fold_split_id: int, num_k_fold_splits: int) -> List[int]: """Loads indices except for the kth fold.""" if k_fold_split_id is None: raise ValueError("Expected integer value for `k_fold_split_id`.") indices = [] for index in range(num_k_fold_splits): if index != k_fold_split_id: indices += load_kth_fold_indices(data_root, index) return indices def load_smile_strings( with_labels=False) -> List[Union[str, Tuple[str, np.ndarray]]]: """Loads the smile strings in the PCQ dataset.""" dataset = _get_pcq_dataset(only_smiles=True) smiles = [] for i in range(len(dataset)): smile, label = dataset[i] if with_labels: smiles.append((smile, label)) else: smiles.append(smile) return smiles @functools.lru_cache() def load_cached_conformers(cached_fname: str) -> Dict[str, np.ndarray]: """Returns cached dict mapping smile strings to conformer features.""" return _load_pickle(cached_fname) @functools.lru_cache() def _get_pcq_dataset(only_smiles: bool): return lsc.PCQM4MDataset(only_smiles=only_smiles) def _load_pickle(fname: str): with open(fname, "rb") as f: return pickle.load(f)
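# Hedged sketch (hypothetical helper, editorial addition): regression targets
# are normalised with the mean/std constants above, and model outputs are
# mapped back with the inverse transform, matching the rescaling applied in
# models.py.
def _denormalize_prediction(y_norm: np.ndarray) -> np.ndarray:
  return y_norm * NORMALIZE_TARGET_STD + NORMALIZE_TARGET_MEAN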
deepmind-research-master
ogb_lsc/pcq/datasets.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Dataset utilities.""" from typing import List, Optional import jax import jraph from ml_collections import config_dict import numpy as np from ogb import utils from ogb.utils import features import tensorflow.compat.v2 as tf import tensorflow_datasets as tfds import tree # pylint: disable=g-bad-import-order # pytype: disable=import-error import batching_utils import conformer_utils import datasets def build_dataset_iterator( data_root: str, split: str, dynamic_batch_size_config: config_dict.ConfigDict, sample_random: float, cached_conformers_file: str, debug: bool = False, is_training: bool = True, augment_with_random_mirror_symmetry: bool = False, positions_noise_std: Optional[float] = None, k_fold_split_id: Optional[int] = None, num_k_fold_splits: Optional[int] = None, filter_in_or_out_samples_with_nans_in_conformers: Optional[str] = None, ): """Returns an iterator over Batches from the dataset.""" if debug: max_items_to_read_from_dataset = 10 prefetch_buffer_size = 1 shuffle_buffer_size = 1 else: max_items_to_read_from_dataset = -1 # < 0 means no limit. prefetch_buffer_size = 64 # It can take a while to fill the shuffle buffer with k fold splits. shuffle_buffer_size = 128 if k_fold_split_id is None else int(1e6) num_local_devices = jax.local_device_count() # Load all smile strings. indices, smiles, labels = _load_smiles( data_root, split, k_fold_split_id=k_fold_split_id, num_k_fold_splits=num_k_fold_splits) if debug: indices = indices[:100] smiles = smiles[:100] labels = labels[:100] # Generate all conformer features from smile strings ahead of time. # This gives us a boost from multi-parallelism as opposed to doing it # online. conformers = _load_conformers(indices, smiles, cached_conformers_file) data_generator = ( lambda: _get_pcq_graph_generator(indices, smiles, labels, conformers)) # Create a dataset yielding graphs from smile strings. example = next(data_generator()) signature_from_example = tree.map_structure(_numpy_to_tensor_spec, example) ds = tf.data.Dataset.from_generator( data_generator, output_signature=signature_from_example) ds = ds.take(max_items_to_read_from_dataset) ds = ds.cache() if is_training: ds = ds.shuffle(shuffle_buffer_size) # Apply transformations. def map_fn(graph, conformer_positions): graph = _maybe_one_hot_atoms_with_noise( graph, is_training=is_training, sample_random=sample_random) # Add conformer features. 
graph = _add_conformer_features( graph, conformer_positions, augment_with_random_mirror_symmetry=augment_with_random_mirror_symmetry, noise_std=positions_noise_std, is_training=is_training, ) return _downcast_ints(graph) ds = ds.map(map_fn, num_parallel_calls=tf.data.AUTOTUNE) if filter_in_or_out_samples_with_nans_in_conformers: if filter_in_or_out_samples_with_nans_in_conformers not in ("in", "out"): raise ValueError( "Unknown value specified for the argument " "`filter_in_or_out_samples_with_nans_in_conformers`: %s" % filter_in_or_out_samples_with_nans_in_conformers) filter_fn = _get_conformer_filter( with_nans=(filter_in_or_out_samples_with_nans_in_conformers == "in")) ds = ds.filter(filter_fn) if is_training: ds = ds.shard(jax.process_count(), jax.process_index()) ds = ds.repeat() ds = ds.prefetch(prefetch_buffer_size) it = tfds.as_numpy(ds) # Dynamic batching. batched_gen = batching_utils.dynamically_batch( it, n_node=dynamic_batch_size_config.n_node + 1, n_edge=dynamic_batch_size_config.n_edge, n_graph=dynamic_batch_size_config.n_graph + 1, ) if is_training: # Stack `num_local_devices` of batches together for pmap updates. batch_size = num_local_devices def _batch(l): assert l return tree.map_structure(lambda *l: np.stack(l, axis=0), *l) def batcher_fn(): batch = [] for sample in batched_gen: batch.append(sample) if len(batch) == batch_size: yield _batch(batch) batch = [] if batch: yield _batch(batch) for sample in batcher_fn(): yield sample else: for sample in batched_gen: yield sample def _get_conformer_filter(with_nans: bool): """Selects a conformer filter to apply. Args: with_nans: Filter only selects samples with NaNs in conformer features. Else, selects samples without any NaNs in conformer features. Returns: A function that can be used with tf.data.Dataset.filter(). Raises: ValueError: If the input graph to the filter has no conformer features to filter. 
""" def _filter(graph: jraph.GraphsTuple) -> tf.Tensor: if ("positions" not in graph.nodes) or ( "positions_targets" not in graph.nodes) or ( "positions_nan_mask" not in graph.globals): raise ValueError("Conformer features not available to filter.") any_nan = tf.logical_not(tf.squeeze(graph.globals["positions_nan_mask"])) return any_nan if with_nans else tf.logical_not(any_nan) return _filter def _numpy_to_tensor_spec(arr: np.ndarray) -> tf.TensorSpec: if not isinstance(arr, np.ndarray): return tf.TensorSpec([], dtype=tf.int32 if isinstance(arr, int) else tf.float32) elif arr.shape: return tf.TensorSpec((None,) + arr.shape[1:], arr.dtype) else: return tf.TensorSpec([], arr.dtype) def _sample_uniform_categorical(num: int, size: int) -> tf.Tensor: return tf.random.categorical(tf.math.log([[1 / size] * size]), num)[0] @jax.curry(jax.tree_map) def _downcast_ints(x): if x.dtype == tf.int64: return tf.cast(x, tf.int32) return x def _one_hot_atoms(atoms: tf.Tensor) -> tf.Tensor: vocab_sizes = features.get_atom_feature_dims() one_hots = [] for i in range(atoms.shape[1]): one_hots.append(tf.one_hot(atoms[:, i], vocab_sizes[i], dtype=tf.float32)) return tf.concat(one_hots, axis=-1) def _sample_one_hot_atoms(atoms: tf.Tensor) -> tf.Tensor: vocab_sizes = features.get_atom_feature_dims() one_hots = [] num_atoms = tf.shape(atoms)[0] for i in range(atoms.shape[1]): sampled_category = _sample_uniform_categorical(num_atoms, vocab_sizes[i]) one_hots.append( tf.one_hot(sampled_category, vocab_sizes[i], dtype=tf.float32)) return tf.concat(one_hots, axis=-1) def _one_hot_bonds(bonds: tf.Tensor) -> tf.Tensor: vocab_sizes = features.get_bond_feature_dims() one_hots = [] for i in range(bonds.shape[1]): one_hots.append(tf.one_hot(bonds[:, i], vocab_sizes[i], dtype=tf.float32)) return tf.concat(one_hots, axis=-1) def _sample_one_hot_bonds(bonds: tf.Tensor) -> tf.Tensor: vocab_sizes = features.get_bond_feature_dims() one_hots = [] num_bonds = tf.shape(bonds)[0] for i in range(bonds.shape[1]): sampled_category = _sample_uniform_categorical(num_bonds, vocab_sizes[i]) one_hots.append( tf.one_hot(sampled_category, vocab_sizes[i], dtype=tf.float32)) return tf.concat(one_hots, axis=-1) def _maybe_one_hot_atoms_with_noise( x, is_training: bool, sample_random: float, ): """One hot atoms with noise.""" gt_nodes = _one_hot_atoms(x.nodes) gt_edges = _one_hot_bonds(x.edges) if is_training: num_nodes = tf.shape(x.nodes)[0] sample_node_or_not = tf.random.uniform([num_nodes], maxval=1) < sample_random nodes = tf.where( tf.expand_dims(sample_node_or_not, axis=-1), _sample_one_hot_atoms(x.nodes), gt_nodes) num_edges = tf.shape(x.edges)[0] sample_edges_or_not = tf.random.uniform([num_edges], maxval=1) < sample_random edges = tf.where( tf.expand_dims(sample_edges_or_not, axis=-1), _sample_one_hot_bonds(x.edges), gt_edges) else: nodes = gt_nodes edges = gt_edges return x._replace( nodes={ "atom_one_hots_targets": gt_nodes, "atom_one_hots": nodes, }, edges={ "bond_one_hots_targets": gt_edges, "bond_one_hots": edges }) def _load_smiles( data_root: str, split: str, k_fold_split_id: int, num_k_fold_splits: int, ): """Loads smiles trings for the input split.""" if split == "test" or k_fold_split_id is None: indices = datasets.load_splits()[split] elif split == "train": indices = datasets.load_all_except_kth_fold_indices( data_root, k_fold_split_id, num_k_fold_splits) else: assert split == "valid" indices = datasets.load_kth_fold_indices(data_root, k_fold_split_id) smiles_and_labels = datasets.load_smile_strings(with_labels=True) smiles, 
labels = list(zip(*smiles_and_labels)) return indices, [smiles[i] for i in indices], [labels[i] for i in indices] def _convert_ogb_graph_to_graphs_tuple(ogb_graph): """Converts an OGB Graph to a GraphsTuple.""" senders = ogb_graph["edge_index"][0] receivers = ogb_graph["edge_index"][1] edges = ogb_graph["edge_feat"] nodes = ogb_graph["node_feat"] n_node = np.array([ogb_graph["num_nodes"]]) n_edge = np.array([len(senders)]) graph = jraph.GraphsTuple( nodes=nodes, edges=edges, senders=senders, receivers=receivers, n_node=n_node, n_edge=n_edge, globals=None) return tree.map_structure(lambda x: x if x is not None else np.array(0.), graph) def _load_conformers(indices: List[int], smiles: List[str], cached_conformers_file: str): """Loads conformers.""" smile_to_conformer = datasets.load_cached_conformers(cached_conformers_file) conformers = [] for graph_idx, smile in zip(indices, smiles): del graph_idx # Unused. if smile not in smile_to_conformer: raise KeyError("Cache did not have conformer entry for the smile %s" % str(smile)) conformers.append(dict(conformer=smile_to_conformer[smile])) return conformers def _add_conformer_features( graph, conformer_features, augment_with_random_mirror_symmetry: bool, noise_std: float, is_training: bool, ): """Adds conformer features.""" if not isinstance(graph.nodes, dict): raise ValueError("Expected a dict type for `graph.nodes`.") # Remove mean position to center around a canonical origin. positions = conformer_features["conformer"] # NaN's appear in ~0.13% of training, 0.104% of validation and 0.16% of test # nodes. # See this colab: http://shortn/_6UcuosxY7x. nan_mask = tf.reduce_any(tf.math.is_nan(positions)) positions = tf.where(nan_mask, tf.constant(0., positions.dtype), positions) positions -= tf.reduce_mean(positions, axis=0, keepdims=True) # Optionally augment with a random rotation. if is_training: rot_mat = conformer_utils.get_random_rotation_matrix( augment_with_random_mirror_symmetry) positions = conformer_utils.rotate(positions, rot_mat) positions_targets = positions # Optionally add noise to the positions. if noise_std and is_training: positions = tf.random.normal(tf.shape(positions), positions, noise_std) return graph._replace( nodes=dict( positions=positions, positions_targets=positions_targets, **graph.nodes), globals={ "positions_nan_mask": tf.expand_dims(tf.logical_not(nan_mask), axis=0), **(graph.globals if isinstance(graph.globals, dict) else {}) }) def _get_pcq_graph_generator(indices, smiles, labels, conformers): """Returns a generator to yield graph.""" for idx, smile, conformer_positions, label in zip(indices, smiles, conformers, labels): graph = utils.smiles2graph(smile) graph = _convert_ogb_graph_to_graphs_tuple(graph) graph = graph._replace( globals={ "target": np.array([label], dtype=np.float32), "graph_index": np.array([idx], dtype=np.int32), **(graph.globals if isinstance(graph.globals, dict) else {}) }) yield graph, conformer_positions
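# Illustrative check (editorial addition, not part of the original module):
# how `_numpy_to_tensor_spec` infers the generator signature from an example
# array, leaving the leading dimension unspecified.
def _tensor_spec_example():
  spec = _numpy_to_tensor_spec(np.zeros((5, 3), dtype=np.float32))
  assert spec == tf.TensorSpec((None, 3), tf.float32)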
deepmind-research-master
ogb_lsc/pcq/dataset_utils.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PCQM4M-LSC models.""" import copy import functools from typing import Any, Dict, Mapping, Sequence, Tuple import chex import haiku as hk import jax import jax.numpy as jnp import jraph from ml_collections import config_dict _REDUCER_NAMES = { "sum": jax.ops.segment_sum, "mean": jraph.segment_mean, } _NUM_EDGE_FEATURES = 13 _NUM_NODE_FEATURES = 173 @chex.dataclass class RegressionLossConfig: """Regression Loss Config.""" # For normalization and denormalization. std: float mean: float kwargs: Mapping[str, Any] out_size: int = 1 def _sigmoid_cross_entropy( logits: jnp.DeviceArray, labels: jnp.DeviceArray, ) -> jnp.DeviceArray: log_p = jax.nn.log_sigmoid(logits) log_not_p = jax.nn.log_sigmoid(-logits) return -labels * log_p - (1. - labels) * log_not_p def _softmax_cross_entropy( logits: jnp.DeviceArray, targets: jnp.DeviceArray, ) -> jnp.DeviceArray: logits = jax.nn.log_softmax(logits) return -jnp.sum(targets * logits, axis=-1) def _regression_loss( pred: jnp.ndarray, targets: jnp.ndarray, exponent: int, ) -> jnp.ndarray: """Regression loss.""" error = pred - targets if exponent == 2: return error ** 2 elif exponent == 1: return jnp.abs(error) else: raise ValueError(f"Unsupported exponent value {exponent}.") def _build_mlp( name: str, output_sizes: Sequence[int], use_layer_norm=False, activation=jax.nn.relu, ): """Builds an MLP, optionally with layernorm.""" net = hk.nets.MLP( output_sizes=output_sizes, name=name + "_mlp", activation=activation) if use_layer_norm: layer_norm = hk.LayerNorm( axis=-1, create_scale=True, create_offset=True, name=name + "_layer_norm") net = hk.Sequential([net, layer_norm]) return jraph.concatenated_args(net) def _compute_relative_displacement_and_distance( graph: jraph.GraphsTuple, normalization_factor: float, use_target: bool, ) -> Tuple[jnp.ndarray, jnp.ndarray]: """Computes relative displacements and distances.""" if use_target: node_positions = graph.nodes["positions_targets"] else: node_positions = graph.nodes["positions"] relative_displacement = node_positions[ graph.receivers] - node_positions[graph.senders] # Note due to the random rotations in space, mean across all nodes across # all batches is guaranteed to be zero, and the standard deviation is # guaranteed to be the same for all 3 coordinates, so we only need to scale # by a single value. 
relative_displacement /= normalization_factor relative_distance = jnp.linalg.norm( relative_displacement, axis=-1, keepdims=True) return relative_displacement, relative_distance def _broadcast_global_to_nodes( global_feature: jnp.ndarray, graph: jraph.GraphsTuple, ) -> jnp.ndarray: graph_idx = jnp.arange(graph.n_node.shape[0]) sum_n_node = jax.tree_leaves(graph.nodes)[0].shape[0] node_graph_idx = jnp.repeat( graph_idx, graph.n_node, axis=0, total_repeat_length=sum_n_node) return global_feature[node_graph_idx] def _broadcast_global_to_edges( global_feature: jnp.ndarray, graph: jraph.GraphsTuple, ) -> jnp.ndarray: graph_idx = jnp.arange(graph.n_edge.shape[0]) sum_n_edge = graph.senders.shape[0] edge_graph_idx = jnp.repeat( graph_idx, graph.n_edge, axis=0, total_repeat_length=sum_n_edge) return global_feature[edge_graph_idx] class GraphPropertyEncodeProcessDecode(hk.Module): """Encode-process-decode model for graph property prediction.""" def __init__( self, loss_config: config_dict.ConfigDict, mlp_hidden_size: int, mlp_layers: int, latent_size: int, use_layer_norm: bool, num_message_passing_steps: int, shared_message_passing_weights: bool, mask_padding_graph_at_every_step: bool, loss_config_name: str, loss_kwargs: config_dict.ConfigDict, processor_mode: str, global_reducer: str, node_reducer: str, dropedge_rate: float, dropnode_rate: float, aux_multiplier: float, ignore_globals: bool, ignore_globals_from_final_layer_for_predictions: bool, add_relative_distance: bool = False, add_relative_displacement: bool = False, add_absolute_positions: bool = False, position_normalization: float = 1., relative_displacement_normalization: float = 1., add_misc_node_features: bool = None, name="GraphPropertyEncodeProcessDecode", ): super(GraphPropertyEncodeProcessDecode, self).__init__() self._loss_config = loss_config self._config = config_dict.ConfigDict(dict( loss_config=loss_config, mlp_hidden_size=mlp_hidden_size, mlp_layers=mlp_layers, latent_size=latent_size, use_layer_norm=use_layer_norm, num_message_passing_steps=num_message_passing_steps, shared_message_passing_weights=shared_message_passing_weights, mask_padding_graph_at_every_step=mask_padding_graph_at_every_step, loss_config_name=loss_config_name, loss_kwargs=loss_kwargs, processor_mode=processor_mode, global_reducer=global_reducer, node_reducer=node_reducer, dropedge_rate=dropedge_rate, dropnode_rate=dropnode_rate, aux_multiplier=aux_multiplier, ignore_globals=ignore_globals, ignore_globals_from_final_layer_for_predictions=ignore_globals_from_final_layer_for_predictions, add_relative_distance=add_relative_distance, add_relative_displacement=add_relative_displacement, add_absolute_positions=add_absolute_positions, position_normalization=position_normalization, relative_displacement_normalization=relative_displacement_normalization, add_misc_node_features=add_misc_node_features, )) def __call__(self, graph: jraph.GraphsTuple) -> chex.ArrayTree: """Model inference step.""" out = self._forward(graph, is_training=False) if isinstance(self._loss_config, RegressionLossConfig): out["globals"] = out[ "globals"]*self._loss_config.std + self._loss_config.mean return out @hk.experimental.name_like("__call__") def get_loss( self, graph: jraph.GraphsTuple, is_training: bool = True, ) -> Tuple[jnp.ndarray, chex.ArrayTree]: """Model loss.""" scalars = get_utilization_scalars(graph) targets = copy.deepcopy(graph.globals["target"]) if len(targets.shape) == 1: targets = targets[:, None] del graph.globals["target"] target_mask = None if "target_mask" in 
graph.globals: target_mask = copy.deepcopy(graph.globals["target_mask"]) del graph.globals["target_mask"] out = self._forward(graph, is_training) if isinstance(self._loss_config, RegressionLossConfig): normalized_targets = ( (targets - self._loss_config.mean) / self._loss_config.std) per_graph_and_head_loss = _regression_loss( out["globals"], normalized_targets, **self._loss_config.kwargs) else: raise TypeError(type(self._loss_config)) # Mask out nans if target_mask is None: per_graph_and_head_loss = jnp.mean(per_graph_and_head_loss, axis=1) else: per_graph_and_head_loss = jnp.sum( per_graph_and_head_loss * target_mask, axis=1) per_graph_and_head_loss /= jnp.sum(target_mask + 1e-8, axis=1) g_mask = jraph.get_graph_padding_mask(graph) loss = _mean_with_mask(per_graph_and_head_loss, g_mask) scalars.update({"loss": loss}) if self._config.aux_multiplier > 0: atom_loss = self._get_node_auxiliary_loss( graph, out["atom_one_hots"], graph.nodes["atom_one_hots_targets"], is_regression=False) bond_loss = self._get_edge_auxiliary_loss( graph, out["bond_one_hots"], graph.edges["bond_one_hots_targets"], is_regression=False) loss += (atom_loss + bond_loss)*self._config.aux_multiplier scalars.update({"atom_loss": atom_loss, "bond_loss": bond_loss}) scaled_loss = loss / jax.device_count() scalars.update({"total_loss": loss}) return scaled_loss, scalars @hk.transparent def _prepare_features(self, graph: jraph.GraphsTuple) -> jraph.GraphsTuple: """Prepares features keys into flat node, edge and global features.""" # Collect edge features. edge_features_list = [graph.edges["bond_one_hots"]] if (self._config.add_relative_displacement or self._config.add_relative_distance): (relative_displacement, relative_distance ) = _compute_relative_displacement_and_distance( graph, self._config.relative_displacement_normalization, use_target=False) if self._config.add_relative_displacement: edge_features_list.append(relative_displacement) if self._config.add_relative_distance: edge_features_list.append(relative_distance) mask_at_edges = _broadcast_global_to_edges( graph.globals["positions_nan_mask"], graph) edge_features_list.append(mask_at_edges[:, None].astype(jnp.float32)) edge_features = jnp.concatenate(edge_features_list, axis=-1) # Collect node features node_features_list = [graph.nodes["atom_one_hots"]] if self._config.add_absolute_positions: node_features_list.append( graph.nodes["positions"] / self._config.position_normalization) mask_at_nodes = _broadcast_global_to_nodes( graph.globals["positions_nan_mask"], graph) node_features_list.append(mask_at_nodes[:, None].astype(jnp.float32)) node_features = jnp.concatenate(node_features_list, axis=-1) global_features = jnp.zeros((len(graph.n_node), self._config.latent_size)) chex.assert_tree_shape_prefix(global_features, (len(graph.n_node),)) return graph._replace( nodes=node_features, edges=edge_features, globals=global_features) @hk.transparent def _encoder( self, graph: jraph.GraphsTuple, is_training: bool, ) -> jraph.GraphsTuple: """Builds the encoder.""" del is_training # unused graph = self._prepare_features(graph) # Run encoders in all of the node, edge and global features. 
output_sizes = [self._config.mlp_hidden_size] * self._config.mlp_layers output_sizes += [self._config.latent_size] build_mlp = functools.partial( _build_mlp, output_sizes=output_sizes, use_layer_norm=self._config.use_layer_norm, ) gmf = jraph.GraphMapFeatures( embed_edge_fn=build_mlp("edge_encoder"), embed_node_fn=build_mlp("node_encoder"), embed_global_fn=None if self._config.ignore_globals else build_mlp("global_encoder"), ) return gmf(graph) @hk.transparent def _processor( self, graph: jraph.GraphsTuple, is_training: bool, ) -> jraph.GraphsTuple: """Builds the processor.""" output_sizes = [self._config.mlp_hidden_size] * self._config.mlp_layers output_sizes += [self._config.latent_size] build_mlp = functools.partial( _build_mlp, output_sizes=output_sizes, use_layer_norm=self._config.use_layer_norm, ) shared_weights = self._config.shared_message_passing_weights node_reducer = _REDUCER_NAMES[self._config.node_reducer] global_reducer = _REDUCER_NAMES[self._config.global_reducer] def dropout_if_training(fn, dropout_rate: float): def wrapped(*args): out = fn(*args) if is_training: mask = hk.dropout(hk.next_rng_key(), dropout_rate, jnp.ones([out.shape[0], 1])) out = out * mask return out return wrapped num_mps = self._config.num_message_passing_steps for step in range(num_mps): if step == 0 or not shared_weights: suffix = "shared" if shared_weights else step update_edge_fn = dropout_if_training( build_mlp(f"edge_processor_{suffix}"), dropout_rate=self._config.dropedge_rate) update_node_fn = dropout_if_training( build_mlp(f"node_processor_{suffix}"), dropout_rate=self._config.dropnode_rate) if self._config.ignore_globals: gnn = jraph.InteractionNetwork( update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, aggregate_edges_for_nodes_fn=node_reducer) else: gnn = jraph.GraphNetwork( update_edge_fn=update_edge_fn, update_node_fn=update_node_fn, update_global_fn=build_mlp(f"global_processor_{suffix}"), aggregate_edges_for_nodes_fn=node_reducer, aggregate_nodes_for_globals_fn=global_reducer, aggregate_edges_for_globals_fn=global_reducer, ) mode = self._config.processor_mode if mode == "mlp": graph = gnn(graph) elif mode == "resnet": new_graph = gnn(graph) graph = graph._replace( nodes=graph.nodes + new_graph.nodes, edges=graph.edges + new_graph.edges, globals=graph.globals + new_graph.globals, ) else: raise ValueError(f"Unknown processor_mode `{mode}`") if self._config.mask_padding_graph_at_every_step: graph = _mask_out_padding_graph(graph) return graph @hk.transparent def _decoder( self, graph: jraph.GraphsTuple, input_graph: jraph.GraphsTuple, is_training: bool, ) -> chex.ArrayTree: """Builds the decoder.""" del is_training # unused. output_sizes = [self._config.mlp_hidden_size] * self._config.mlp_layers output_sizes += [self._loss_config.out_size] net = _build_mlp("regress_out", output_sizes, use_layer_norm=False) summed_nodes = _aggregate_nodes_to_globals(graph, graph.nodes) inputs_to_global_decoder = [summed_nodes] if not self._config.ignore_globals_from_final_layer_for_predictions: inputs_to_global_decoder.append(graph.globals) out = net(jnp.concatenate(inputs_to_global_decoder, axis=-1)) out_dict = {} out_dict["globals"] = out # Note "linear" names are for compatibility with pre-trained model names. 
out_dict["bond_one_hots"] = hk.Linear( _NUM_EDGE_FEATURES, name="linear")(graph.edges) out_dict["atom_one_hots"] = hk.Linear( _NUM_NODE_FEATURES, name="linear_1")(graph.nodes) return out_dict @hk.transparent def _forward(self, graph: jraph.GraphsTuple, is_training: bool): input_graph = jraph.GraphsTuple(*graph) with hk.experimental.name_scope("encoder_scope"): graph = self._encoder(graph, is_training) with hk.experimental.name_scope("processor_scope"): graph = self._processor(graph, is_training) with hk.experimental.name_scope("decoder_scope"): out = self._decoder(graph, input_graph, is_training) return out def _get_node_auxiliary_loss( self, graph, pred, targets, is_regression, additional_mask=None): loss = self._get_loss(pred, targets, is_regression) target_mask = jraph.get_node_padding_mask(graph) if additional_mask is not None: loss *= additional_mask target_mask = jnp.logical_and(target_mask, additional_mask) return _mean_with_mask(loss, target_mask) def _get_edge_auxiliary_loss( self, graph, pred, targets, is_regression, additional_mask=None): loss = self._get_loss(pred, targets, is_regression) target_mask = jraph.get_edge_padding_mask(graph) if additional_mask is not None: loss *= additional_mask target_mask = jnp.logical_and(target_mask, additional_mask) return _mean_with_mask(loss, target_mask) def _get_loss(self, pred, targets, is_regression): if is_regression: loss = ((pred - targets)**2).mean(axis=-1) else: targets /= jnp.maximum(1., jnp.sum(targets, axis=-1, keepdims=True)) loss = _softmax_cross_entropy(pred, targets) return loss def get_utilization_scalars( padded_graph: jraph.GraphsTuple) -> Dict[str, float]: padding_nodes = jraph.get_number_of_padding_with_graphs_nodes(padded_graph) all_nodes = len(jax.tree_leaves(padded_graph.nodes)[0]) padding_edges = jraph.get_number_of_padding_with_graphs_edges(padded_graph) all_edges = len(jax.tree_leaves(padded_graph.edges)[0]) padding_graphs = jraph.get_number_of_padding_with_graphs_graphs(padded_graph) all_graphs = len(padded_graph.n_node) return {"node_utilization": 1 - (padding_nodes / all_nodes), "edge_utilization": 1 - (padding_edges / all_edges), "graph_utilization": 1 - (padding_graphs / all_graphs)} def sum_with_mask(array: jnp.ndarray, mask: jnp.ndarray) -> jnp.ndarray: return (mask * array).sum(0) def _mean_with_mask(array: jnp.ndarray, mask: jnp.ndarray) -> jnp.ndarray: num_valid_rows = mask.sum(0) return sum_with_mask(array, mask) / num_valid_rows def _mask_out_padding_graph( padded_graph: jraph.GraphsTuple) -> jraph.GraphsTuple: return padded_graph._replace( nodes=jnp.where( jraph.get_node_padding_mask( padded_graph)[:, None], padded_graph.nodes, 0.), edges=jnp.where( jraph.get_edge_padding_mask( padded_graph)[:, None], padded_graph.edges, 0.), globals=jnp.where( jraph.get_graph_padding_mask( padded_graph)[:, None], padded_graph.globals, 0.), ) def _aggregate_nodes_to_globals(graph, node_features): n_graph = graph.n_node.shape[0] sum_n_node = jax.tree_leaves(graph.nodes)[0].shape[0] graph_idx = jnp.arange(n_graph) node_gr_idx = jnp.repeat( graph_idx, graph.n_node, axis=0, total_repeat_length=sum_n_node) return jax.ops.segment_sum(node_features, node_gr_idx, num_segments=n_graph)
deepmind-research-master
ogb_lsc/pcq/model.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PCQM4M-LSC Jaxline experiment.""" import datetime import functools import os import signal import threading from typing import Iterable, Mapping, NamedTuple, Tuple from absl import app from absl import flags from absl import logging import chex import dill import haiku as hk import jax from jax.config import config as jax_config import jax.numpy as jnp from jaxline import experiment from jaxline import platform from jaxline import utils import jraph import numpy as np import optax import tensorflow as tf import tree # pylint: disable=g-bad-import-order import dataset_utils import datasets import model FLAGS = flags.FLAGS def _get_step_date_label(global_step: int): # Date removing microseconds. date_str = datetime.datetime.now().isoformat().split('.')[0] return f'step_{global_step}_{date_str}' class _Predictions(NamedTuple): predictions: np.ndarray indices: np.ndarray def tf1_ema(ema_value, current_value, decay, step): """Implements EMA with TF1-style decay warmup.""" decay = jnp.minimum(decay, (1.0 + step) / (10.0 + step)) return ema_value * decay + current_value * (1 - decay) def _sort_predictions_by_indices(predictions: _Predictions): sorted_order = np.argsort(predictions.indices) return _Predictions( predictions=predictions.predictions[sorted_order], indices=predictions.indices[sorted_order]) class Experiment(experiment.AbstractExperiment): """OGB Graph Property Prediction GraphNet experiment.""" CHECKPOINT_ATTRS = { '_params': 'params', '_opt_state': 'opt_state', '_network_state': 'network_state', '_ema_network_state': 'ema_network_state', '_ema_params': 'ema_params', } def __init__(self, mode, init_rng, config): """Initializes experiment.""" super(Experiment, self).__init__(mode=mode, init_rng=init_rng) if mode not in ('train', 'eval', 'train_eval_multithreaded'): raise ValueError(f'Invalid mode {mode}.') # Do not use accelerators in data pipeline. tf.config.experimental.set_visible_devices([], device_type='GPU') tf.config.experimental.set_visible_devices([], device_type='TPU') self.mode = mode self.init_rng = init_rng self.config = config self.loss = None self.forward = None # Needed for checkpoint restore. 
self._params = None self._network_state = None self._opt_state = None self._ema_network_state = None self._ema_params = None # _ _ # | |_ _ __ __ _(_)_ __ # | __| "__/ _` | | "_ \ # | |_| | | (_| | | | | | # \__|_| \__,_|_|_| |_| # def step(self, global_step: jnp.ndarray, rng: jnp.ndarray, **unused_args): """See Jaxline base class.""" if self.loss is None: self._train_init() graph = next(self._train_input) out = self.update_parameters( self._params, self._ema_params, self._network_state, self._ema_network_state, self._opt_state, global_step, rng, graph._asdict()) (self._params, self._ema_params, self._network_state, self._ema_network_state, self._opt_state, scalars) = out return utils.get_first(scalars) def _construct_loss_config(self): loss_config = getattr(model, self.config.model.loss_config_name) if self.config.model.loss_config_name == 'RegressionLossConfig': return loss_config( mean=datasets.NORMALIZE_TARGET_MEAN, std=datasets.NORMALIZE_TARGET_STD, kwargs=self.config.model.loss_kwargs) else: raise ValueError('Unknown Loss Config') def _train_init(self): self.loss = hk.transform_with_state(self._loss) self._train_input = utils.py_prefetch( lambda: self._build_numpy_dataset_iterator('train', is_training=True)) init_stacked_graphs = next(self._train_input) init_key = utils.bcast_local_devices(self.init_rng) p_init = jax.pmap(self.loss.init) self._params, self._network_state = p_init(init_key, **init_stacked_graphs._asdict()) # Learning rate scheduling. lr_schedule = optax.warmup_cosine_decay_schedule( **self.config.optimizer.lr_schedule) self.optimizer = getattr(optax, self.config.optimizer.name)( learning_rate=lr_schedule, **self.config.optimizer.optimizer_kwargs) self._opt_state = jax.pmap(self.optimizer.init)(self._params) self.update_parameters = jax.pmap(self._update_parameters, axis_name='i') if self.config.ema: self._ema_params = self._params self._ema_network_state = self._network_state def _loss( self, **graph: Mapping[str, chex.ArrayTree]) -> chex.ArrayTree: graph = jraph.GraphsTuple(**graph) model_instance = model.GraphPropertyEncodeProcessDecode( loss_config=self._construct_loss_config(), **self.config.model) loss, scalars = model_instance.get_loss(graph) return loss, scalars def _maybe_save_predictions( self, predictions: jnp.ndarray, split: str, global_step: jnp.ndarray, ): if not self.config.predictions_dir: return output_dir = os.path.join(self.config.predictions_dir, _get_step_date_label(global_step)) os.makedirs(output_dir, exist_ok=True) output_path = os.path.join(output_dir, split + '.dill') with open(output_path, 'wb') as f: dill.dump(predictions, f) logging.info('Saved %s predictions at: %s', split, output_path) def _build_numpy_dataset_iterator(self, split: str, is_training: bool): dynamic_batch_size_config = ( self.config.training.dynamic_batch_size if is_training else self.config.evaluation.dynamic_batch_size) return dataset_utils.build_dataset_iterator( split=split, dynamic_batch_size_config=dynamic_batch_size_config, sample_random=self.config.sample_random, debug=self.config.debug, is_training=is_training, **self.config.dataset_config) def _update_parameters( self, params: hk.Params, ema_params: hk.Params, network_state: hk.State, ema_network_state: hk.State, opt_state: optax.OptState, global_step: jnp.ndarray, rng: jnp.ndarray, graph: jraph.GraphsTuple, ) -> Tuple[hk.Params, hk.Params, hk.State, hk.State, optax.OptState, chex.ArrayTree]: """Updates parameters.""" def get_loss(*x, **graph): (loss, scalars), network_state = self.loss.apply(*x, **graph) return 
loss, (scalars, network_state) grad_loss_fn = jax.grad(get_loss, has_aux=True) scaled_grads, (scalars, network_state) = grad_loss_fn( params, network_state, rng, **graph) grads = jax.lax.psum(scaled_grads, axis_name='i') updates, opt_state = self.optimizer.update(grads, opt_state, params) params = optax.apply_updates(params, updates) if ema_params is not None: ema = lambda x, y: tf1_ema(x, y, self.config.ema_decay, global_step) ema_params = jax.tree_multimap(ema, ema_params, params) ema_network_state = jax.tree_multimap(ema, ema_network_state, network_state) return params, ema_params, network_state, ema_network_state, opt_state, scalars # _ # _____ ____ _| | # / _ \ \ / / _` | | # | __/\ V / (_| | | # \___| \_/ \__,_|_| # def evaluate(self, global_step: jnp.ndarray, rng: jnp.ndarray, **unused_kwargs) -> chex.ArrayTree: """See Jaxline base class.""" if self.forward is None: self._eval_init() if self.config.ema: params = utils.get_first(self._ema_params) state = utils.get_first(self._ema_network_state) else: params = utils.get_first(self._params) state = utils.get_first(self._network_state) rng = utils.get_first(rng) split = self.config.evaluation.split predictions, scalars = self._get_predictions( params, state, rng, utils.py_prefetch( functools.partial( self._build_numpy_dataset_iterator, split, is_training=False))) self._maybe_save_predictions(predictions, split, global_step[0]) return scalars def _sum_regression_scalars(self, preds: jnp.ndarray, graph: jraph.GraphsTuple) -> chex.ArrayTree: """Creates unnormalised values for accumulation.""" targets = graph.globals['target'] graph_mask = jraph.get_graph_padding_mask(graph) # Sum for accumulation, normalise later since there are a # variable number of graphs per batch. mae = model.sum_with_mask(jnp.abs(targets - preds), graph_mask) mse = model.sum_with_mask((targets - preds)**2, graph_mask) count = jnp.sum(graph_mask) return {'values': {'mae': mae.item(), 'mse': mse.item()}, 'counts': {'mae': count.item(), 'mse': count.item()}} def _get_prediction( self, params: hk.Params, state: hk.State, rng: jnp.ndarray, graph: jraph.GraphsTuple, ) -> np.ndarray: """Returns predictions for all the graphs in the dataset split.""" model_out, _ = self.eval_apply(params, state, rng, **graph._asdict()) prediction = np.squeeze(model_out['globals'], axis=1) return prediction def _get_predictions( self, params: hk.Params, state: hk.State, rng: jnp.ndarray, graph_iterator: Iterable[jraph.GraphsTuple], ) -> Tuple[_Predictions, chex.ArrayTree]: all_scalars = [] predictions = [] graph_indices = [] for i, graph in enumerate(graph_iterator): prediction = self._get_prediction(params, state, rng, graph) if 'target' in graph.globals and not jnp.isnan( graph.globals['target']).any(): scalars = self._sum_regression_scalars(prediction, graph) all_scalars.append(scalars) num_padding_graphs = jraph.get_number_of_padding_with_graphs_graphs(graph) num_valid_graphs = len(graph.n_node) - num_padding_graphs depadded_prediction = prediction[:num_valid_graphs] predictions.append(depadded_prediction) graph_indices.append(graph.globals['graph_index'][:num_valid_graphs]) if i % 1000 == 0: logging.info('Generated predictions for %d batches so far', i + 1) predictions = _sort_predictions_by_indices( _Predictions( predictions=np.concatenate(predictions), indices=np.concatenate(graph_indices))) if all_scalars: sum_all_args = lambda *l: sum(l) # Sum over graphs in the dataset. 
accum_scalars = tree.map_structure(sum_all_args, *all_scalars) scalars = tree.map_structure(lambda x, y: x / y, accum_scalars['values'], accum_scalars['counts']) else: scalars = {} return predictions, scalars def _eval_init(self): self.forward = hk.transform_with_state(self._forward) self.eval_apply = jax.jit(self.forward.apply) def _forward(self, **graph: Mapping[str, chex.ArrayTree]) -> chex.ArrayTree: graph = jraph.GraphsTuple(**graph) model_instance = model.GraphPropertyEncodeProcessDecode( loss_config=self._construct_loss_config(), **self.config.model) return model_instance(graph) def _restore_state_to_in_memory_checkpointer(restore_path): """Initializes experiment state from a checkpoint.""" # Load pretrained experiment state. python_state_path = os.path.join(restore_path, 'checkpoint.dill') with open(python_state_path, 'rb') as f: pretrained_state = dill.load(f) logging.info('Restored checkpoint from %s', python_state_path) # Assign state to a dummy experiment instance for the in-memory checkpointer, # broadcasting to devices. dummy_experiment = Experiment( mode='train', init_rng=0, config=FLAGS.config.experiment_kwargs.config) for attribute, key in Experiment.CHECKPOINT_ATTRS.items(): setattr(dummy_experiment, attribute, utils.bcast_local_devices(pretrained_state[key])) jaxline_state = dict( global_step=pretrained_state['global_step'], experiment_module=dummy_experiment) snapshot = utils.SnapshotNT(0, jaxline_state) # Finally, seed the jaxline `utils.InMemoryCheckpointer` global dict. utils.GLOBAL_CHECKPOINT_DICT['latest'] = utils.CheckpointNT( threading.local(), [snapshot]) def _save_state_from_in_memory_checkpointer( save_path, experiment_class: experiment.AbstractExperiment): """Saves experiment state to a checkpoint.""" logging.info('Saving model.') for checkpoint_name, checkpoint in utils.GLOBAL_CHECKPOINT_DICT.items(): if not checkpoint.history: logging.info('Nothing to save in "%s"', checkpoint_name) continue pickle_nest = checkpoint.history[-1].pickle_nest global_step = pickle_nest['global_step'] state_dict = {'global_step': global_step} for attribute, key in experiment_class.CHECKPOINT_ATTRS.items(): state_dict[key] = utils.get_first( getattr(pickle_nest['experiment_module'], attribute)) save_dir = os.path.join( save_path, checkpoint_name, _get_step_date_label(global_step)) python_state_path = os.path.join(save_dir, 'checkpoint.dill') os.makedirs(save_dir, exist_ok=True) with open(python_state_path, 'wb') as f: dill.dump(state_dict, f) logging.info( 'Saved "%s" checkpoint to %s', checkpoint_name, python_state_path) def _setup_signals(save_model_fn): """Sets up a signal for model saving.""" # Save a model on Ctrl+C. def sigint_handler(unused_sig, unused_frame): # Ideally, rather than saving immediately, we would then "wait" for a good # time to save. In practice this reads from an in-memory checkpoint that # only saves every 30 seconds or so, so chances of race conditions are very # small. save_model_fn() logging.info(r'Use `Ctrl+\` to save and exit.') # Exit on `Ctrl+\`, saving a model. prev_sigquit_handler = signal.getsignal(signal.SIGQUIT) def sigquit_handler(unused_sig, unused_frame): # Restore previous handler early, just in case something goes wrong in the # next lines, so it is possible to press again and exit. signal.signal(signal.SIGQUIT, prev_sigquit_handler) save_model_fn() logging.info(r'Exiting on `Ctrl+\`') # Re-raise for clean exit. 
os.kill(os.getpid(), signal.SIGQUIT) signal.signal(signal.SIGINT, sigint_handler) signal.signal(signal.SIGQUIT, sigquit_handler) def main(argv, experiment_class: experiment.AbstractExperiment): # Maybe restore a model. restore_path = FLAGS.config.restore_path if restore_path: _restore_state_to_in_memory_checkpointer(restore_path) # Maybe save a model. save_dir = os.path.join(FLAGS.config.checkpoint_dir, 'models') if FLAGS.config.one_off_evaluate: save_model_fn = lambda: None # No need to save checkpoint in this case. else: save_model_fn = functools.partial( _save_state_from_in_memory_checkpointer, save_dir, experiment_class) _setup_signals(save_model_fn) # Save on Ctrl+C (continue) or Ctrl+\ (exit). try: platform.main(experiment_class, argv) finally: save_model_fn() # Save at the end of training or in case of exception. if __name__ == '__main__': jax_config.update('jax_debug_nans', False) flags.mark_flag_as_required('config') app.run(lambda argv: main(argv, Experiment))
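

# --- Illustrative sketch (added; not part of the original file) ---
# Hedged numeric illustration of `tf1_ema`'s warmup: at step 0 the effective
# decay is min(0.999, 1/10) = 0.1, so the EMA initially tracks the raw value
# closely, then approaches the configured decay as `step` grows.
def _example_tf1_ema_warmup():
  ema = jnp.zeros([])
  for step in range(3):
    ema = tf1_ema(ema, jnp.ones([]), decay=0.999, step=step)
  return ema  # ~0.996 after only three steps of constant input 1.0.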
deepmind-research-master
ogb_lsc/pcq/experiment.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Dynamic batching utilities."""

from typing import Generator, Iterator, Sequence, Tuple

import jax.tree_util as tree
import jraph
import numpy as np

_NUMBER_FIELDS = ("n_node", "n_edge", "n_graph")


def dynamically_batch(graphs_tuple_iterator: Iterator[jraph.GraphsTuple],
                      n_node: int, n_edge: int,
                      n_graph: int) -> Generator[jraph.GraphsTuple, None, None]:
  """Dynamically batches `jraph.GraphsTuple`s up to the given size budgets.

  Elements of the `graphs_tuple_iterator` will be incrementally added to a
  batch until the limits defined by `n_node`, `n_edge` and `n_graph` are
  reached. This means each element yielded by this generator may contain a
  variable number of graphs.

  For situations where you have variable sized data, it's useful to be able
  to have variable sized batches. This is especially the case if you have a
  loss defined on the variable shaped element (for example, nodes in a
  graph).

  Args:
    graphs_tuple_iterator: An iterator of `jraph.GraphsTuples`.
    n_node: The maximum number of nodes in a batch.
    n_edge: The maximum number of edges in a batch.
    n_graph: The maximum number of graphs in a batch.

  Yields:
    A `jraph.GraphsTuple` batch of graphs.

  Raises:
    ValueError: if the number of graphs is < 2.
    RuntimeError: if the `graphs_tuple_iterator` contains elements which are
      not `jraph.GraphsTuple`s.
    RuntimeError: if a graph is found which is larger than the batch size.
  """
  if n_graph < 2:
    raise ValueError("The number of graphs in a batch size must be greater or "
                     f"equal to `2` for padding with graphs, got {n_graph}.")
  # One node and one graph are reserved for the padding graph appended by
  # `jraph.pad_with_graphs`, hence the `- 1` budgets below.
  valid_batch_size = (n_node - 1, n_edge, n_graph - 1)
  accumulated_graphs = []
  num_accumulated_nodes = 0
  num_accumulated_edges = 0
  num_accumulated_graphs = 0
  for element in graphs_tuple_iterator:
    element_nodes, element_edges, element_graphs = _get_graph_size(element)
    if _is_over_batch_size(element, valid_batch_size):
      graph_size = element_nodes, element_edges, element_graphs
      graph_size = {k: v for k, v in zip(_NUMBER_FIELDS, graph_size)}
      batch_size = {k: v for k, v in zip(_NUMBER_FIELDS, valid_batch_size)}
      raise RuntimeError("Found graph bigger than batch size. Valid Batch "
                         f"Size: {batch_size}, Graph Size: {graph_size}")

    if not accumulated_graphs:
      # If this is the first element of the batch, set it and continue.
      accumulated_graphs = [element]
      num_accumulated_nodes = element_nodes
      num_accumulated_edges = element_edges
      num_accumulated_graphs = element_graphs
      continue
    else:
      # Otherwise, check whether there is space for the graph in the batch:
      if ((num_accumulated_graphs + element_graphs > n_graph - 1) or
          (num_accumulated_nodes + element_nodes > n_node - 1) or
          (num_accumulated_edges + element_edges > n_edge)):
        # If not, yield the accumulated batch (padded to the requested sizes)
        # and start a new batch with the current element.
        batched_graph = _batch_np(accumulated_graphs)
        yield jraph.pad_with_graphs(batched_graph, n_node, n_edge, n_graph)
        accumulated_graphs = [element]
        num_accumulated_nodes = element_nodes
        num_accumulated_edges = element_edges
        num_accumulated_graphs = element_graphs
      else:
        # If there is space, add the element to the current batch.
        accumulated_graphs.append(element)
        num_accumulated_nodes += element_nodes
        num_accumulated_edges += element_edges
        num_accumulated_graphs += element_graphs

  # The last accumulated batch may still contain data, so yield it as well.
  if accumulated_graphs:
    batched_graph = _batch_np(accumulated_graphs)
    yield jraph.pad_with_graphs(batched_graph, n_node, n_edge, n_graph)


def _batch_np(graphs: Sequence[jraph.GraphsTuple]) -> jraph.GraphsTuple:
  # Calculates offsets for sender and receiver arrays, caused by concatenating
  # the nodes arrays.
  offsets = np.cumsum(np.array([0] + [np.sum(g.n_node) for g in graphs[:-1]]))

  def _map_concat(nests):
    concat = lambda *args: np.concatenate(args)
    return tree.tree_multimap(concat, *nests)

  return jraph.GraphsTuple(
      n_node=np.concatenate([g.n_node for g in graphs]),
      n_edge=np.concatenate([g.n_edge for g in graphs]),
      nodes=_map_concat([g.nodes for g in graphs]),
      edges=_map_concat([g.edges for g in graphs]),
      globals=_map_concat([g.globals for g in graphs]),
      senders=np.concatenate([g.senders + o for g, o in zip(graphs, offsets)]),
      receivers=np.concatenate(
          [g.receivers + o for g, o in zip(graphs, offsets)]))


def _get_graph_size(graph: jraph.GraphsTuple) -> Tuple[int, int, int]:
  n_node = np.sum(graph.n_node)
  n_edge = len(graph.senders)
  n_graph = len(graph.n_node)
  return n_node, n_edge, n_graph


def _is_over_batch_size(
    graph: jraph.GraphsTuple,
    graph_batch_size: Tuple[int, int, int],
) -> bool:
  graph_size = _get_graph_size(graph)
  return any([x > y for x, y in zip(graph_size, graph_batch_size)])
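

# --- Illustrative sketch (added; not part of the original file) ---
# Hedged usage example with arbitrary budgets. One node and one graph are
# reserved for the padding graph, which is why `dynamically_batch`
# accumulates against `n_node - 1` and `n_graph - 1`.
def _example_dynamic_batching():
  def toy_graphs():
    for num_nodes in [2, 3, 2]:
      yield jraph.GraphsTuple(
          nodes=np.zeros((num_nodes, 1), dtype=np.float32),
          edges=np.zeros((1, 1), dtype=np.float32),
          senders=np.array([0]),
          receivers=np.array([num_nodes - 1]),
          n_node=np.array([num_nodes]),
          n_edge=np.array([1]),
          globals=np.zeros((1, 1), dtype=np.float32))

  for batch in dynamically_batch(
      toy_graphs(), n_node=10, n_edge=10, n_graph=4):
    # Every yielded batch is padded to exactly the requested sizes.
    assert np.sum(batch.n_node) == 10 and len(batch.n_node) == 4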
deepmind-research-master
ogb_lsc/pcq/batching_utils.py
# Copyright 2021 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generate conformer features to be used for training/predictions.""" import multiprocessing as mp import pickle from typing import List from absl import app from absl import flags import numpy as np # pylint: disable=g-bad-import-order import conformer_utils import datasets _SPLITS = flags.DEFINE_spaceseplist( 'splits', ['test'], 'Splits to compute conformer features for.') _OUTPUT_FILE = flags.DEFINE_string( 'output_file', None, required=True, help='Output file name to write the generated conformer features to.') _NUM_PROCS = flags.DEFINE_integer( 'num_parallel_procs', 64, 'Number of parallel processes to use for conformer generation.') def generate_conformer_features(smiles: List[str]) -> List[np.ndarray]: # Conformer generation is a CPU-bound task and hence can get a boost from # parallel processing. # To avoid GIL, we choose multiprocessing instead of the # simpler multi-threading option here for parallel computing. with mp.Pool(_NUM_PROCS.value) as pool: return list(pool.map(conformer_utils.compute_conformer, smiles)) def main(_): smiles = datasets.load_smile_strings(with_labels=False) indices = set() for split in _SPLITS.value: indices.update(datasets.load_splits()[split]) smiles = [smiles[i] for i in sorted(indices)] conformers = generate_conformer_features(smiles) smiles_to_conformers = dict(zip(smiles, conformers)) with open(_OUTPUT_FILE.value, 'wb') as f: pickle.dump(smiles_to_conformers, f) if __name__ == '__main__': app.run(main)
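

# --- Illustrative sketch (added; not part of the original file) ---
# Hypothetical invocation; the flag names come from the definitions above,
# but the paths and values are made up:
#
#   python generate_conformer_features.py \
#     --splits='train valid test' \
#     --output_file=/tmp/smiles_to_conformers.pkl \
#     --num_parallel_procs=32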
deepmind-research-master
ogb_lsc/pcq/generate_conformer_features.py
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Conformer utilities."""

import copy
from typing import List, Optional

from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf


def generate_conformers(
    molecule: Chem.rdchem.Mol,
    max_num_conformers: int,
    *,
    random_seed: int = -1,
    prune_rms_thresh: float = -1.0,
    max_iter: int = -1,
    fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
  """Generates conformers for a given molecule.

  Args:
    molecule: molecular representation of the compound.
    max_num_conformers: maximum number of conformers to generate. If pruning is
      done, the returned number of conformers is not guaranteed to match
      max_num_conformers.
    random_seed: random seed to use for conformer generation.
    prune_rms_thresh: RMSD threshold which allows to prune conformers that are
      too similar.
    max_iter: Maximum number of iterations to perform when optimising MMFF
      force field. If set to <= 0, energy optimisation is not performed.
    fallback_to_random: if conformers cannot be obtained, use random
      coordinates to initialise.

  Returns:
    Copy of a `molecule` with added hydrogens. The returned molecule contains
    force field-optimised conformers. The number of conformers is guaranteed to
    be <= max_num_conformers.
  """
  mol = copy.deepcopy(molecule)
  mol = Chem.AddHs(mol)
  mol = _embed_conformers(
      mol,
      max_num_conformers,
      random_seed,
      prune_rms_thresh,
      fallback_to_random,
      use_random=False)

  if max_iter > 0:
    mol_with_conformers = _minimize_by_mmff(mol, max_iter)
    if mol_with_conformers is None:
      mol_with_conformers = _minimize_by_uff(mol, max_iter)
  else:
    mol_with_conformers = mol

  # Aligns conformations in a molecule to each other using the first
  # conformation as the reference.
  AllChem.AlignMolConformers(mol_with_conformers)

  # We remove hydrogens to keep the number of atoms consistent with the graph
  # nodes.
  mol_with_conformers = Chem.RemoveHs(mol_with_conformers)

  return mol_with_conformers


def atom_to_feature_vector(
    atom: rdkit.Chem.rdchem.Atom,
    conformer: Optional[np.ndarray] = None,
) -> List[float]:
  """Returns the 3D position of an rdkit atom taken from a conformer.

  Args:
    atom: rdkit atom object.
    conformer: Generated conformer. NaN values are returned if set to None.

  Returns:
    List containing the position (x, y, z) of the atom in the conformer.
  """
  if conformer:
    pos = conformer.GetAtomPosition(atom.GetIdx())
    return [pos.x, pos.y, pos.z]
  return [np.nan, np.nan, np.nan]


def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
  """Computes conformer positions for a molecule given as a smile string.

  Args:
    smile: Smile string.
    max_iter: Maximum number of iterations to perform when optimising MMFF
      force field. If set to <= 0, energy optimisation is not performed.

  Returns:
    A [num_atoms, 3] float array of conformer positions, filled with NaNs if
    conformer generation failed.

  Raises:
    RuntimeError: If unable to convert smile string to RDKit mol.
""" mol = rdkit.Chem.MolFromSmiles(smile) if not mol: raise RuntimeError('Unable to convert smile to molecule: %s' % smile) conformer_failed = False try: mol = generate_conformers( mol, max_num_conformers=1, random_seed=45, prune_rms_thresh=0.01, max_iter=max_iter) except IOError as e: logging.exception('Failed to generate conformers for %s . IOError %s.', smile, e) conformer_failed = True except ValueError: logging.error('Failed to generate conformers for %s . ValueError', smile) conformer_failed = True except: # pylint: disable=bare-except logging.error('Failed to generate conformers for %s.', smile) conformer_failed = True atom_features_list = [] conformer = None if conformer_failed else list(mol.GetConformers())[0] for atom in mol.GetAtoms(): atom_features_list.append(atom_to_feature_vector(atom, conformer)) conformer_features = np.array(atom_features_list, dtype=np.float32) return conformer_features def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor: """Returns a single random rotation matrix.""" rotation_matrix = _get_random_rotation_3d() if include_mirror_symmetry: random_mirror_symmetry = _get_random_mirror_symmetry() rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry) return rotation_matrix def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor: """Batch of vectors on a single rotation matrix.""" return tf.matmul(vectors, rotation_matrix) def _embed_conformers( molecule: Chem.rdchem.Mol, max_num_conformers: int, random_seed: int, prune_rms_thresh: float, fallback_to_random: bool, *, use_random: bool = False, ) -> Chem.rdchem.Mol: """Embeds conformers into a copy of a molecule. If random coordinates allowed, tries not to use random coordinates at first, and uses random only if fails. Args: molecule: molecular representation of the compound. max_num_conformers: maximum number of conformers to generate. If pruning is done, the returned number of conformers is not guaranteed to match max_num_conformers. random_seed: random seed to use for conformer generation. prune_rms_thresh: RMSD threshold which allows to prune conformers that are too similar. fallback_to_random: if conformers cannot be obtained, use random coordinates to initialise. *: use_random: Use random coordinates. Shouldn't be set by any caller except this function itself. Returns: A copy of a molecule with embedded conformers. Raises: ValueError: if conformers cannot be obtained for a given molecule. """ mol = copy.deepcopy(molecule) # Obtains parameters for conformer generation. # In particular, ETKDG is experimental-torsion basic knowledge distance # geometry, which allows to randomly generate an initial conformation that # satisfies various geometric constraints such as lower and upper bounds on # the distances between atoms. params = AllChem.ETKDGv3() params.randomSeed = random_seed params.pruneRmsThresh = prune_rms_thresh params.numThreads = -1 params.useRandomCoords = use_random conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params) if not conf_ids: if not fallback_to_random or use_random: raise ValueError('Cant get conformers') return _embed_conformers( mol, max_num_conformers, random_seed, prune_rms_thresh, fallback_to_random, use_random=True) return mol def _minimize_by_mmff( molecule: Chem.rdchem.Mol, max_iter: int, ) -> Optional[Chem.rdchem.Mol]: """Minimizes forcefield for conformers using MMFF algorithm. Args: molecule: a datastructure containing conformers. max_iter: number of maximum iterations to use when optimising force field. 
Returns: A copy of a `molecule` containing optimised conformers; or None if MMFF cannot be performed. """ molecule_props = AllChem.MMFFGetMoleculeProperties(molecule) if molecule_props is None: return None mol = copy.deepcopy(molecule) for conf_id in range(mol.GetNumConformers()): ff = AllChem.MMFFGetMoleculeForceField( mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False) ff.Initialize() # minimises a conformer within a mol in place. ff.Minimize(max_iter) return mol def _minimize_by_uff( molecule: Chem.rdchem.Mol, max_iter: int, ) -> Chem.rdchem.Mol: """Minimizes forcefield for conformers using UFF algorithm. Args: molecule: a datastructure containing conformers. max_iter: number of maximum iterations to use when optimising force field. Returns: A copy of a `molecule` containing optimised conformers. """ mol = copy.deepcopy(molecule) conf_ids = range(mol.GetNumConformers()) for conf_id in conf_ids: ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id) ff.Initialize() # minimises a conformer within a mol in place. ff.Minimize(max_iter) return mol def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor: """Returns the 2d/3d matrix for mirror symmetry.""" zero = tf.zeros_like(sign) one = tf.ones_like(sign) # pylint: disable=bad-whitespace,bad-continuation rot = [sign, zero, zero, zero, one, zero, zero, zero, one] # pylint: enable=bad-whitespace,bad-continuation shape = (3, 3) rot = tf.stack(rot, axis=-1) rot = tf.reshape(rot, shape) return rot def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor: """Converts a batch of quaternions to a batch of rotation matrices.""" q0 = quaternion[0] q1 = quaternion[1] q2 = quaternion[2] q3 = quaternion[3] r00 = 2 * (q0 * q0 + q1 * q1) - 1 r01 = 2 * (q1 * q2 - q0 * q3) r02 = 2 * (q1 * q3 + q0 * q2) r10 = 2 * (q1 * q2 + q0 * q3) r11 = 2 * (q0 * q0 + q2 * q2) - 1 r12 = 2 * (q2 * q3 - q0 * q1) r20 = 2 * (q1 * q3 - q0 * q2) r21 = 2 * (q2 * q3 + q0 * q1) r22 = 2 * (q0 * q0 + q3 * q3) - 1 matrix = tf.stack([r00, r01, r02, r10, r11, r12, r20, r21, r22], axis=-1) return tf.reshape(matrix, [3, 3]) def _get_random_rotation_3d() -> tf.Tensor: random_quaternions = tf.random.normal( shape=[4], dtype=tf.float32) random_quaternions /= tf.linalg.norm( random_quaternions, axis=-1, keepdims=True) return _quaternion_to_rotation_matrix(random_quaternions) def _get_random_mirror_symmetry() -> tf.Tensor: random_0_1 = tf.random.uniform( shape=(), minval=0, maxval=2, dtype=tf.int32) random_signs = tf.cast((2 * random_0_1) - 1, tf.float32) return _get_symmetry_rotation_matrix(random_signs)
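

# --- Illustrative sketch (added; not part of the original file) ---
# Hedged sanity check for the rotation helpers above: a matrix built from a
# unit quaternion should be orthogonal (R @ R^T ~ I), and enabling mirror
# symmetry may flip the determinant from +1 to -1. Tolerance is arbitrary.
def _example_rotation_sanity_check() -> tf.Tensor:
  rot = get_random_rotation_matrix(include_mirror_symmetry=True)
  tf.debugging.assert_near(
      tf.matmul(rot, rot, transpose_b=True), tf.eye(3), atol=1e-5)
  return tf.linalg.det(rot)  # +1 for a pure rotation, -1 if mirrored.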
deepmind-research-master
ogb_lsc/pcq/conformer_utils.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Smart module export/import utilities.""" import inspect import pickle import tensorflow.compat.v1 as tf from tensorflow.compat.v1.io import gfile import tensorflow_hub as hub import tree as nest import wrapt _ALLOWED_TYPES = (bool, float, int, str) def _getcallargs(signature, *args, **kwargs): bound_args = signature.bind(*args, **kwargs) bound_args.apply_defaults() inputs = bound_args.arguments inputs.pop("self", None) return inputs def _to_placeholder(arg): if arg is None or isinstance(arg, bool): return arg arg = tf.convert_to_tensor(arg) return tf.placeholder(dtype=arg.dtype, shape=arg.shape) class SmartModuleExport(object): """Helper class for exporting TF-Hub modules.""" def __init__(self, object_factory): self._object_factory = object_factory self._wrapped_object = self._object_factory() self._variable_scope = tf.get_variable_scope() self._captured_calls = {} self._captured_attrs = {} def _create_captured_method(self, method_name): """Creates a wrapped method that captures its inputs.""" with tf.variable_scope(self._variable_scope): method_ = getattr(self._wrapped_object, method_name) @wrapt.decorator def wrapper(method, instance, args, kwargs): """Wrapped method to capture inputs.""" del instance specs = inspect.signature(method) inputs = _getcallargs(specs, *args, **kwargs) with tf.variable_scope(self._variable_scope): output = method(*args, **kwargs) self._captured_calls[method_name] = [inputs, specs] return output return wrapper(method_) # pylint: disable=no-value-for-parameter def __getattr__(self, name): """Helper method for accessing an attributes of the wrapped object.""" # if "_wrapped_object" not in self.__dict__: # return super(ExportableModule, self).__getattr__(name) with tf.variable_scope(self._variable_scope): attr = getattr(self._wrapped_object, name) if inspect.ismethod(attr) or inspect.isfunction(attr): return self._create_captured_method(name) else: if all([isinstance(v, _ALLOWED_TYPES) for v in nest.flatten(attr)]): self._captured_attrs[name] = attr return attr def __call__(self, *args, **kwargs): return self._create_captured_method("__call__")(*args, **kwargs) def export(self, path, session, overwrite=False): """Build the TF-Hub spec, module and sync ops.""" method_specs = {} def module_fn(): """A module_fn for use with hub.create_module_spec().""" # We will use a copy of the original object to build the graph. 
wrapped_object = self._object_factory() for method_name, method_info in self._captured_calls.items(): captured_inputs, captured_specs = method_info tensor_inputs = nest.map_structure(_to_placeholder, captured_inputs) method_to_call = getattr(wrapped_object, method_name) tensor_outputs = method_to_call(**tensor_inputs) flat_tensor_inputs = nest.flatten(tensor_inputs) flat_tensor_inputs = { str(k): v for k, v in zip( range(len(flat_tensor_inputs)), flat_tensor_inputs) } flat_tensor_outputs = nest.flatten(tensor_outputs) flat_tensor_outputs = { str(k): v for k, v in zip( range(len(flat_tensor_outputs)), flat_tensor_outputs) } method_specs[method_name] = dict( specs=captured_specs, inputs=nest.map_structure(lambda _: None, tensor_inputs), outputs=nest.map_structure(lambda _: None, tensor_outputs)) signature_name = ("default" if method_name == "__call__" else method_name) hub.add_signature(signature_name, flat_tensor_inputs, flat_tensor_outputs) hub.attach_message( "methods", tf.train.BytesList(value=[pickle.dumps(method_specs)])) hub.attach_message( "properties", tf.train.BytesList(value=[pickle.dumps(self._captured_attrs)])) # Create the spec that will be later used in export. hub_spec = hub.create_module_spec(module_fn, drop_collections=["sonnet"]) # Get variables values module_weights = [ session.run(v) for v in self._wrapped_object.get_all_variables() ] # create the sync ops with tf.Graph().as_default(): hub_module = hub.Module(hub_spec, trainable=True, name="hub") assign_ops = [] assign_phs = [] for _, v in sorted(hub_module.variable_map.items()): ph = tf.placeholder(shape=v.shape, dtype=v.dtype) assign_phs.append(ph) assign_ops.append(tf.assign(v, ph)) with tf.Session() as module_session: module_session.run(tf.local_variables_initializer()) module_session.run(tf.global_variables_initializer()) module_session.run( assign_ops, feed_dict=dict(zip(assign_phs, module_weights))) if overwrite and gfile.exists(path): gfile.rmtree(path) gfile.makedirs(path) hub_module.export(path, module_session) class SmartModuleImport(object): """A class for importing graph building objects from TF-Hub modules.""" def __init__(self, module): self._module = module self._method_specs = pickle.loads( self._module.get_attached_message("methods", tf.train.BytesList).value[0]) self._properties = pickle.loads( self._module.get_attached_message("properties", tf.train.BytesList).value[0]) def _create_wrapped_method(self, method): """Creates a wrapped method that converts nested inputs and outputs.""" def wrapped_method(*args, **kwargs): """A wrapped method around a TF-Hub module signature.""" inputs = _getcallargs(self._method_specs[method]["specs"], *args, **kwargs) nest.assert_same_structure(self._method_specs[method]["inputs"], inputs) flat_inputs = nest.flatten(inputs) flat_inputs = { str(k): v for k, v in zip(range(len(flat_inputs)), flat_inputs) } signature = "default" if method == "__call__" else method flat_outputs = self._module( flat_inputs, signature=signature, as_dict=True) flat_outputs = [v for _, v in sorted(flat_outputs.items())] output_spec = self._method_specs[method]["outputs"] if output_spec is None: if len(flat_outputs) != 1: raise ValueError( "Expected output containing a single tensor, found {}".format( flat_outputs)) outputs = flat_outputs[0] else: outputs = nest.unflatten_as(output_spec, flat_outputs) return outputs return wrapped_method def __getattr__(self, name): if name in self._method_specs: return self._create_wrapped_method(name) if name in self._properties: return self._properties[name] 
return getattr(self._module, name) def __call__(self, *args, **kwargs): return self._create_wrapped_method("__call__")(*args, **kwargs)
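

# --- Illustrative sketch (added; not part of the original file) ---
# Hedged round-trip usage; `make_net` and the export path are hypothetical.
# `SmartModuleExport` records the call signatures of traced methods, so the
# module must be called at least once before exporting:
#
#   exportable = SmartModuleExport(make_net)  # e.g. a Sonnet module factory.
#   outputs = exportable(tf.zeros([1, 8]))    # Traces and captures `__call__`.
#   with tf.Session() as session:
#     session.run(tf.global_variables_initializer())
#     exportable.export("/tmp/exported_module", session, overwrite=True)
#
#   imported = SmartModuleImport(hub.Module("/tmp/exported_module"))
#   outputs_again = imported(tf.zeros([1, 8]))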
deepmind-research-master
option_keyboard/smart_module.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Environment with keyboard.""" import itertools from absl import logging import dm_env import numpy as np import tensorflow.compat.v1 as tf import tensorflow_hub as hub import tree from option_keyboard import smart_module class EnvironmentWithLogging(dm_env.Environment): """Wraps an environment with additional logging.""" def __init__(self, env): self._env = env self._episode_return = 0 def reset(self): self._episode_return = 0 return self._env.reset() def step(self, action): """Take action in the environment and do some logging.""" step = self._env.step(action) if step.first(): step = self._env.step(action) self._episode_return = 0 self._episode_return += step.reward return step @property def episode_return(self): return self._episode_return def action_spec(self): return self._env.action_spec() def observation_spec(self): return self._env.observation_spec() def __getattr__(self, name): return getattr(self._env, name) class EnvironmentWithKeyboard(dm_env.Environment): """Wraps an environment with a keyboard.""" def __init__(self, env, keyboard, keyboard_ckpt_path, n_actions_per_dim, additional_discount, call_and_return=False): self._env = env self._keyboard = keyboard self._discount = additional_discount self._call_and_return = call_and_return options = _discretize_actions(n_actions_per_dim, keyboard.num_cumulants) self._options_np = options options = tf.convert_to_tensor(options, dtype=tf.float32) self._options = options obs_spec = self._extract_observation(env.observation_spec()) obs_ph = tf.placeholder(shape=obs_spec.shape, dtype=obs_spec.dtype) option_ph = tf.placeholder(shape=(), dtype=tf.int32) gpi_action = self._keyboard.gpi(obs_ph, options[option_ph]) session = tf.Session() self._gpi_action = session.make_callable(gpi_action, [obs_ph, option_ph]) self._keyboard_action = session.make_callable( self._keyboard(tf.expand_dims(obs_ph, axis=0))[0], [obs_ph]) session.run(tf.global_variables_initializer()) if keyboard_ckpt_path: saver = tf.train.Saver(var_list=keyboard.variables) saver.restore(session, keyboard_ckpt_path) def _compute_reward(self, option, obs): return np.sum(self._options_np[option] * obs["cumulants"]) def reset(self): return self._env.reset() def step(self, option): """Take a step in the keyboard, then the environment.""" step_count = 0 option_step = None while True: obs = self._extract_observation(self._env.observation()) action = self._gpi_action(obs, option) action_step = self._env.step(action) step_count += 1 if option_step is None: option_step = action_step else: new_discount = ( option_step.discount * self._discount * action_step.discount) new_reward = ( option_step.reward + new_discount * action_step.reward) option_step = option_step._replace( observation=action_step.observation, reward=new_reward, discount=new_discount, 
            step_type=action_step.step_type)

      if action_step.last():
        break

      # Terminate option.
      if self._should_terminate(option, action_step.observation):
        break

      if not self._call_and_return:
        break
    return option_step

  def _should_terminate(self, option, obs):
    if self._compute_reward(option, obs) > 0:
      return True
    elif np.all(self._options_np[option] <= 0):
      # TODO(shaobohou) A hack to ensure an option with non-positive weights
      # terminates after one step.
      return True
    else:
      return False

  def action_spec(self):
    return dm_env.specs.DiscreteArray(
        num_values=self._options_np.shape[0], name="action")

  def _extract_observation(self, obs):
    return obs["arena"]

  def observation_spec(self):
    return self._env.observation_spec()

  def __getattr__(self, name):
    return getattr(self._env, name)


class EnvironmentWithKeyboardDirect(dm_env.Environment):
  """Wraps an environment with a keyboard.

  This is different from EnvironmentWithKeyboard in that the action space is
  not discretized.

  TODO(shaobohou) Merge the two implementations.
  """

  def __init__(self,
               env,
               keyboard,
               keyboard_ckpt_path,
               additional_discount,
               call_and_return=False):
    self._env = env
    self._keyboard = keyboard
    self._discount = additional_discount
    self._call_and_return = call_and_return

    obs_spec = self._extract_observation(env.observation_spec())
    obs_ph = tf.placeholder(shape=obs_spec.shape, dtype=obs_spec.dtype)
    option_ph = tf.placeholder(
        shape=(keyboard.num_cumulants,), dtype=tf.float32)
    gpi_action = self._keyboard.gpi(obs_ph, option_ph)

    session = tf.Session()
    self._gpi_action = session.make_callable(gpi_action, [obs_ph, option_ph])
    self._keyboard_action = session.make_callable(
        self._keyboard(tf.expand_dims(obs_ph, axis=0))[0], [obs_ph])
    session.run(tf.global_variables_initializer())

    if keyboard_ckpt_path:
      saver = tf.train.Saver(var_list=keyboard.variables)
      saver.restore(session, keyboard_ckpt_path)

  def _compute_reward(self, option, obs):
    assert option.shape == obs["cumulants"].shape
    return np.sum(option * obs["cumulants"])

  def reset(self):
    return self._env.reset()

  def step(self, option):
    """Take a step in the keyboard, then the environment."""
    step_count = 0
    option_step = None
    while True:
      obs = self._extract_observation(self._env.observation())
      action = self._gpi_action(obs, option)
      action_step = self._env.step(action)
      step_count += 1

      if option_step is None:
        option_step = action_step
      else:
        new_discount = (
            option_step.discount * self._discount * action_step.discount)
        new_reward = (
            option_step.reward + new_discount * action_step.reward)
        option_step = option_step._replace(
            observation=action_step.observation,
            reward=new_reward,
            discount=new_discount,
            step_type=action_step.step_type)

      if action_step.last():
        break

      # Terminate option.
      if self._should_terminate(option, action_step.observation):
        break

      if not self._call_and_return:
        break
    return option_step

  def _should_terminate(self, option, obs):
    if self._compute_reward(option, obs) > 0:
      return True
    elif np.all(option <= 0):
      # TODO(shaobohou) A hack to ensure an option with non-positive weights
      # terminates after one step.
      return True
    else:
      return False

  def action_spec(self):
    return dm_env.specs.BoundedArray(shape=(self._keyboard.num_cumulants,),
                                     dtype=np.float32,
                                     minimum=-1.0,
                                     maximum=1.0,
                                     name="action")

  def _extract_observation(self, obs):
    return obs["arena"]

  def observation_spec(self):
    return self._env.observation_spec()

  def __getattr__(self, name):
    return getattr(self._env, name)


def _discretize_actions(num_actions_per_dim,
                        action_space_dim,
                        min_val=-1.0,
                        max_val=1.0):
  """Discretize a continuous action space."""
  if num_actions_per_dim > 1:
    discretized_dim_action = np.linspace(
        min_val, max_val, num_actions_per_dim, endpoint=True)
    discretized_actions = [discretized_dim_action] * action_space_dim
    discretized_actions = itertools.product(*discretized_actions)
    discretized_actions = list(discretized_actions)
  elif num_actions_per_dim == 1:
    discretized_actions = [
        max_val * np.eye(action_space_dim),
        min_val * np.eye(action_space_dim),
    ]
    discretized_actions = np.concatenate(discretized_actions, axis=0)
  elif num_actions_per_dim == 0:
    discretized_actions = np.eye(action_space_dim)
  else:
    raise ValueError(
        "Unsupported num_actions_per_dim {}".format(num_actions_per_dim))
  discretized_actions = np.array(discretized_actions)

  # Remove options with all zeros.
  non_zero_entries = np.sum(np.square(discretized_actions), axis=-1) != 0.0
  discretized_actions = discretized_actions[non_zero_entries]
  logging.info("Total number of discretized actions: %s",
               len(discretized_actions))
  logging.info("Discretized actions: %s", discretized_actions)

  return discretized_actions


class EnvironmentWithLearnedPhi(dm_env.Environment):
  """Wraps an environment with a learned phi model."""

  def __init__(self, env, model_path):
    self._env = env

    create_ph = lambda x: tf.placeholder(shape=x.shape, dtype=x.dtype)
    add_batch = lambda x: tf.expand_dims(x, axis=0)

    # Make session and callables.
with tf.Graph().as_default(): model = smart_module.SmartModuleImport(hub.Module(model_path)) obs_spec = env.observation_spec() obs_ph = tree.map_structure(create_ph, obs_spec) action_ph = tf.placeholder(shape=(), dtype=tf.int32) phis = model(tree.map_structure(add_batch, obs_ph), add_batch(action_ph)) self.num_phis = phis.shape.as_list()[-1] self._last_phis = np.zeros((self.num_phis,), dtype=np.float32) session = tf.Session() self._session = session self._phis_fn = session.make_callable( phis[0], tree.flatten([obs_ph, action_ph])) self._session.run(tf.global_variables_initializer()) def reset(self): self._last_phis = np.zeros((self.num_phis,), dtype=np.float32) return self._env.reset() def step(self, action): """Take action in the environment and do some logging.""" phis = self._phis_fn(*tree.flatten([self._env.observation(), action])) step = self._env.step(action) if step.first(): phis = self._phis_fn(*tree.flatten([self._env.observation(), action])) step = self._env.step(action) step.observation["cumulants"] = phis self._last_phis = phis return step def action_spec(self): return self._env.action_spec() def observation(self): obs = self._env.observation() obs["cumulants"] = self._last_phis return obs def observation_spec(self): obs_spec = self._env.observation_spec() obs_spec["cumulants"] = dm_env.specs.BoundedArray( shape=(self.num_phis,), dtype=np.float32, minimum=-1e9, maximum=1e9, name="collected_resources") return obs_spec def __getattr__(self, name): return getattr(self._env, name)
deepmind-research-master
option_keyboard/environment_wrappers.py
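# Usage sketch (illustrative only, not part of the original file): how the
# wrappers above compose, following run_ok.py. The `keyboard_path` value is a
# hypothetical export location produced by a previously trained keyboard.
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub

from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import scavenger
from option_keyboard import smart_module

tf.disable_v2_behavior()

keyboard_path = "/tmp/option_keyboard/keyboard/tfhub"  # Hypothetical path.
keyboard = smart_module.SmartModuleImport(hub.Module(keyboard_path))

base_env = scavenger.Scavenger(**configs.get_task_config())
base_env = environment_wrappers.EnvironmentWithLogging(base_env)
env = environment_wrappers.EnvironmentWithKeyboard(
    env=base_env,
    keyboard=keyboard,
    keyboard_ckpt_path=None,
    n_actions_per_dim=3,
    additional_discount=0.9,
    call_and_return=False)

timestep = env.reset()
# Each "action" selects an option; the wrapper runs GPI internally.
timestep = env.step(0)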
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Environment configurations.""" def get_task_config(): return dict( arena_size=11, num_channels=2, max_num_steps=50, # 50 for the actual task. num_init_objects=10, object_priors=[0.5, 0.5], egocentric=True, rewarder="BalancedCollectionRewarder", ) def get_pretrain_config(): return dict( arena_size=11, num_channels=2, max_num_steps=40, # 40 for pretraining. num_init_objects=10, object_priors=[0.5, 0.5], egocentric=True, default_w=(1, 1), ) def get_fig4_task_config(): return dict( arena_size=11, num_channels=2, max_num_steps=50, # 50 for the actual task. num_init_objects=10, object_priors=[0.5, 0.5], egocentric=True, default_w=(1, -1), ) def get_fig5_task_config(default_w): return dict( arena_size=11, num_channels=2, max_num_steps=50, # 50 for the actual task. num_init_objects=10, object_priors=[0.5, 0.5], egocentric=True, default_w=default_w, )
deepmind-research-master
option_keyboard/configs.py
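# Usage sketch (illustrative only): each config above is a plain kwargs dict
# for scavenger.Scavenger, so environments are built by unpacking it, exactly
# as the run scripts do.
from option_keyboard import configs
from option_keyboard import scavenger

task_env = scavenger.Scavenger(**configs.get_task_config())
pretrain_env = scavenger.Scavenger(**configs.get_pretrain_config())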
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Simple Scavenger environment.""" import copy import enum import sys import dm_env import numpy as np from option_keyboard import auto_reset_environment this_module = sys.modules[__name__] class Action(enum.IntEnum): """Actions available to the player.""" UP = 0 DOWN = 1 LEFT = 2 RIGHT = 3 def _one_hot(indices, depth): return np.eye(depth)[indices] def _random_pos(arena_size): return tuple(np.random.randint(0, arena_size, size=[2]).tolist()) class Scavenger(auto_reset_environment.Base): """Simple Scavenger.""" def __init__(self, arena_size, num_channels, max_num_steps, default_w=None, num_init_objects=15, object_priors=None, egocentric=True, rewarder=None, aux_tasks_w=None): self._arena_size = arena_size self._num_channels = num_channels self._max_num_steps = max_num_steps self._num_init_objects = num_init_objects self._egocentric = egocentric self._rewarder = ( getattr(this_module, rewarder)() if rewarder is not None else None) self._aux_tasks_w = aux_tasks_w if object_priors is None: self._object_priors = np.ones(num_channels) / num_channels else: assert len(object_priors) == num_channels self._object_priors = np.array(object_priors) / np.sum(object_priors) if default_w is None: self._default_w = np.ones(shape=(num_channels,)) else: self._default_w = default_w self._num_channels_all = self._num_channels + 2 self._step_in_episode = None @property def state(self): return copy.deepcopy([ self._step_in_episode, self._walls, self._objects, self._player_pos, self._prev_collected, ]) def set_state(self, state): state_ = copy.deepcopy(state) self._step_in_episode = state_[0] self._walls = state_[1] self._objects = state_[2] self._player_pos = state_[3] self._prev_collected = state_[4] @property def player_pos(self): return self._player_pos def _reset(self): self._step_in_episode = 0 # Walls. self._walls = [] for col in range(self._arena_size): new_pos = (0, col) if new_pos not in self._walls: self._walls.append(new_pos) for row in range(self._arena_size): new_pos = (row, 0) if new_pos not in self._walls: self._walls.append(new_pos) # Objects. 
self._objects = dict() for _ in range(self._num_init_objects): while True: new_pos = _random_pos(self._arena_size) if new_pos not in self._objects and new_pos not in self._walls: self._objects[new_pos] = np.random.multinomial(1, self._object_priors) break # Player self._player_pos = _random_pos(self._arena_size) while self._player_pos in self._objects or self._player_pos in self._walls: self._player_pos = _random_pos(self._arena_size) self._prev_collected = np.zeros(shape=(self._num_channels,)) obs = self.observation() return dm_env.restart(obs) def _step(self, action): self._step_in_episode += 1 if action == Action.UP: new_player_pos = (self._player_pos[0], self._player_pos[1] + 1) elif action == Action.DOWN: new_player_pos = (self._player_pos[0], self._player_pos[1] - 1) elif action == Action.LEFT: new_player_pos = (self._player_pos[0] - 1, self._player_pos[1]) elif action == Action.RIGHT: new_player_pos = (self._player_pos[0] + 1, self._player_pos[1]) else: raise ValueError("Invalid action `{}`".format(action)) # Toroidal. new_player_pos = ( (new_player_pos[0] + self._arena_size) % self._arena_size, (new_player_pos[1] + self._arena_size) % self._arena_size, ) if new_player_pos not in self._walls: self._player_pos = new_player_pos # Compute rewards. consumed = self._objects.pop(self._player_pos, np.zeros(shape=(self._num_channels,))) if self._rewarder is None: reward = np.dot(consumed, np.array(self._default_w)) else: reward = self._rewarder.get_reward(self.state, consumed) self._prev_collected = np.copy(consumed) assert self._player_pos not in self._objects assert self._player_pos not in self._walls # Render everything. obs = self.observation() if self._step_in_episode < self._max_num_steps: return dm_env.transition(reward=reward, observation=obs) else: # termination with discount=1.0 return dm_env.truncation(reward=reward, observation=obs) def observation(self, force_non_egocentric=False): arena_shape = [self._arena_size] * 2 + [self._num_channels_all] arena = np.zeros(shape=arena_shape, dtype=np.float32) def offset_position(pos_): use_egocentric = self._egocentric and not force_non_egocentric offset = self._player_pos if use_egocentric else (0, 0) x = (pos_[0] - offset[0] + self._arena_size) % self._arena_size y = (pos_[1] - offset[1] + self._arena_size) % self._arena_size return (x, y) player_pos = offset_position(self._player_pos) arena[player_pos] = _one_hot(self._num_channels, self._num_channels_all) for pos, obj in self._objects.items(): x, y = offset_position(pos) arena[x, y, :self._num_channels] = obj for pos in self._walls: x, y = offset_position(pos) arena[x, y] = _one_hot(self._num_channels + 1, self._num_channels_all) collected_resources = np.copy(self._prev_collected).astype(np.float32) obs = dict( arena=arena, cumulants=collected_resources, ) if self._aux_tasks_w is not None: obs["aux_tasks_reward"] = np.dot( np.array(self._aux_tasks_w), self._prev_collected).astype(np.float32) return obs def observation_spec(self): arena = dm_env.specs.BoundedArray( shape=(self._arena_size, self._arena_size, self._num_channels_all), dtype=np.float32, minimum=0., maximum=1., name="arena") collected_resources = dm_env.specs.BoundedArray( shape=(self._num_channels,), dtype=np.float32, minimum=-1e9, maximum=1e9, name="collected_resources") obs_spec = dict( arena=arena, cumulants=collected_resources, ) if self._aux_tasks_w is not None: obs_spec["aux_tasks_reward"] = dm_env.specs.BoundedArray( shape=(len(self._aux_tasks_w),), dtype=np.float32, minimum=-1e9, maximum=1e9, 
name="aux_tasks_reward") return obs_spec def action_spec(self): return dm_env.specs.DiscreteArray(num_values=len(Action), name="action") class SequentialCollectionRewarder(object): """SequentialCollectionRewarder.""" def get_reward(self, state, consumed): """Get reward.""" object_counts = sum(list(state[2].values()) + [np.zeros(len(consumed))]) reward = 0.0 if np.sum(consumed) > 0: for i in range(len(consumed)): if np.all(object_counts[:i] <= object_counts[i]): reward += consumed[i] else: reward -= consumed[i] return reward class BalancedCollectionRewarder(object): """BalancedCollectionRewarder.""" def get_reward(self, state, consumed): """Get reward.""" object_counts = sum(list(state[2].values()) + [np.zeros(len(consumed))]) reward = 0.0 if np.sum(consumed) > 0: for i in range(len(consumed)): if (object_counts[i] + consumed[i]) >= np.max(object_counts): reward += consumed[i] else: reward -= consumed[i] return reward
deepmind-research-master
option_keyboard/scavenger.py
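# Usage sketch (illustrative only): a short random rollout. The "cumulants"
# entry of the observation holds the one-hot encoding of whatever was
# collected on the most recent step; the reward is its dot product with the
# task weights (or comes from the configured rewarder).
import numpy as np

from option_keyboard import configs
from option_keyboard import scavenger

env = scavenger.Scavenger(**configs.get_pretrain_config())
timestep = env.reset()
while not timestep.last():
  action = int(np.random.randint(len(scavenger.Action)))
  timestep = env.step(action)
print("final cumulants:", timestep.observation["cumulants"])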
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Keyboard agent."""

import os

import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf

from option_keyboard import smart_module


class Agent():
  """An Option Keyboard Agent."""

  def __init__(
      self,
      obs_spec,
      action_spec,
      policy_weights,
      network_kwargs,
      epsilon,
      additional_discount,
      batch_size,
      optimizer_name,
      optimizer_kwargs,
  ):
    """An option keyboard agent trained with Q-learning.

    Args:
      obs_spec: The observation spec.
      action_spec: The action spec.
      policy_weights: A list of vectors each representing the cumulant weights
        for that particular option/policy.
      network_kwargs: Keyword arguments for snt.nets.MLP
      epsilon: Exploration probability.
      additional_discount: Discount on returns used by the agent.
      batch_size: Size of update batch.
      optimizer_name: Name of an optimizer from tf.train
      optimizer_kwargs: Keyword arguments for the optimizer.
    """
    tf.logging.info(policy_weights)

    self._policy_weights = tf.convert_to_tensor(
        policy_weights, dtype=tf.float32)
    self._current_policy = None

    self._epsilon = epsilon
    self._additional_discount = additional_discount
    self._batch_size = batch_size

    self._n_actions = action_spec.num_values
    self._n_policies, self._n_cumulants = policy_weights.shape

    def create_network():
      return OptionValueNet(
          self._n_policies,
          self._n_cumulants,
          self._n_actions,
          network_kwargs=network_kwargs,
      )

    self._network = smart_module.SmartModuleExport(create_network)
    self._replay = []

    obs_spec = self._extract_observation(obs_spec)

    def option_values(values, policy):
      return tf.tensordot(
          values[:, policy, ...], self._policy_weights[policy], axes=[1, 0])

    # Placeholders for policy.
    o = tf.placeholder(shape=obs_spec.shape, dtype=obs_spec.dtype)
    p = tf.placeholder(shape=(), dtype=tf.int32)
    q = self._network(tf.expand_dims(o, axis=0))
    qo = option_values(q, p)

    # Placeholders for update.
    o_tm1 = tf.placeholder(
        shape=(None,) + obs_spec.shape, dtype=obs_spec.dtype)
    a_tm1 = tf.placeholder(shape=(None,), dtype=tf.int32)
    c_t = tf.placeholder(shape=(None, self._n_cumulants), dtype=tf.float32)
    d_t = tf.placeholder(shape=(None,), dtype=tf.float32)
    o_t = tf.placeholder(
        shape=(None,) + obs_spec.shape, dtype=obs_spec.dtype)

    # Compute values over all options.
q_tm1 = self._network(o_tm1) q_t = self._network(o_t) qo_t = option_values(q_t, p) a_t = tf.cast(tf.argmax(qo_t, axis=-1), tf.int32) qa_tm1 = _batched_index(q_tm1[:, p, ...], a_tm1) qa_t = _batched_index(q_t[:, p, ...], a_t) # TD error g = additional_discount * tf.expand_dims(d_t, axis=-1) td_error = tf.stop_gradient(c_t + g * qa_t) - qa_tm1 loss = tf.reduce_sum(tf.square(td_error) / 2) # Dummy calls to keyboard for SmartModule _ = self._network.gpi(o_tm1[0], c_t[0]) _ = self._network.num_cumulants _ = self._network.num_policies _ = self._network.num_actions with tf.variable_scope("optimizer"): self._optimizer = getattr(tf.train, optimizer_name)(**optimizer_kwargs) train_op = self._optimizer.minimize(loss) # Make session and callables. session = tf.Session() self._session = session self._update_fn = session.make_callable( train_op, [o_tm1, a_tm1, c_t, d_t, o_t, p]) self._value_fn = session.make_callable(qo, [o, p]) session.run(tf.global_variables_initializer()) self._saver = tf.train.Saver(var_list=self._network.variables) @property def keyboard(self): return self._network def _extract_observation(self, obs): return obs["arena"] def step(self, timestep, is_training=False): """Select actions according to epsilon-greedy policy.""" if timestep.first(): self._current_policy = np.random.randint(self._n_policies) if is_training and np.random.rand() < self._epsilon: return np.random.randint(self._n_actions) q_values = self._value_fn( self._extract_observation(timestep.observation), self._current_policy) return int(np.argmax(q_values)) def update(self, step_tm1, action, step_t): """Takes in a transition from the environment.""" transition = [ self._extract_observation(step_tm1.observation), action, step_t.observation["cumulants"], step_t.discount, self._extract_observation(step_t.observation), ] self._replay.append(transition) if len(self._replay) == self._batch_size: batch = list(zip(*self._replay)) + [self._current_policy] self._update_fn(*batch) self._replay = [] # Just a queue. def export(self, path): tf.logging.info("Exporting keyboard to %s", path) self._network.export( os.path.join(path, "tfhub"), self._session, overwrite=True) self._saver.save(self._session, os.path.join(path, "checkpoints")) class OptionValueNet(snt.AbstractModule): """Option Value net.""" def __init__(self, n_policies, n_cumulants, n_actions, network_kwargs, name="option_keyboard"): """Construct an Option Value Net sonnet module. Args: n_policies: Number of policies. n_cumulants: Number of cumulants. n_actions: Number of actions. network_kwargs: Network arguments. 
name: Name """ super(OptionValueNet, self).__init__(name=name) self._n_policies = n_policies self._n_cumulants = n_cumulants self._n_actions = n_actions self._network_kwargs = network_kwargs def _build(self, observation): values = [] flat_obs = snt.BatchFlatten()(observation) for _ in range(self._n_cumulants): net = snt.nets.MLP(**self._network_kwargs)(flat_obs) net = snt.Linear(output_size=self._n_policies * self._n_actions)(net) net = snt.BatchReshape([self._n_policies, self._n_actions])(net) values.append(net) values = tf.stack(values, axis=2) return values def gpi(self, observation, cumulant_weights): q_values = self.__call__(tf.expand_dims(observation, axis=0))[0] q_w = tf.tensordot(q_values, cumulant_weights, axes=[1, 0]) # [P,a] q_w_actions = tf.reduce_max(q_w, axis=0) action = tf.cast(tf.argmax(q_w_actions), tf.int32) return action @property def num_cumulants(self): return self._n_cumulants @property def num_policies(self): return self._n_policies @property def num_actions(self): return self._n_actions def _batched_index(values, indices): one_hot_indices = tf.one_hot(indices, values.shape[-1], dtype=values.dtype) one_hot_indices = tf.expand_dims(one_hot_indices, axis=1) return tf.reduce_sum(values * one_hot_indices, axis=-1)
deepmind-research-master
option_keyboard/keyboard_agent.py
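# Illustrative numpy sketch of the GPE/GPI step implemented by
# OptionValueNet.gpi above: given per-policy, per-cumulant action values
# Q[P, C, A] and task weights w[C], GPE evaluates each policy on the task
# (Q_w[p, a] = sum_c Q[p, c, a] * w[c]) and GPI acts greedily on the maximum
# over policies. The shapes mirror the `values` tensor built in _build.
import numpy as np

q = np.random.rand(3, 2, 4)               # [policies, cumulants, actions]
w = np.array([1.0, -1.0])                 # Task weights over cumulants.
q_w = np.tensordot(q, w, axes=[1, 0])     # GPE: [policies, actions]
action = int(np.argmax(q_w.max(axis=0)))  # GPI: best action across policies.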
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Keyboard utils.""" import numpy as np from option_keyboard import configs from option_keyboard import environment_wrappers from option_keyboard import experiment from option_keyboard import keyboard_agent from option_keyboard import scavenger def create_and_train_keyboard(num_episodes, policy_weights=None, export_path=None): """Train an option keyboard.""" if policy_weights is None: policy_weights = np.eye(2, dtype=np.float32) env_config = configs.get_pretrain_config() env = scavenger.Scavenger(**env_config) env = environment_wrappers.EnvironmentWithLogging(env) agent = keyboard_agent.Agent( obs_spec=env.observation_spec(), action_spec=env.action_spec(), policy_weights=policy_weights, network_kwargs=dict( output_sizes=(64, 128), activate_final=True, ), epsilon=0.1, additional_discount=0.9, batch_size=10, optimizer_name="AdamOptimizer", optimizer_kwargs=dict(learning_rate=3e-4,)) if num_episodes: experiment.run(env, agent, num_episodes=num_episodes) agent.export(export_path) return agent def create_and_train_keyboard_with_phi(num_episodes, phi_model_path, policy_weights, export_path=None): """Train an option keyboard.""" env_config = configs.get_pretrain_config() env = scavenger.Scavenger(**env_config) env = environment_wrappers.EnvironmentWithLogging(env) env = environment_wrappers.EnvironmentWithLearnedPhi(env, phi_model_path) agent = keyboard_agent.Agent( obs_spec=env.observation_spec(), action_spec=env.action_spec(), policy_weights=policy_weights, network_kwargs=dict( output_sizes=(64, 128), activate_final=True, ), epsilon=0.1, additional_discount=0.9, batch_size=10, optimizer_name="AdamOptimizer", optimizer_kwargs=dict(learning_rate=3e-4,)) if num_episodes: experiment.run(env, agent, num_episodes=num_episodes) agent.export(export_path) return agent
deepmind-research-master
option_keyboard/keyboard_utils.py
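# Usage sketch (illustrative only): pretrain a small keyboard and export it.
# The exported TF-Hub module is what run_ok.py later loads; the scripts
# default to 20000 pretraining episodes, 200 here just keeps it quick. The
# export path is hypothetical.
from option_keyboard import keyboard_utils

agent = keyboard_utils.create_and_train_keyboard(
    num_episodes=200,
    export_path="/tmp/option_keyboard/keyboard")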
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Run an experiment.""" from absl import app from absl import flags import tensorflow.compat.v1 as tf from option_keyboard import configs from option_keyboard import dqn_agent from option_keyboard import environment_wrappers from option_keyboard import experiment from option_keyboard import scavenger FLAGS = flags.FLAGS flags.DEFINE_integer("num_episodes", 10000, "Number of training episodes.") flags.DEFINE_integer("report_every", 200, "Frequency at which metrics are reported.") flags.DEFINE_string("output_path", None, "Path to write out training curves.") def main(argv): del argv # Create the task environment. env_config = configs.get_task_config() env = scavenger.Scavenger(**env_config) env = environment_wrappers.EnvironmentWithLogging(env) # Create the flat agent. agent = dqn_agent.Agent( obs_spec=env.observation_spec(), action_spec=env.action_spec(), network_kwargs=dict( output_sizes=(64, 128), activate_final=True, ), epsilon=0.1, additional_discount=0.9, batch_size=10, optimizer_name="AdamOptimizer", optimizer_kwargs=dict(learning_rate=3e-4,)) _, ema_returns = experiment.run( env, agent, num_episodes=FLAGS.num_episodes, report_every=FLAGS.report_every) if FLAGS.output_path: experiment.write_returns_to_file(FLAGS.output_path, ema_returns) if __name__ == "__main__": tf.disable_v2_behavior() app.run(main)
deepmind-research-master
option_keyboard/run_dqn.py
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for training a keyboard and then running a DQN agent on top of it."""

from absl import flags
from absl.testing import absltest

import tensorflow.compat.v1 as tf

from option_keyboard import run_ok

FLAGS = flags.FLAGS


class RunOKTest(absltest.TestCase):

  def test_run(self):
    FLAGS.num_episodes = 200
    FLAGS.num_pretrain_episodes = 200
    run_ok.main(None)


if __name__ == '__main__':
  tf.disable_v2_behavior()
  absltest.main()
deepmind-research-master
option_keyboard/run_ok_test.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Auto-resetting environment base class. The environment API states that stepping an environment after a LAST timestep should return the first timestep of a new episode. However, environment authors sometimes don't spot this part or find it awkward to implement. This module contains a class that helps implement the reset behaviour. """ import abc import dm_env class Base(dm_env.Environment): """This class implements the required `step()` and `reset()` methods. It instead requires users to implement `_step()` and `_reset()`. This class handles the reset behaviour automatically when it detects a LAST timestep. """ def __init__(self): self._reset_next_step = True @abc.abstractmethod def _reset(self): """Returns a `timestep` namedtuple as per the regular `reset()` method.""" @abc.abstractmethod def _step(self, action): """Returns a `timestep` namedtuple as per the regular `step()` method.""" def reset(self): self._reset_next_step = False return self._reset() def step(self, action): if self._reset_next_step: return self.reset() timestep = self._step(action) self._reset_next_step = timestep.last() return timestep
deepmind-research-master
option_keyboard/auto_reset_environment.py
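# Illustrative sketch (not part of the original file): a minimal subclass of
# auto_reset_environment.Base. `CountdownEnv` is hypothetical, defined only
# to show that stepping past a LAST timestep transparently starts a new
# episode.
import dm_env
import numpy as np

from option_keyboard import auto_reset_environment


class CountdownEnv(auto_reset_environment.Base):
  """Terminates after `horizon` steps; Base restarts on the next step()."""

  def __init__(self, horizon=3):
    super(CountdownEnv, self).__init__()
    self._horizon = horizon
    self._t = 0

  def _reset(self):
    self._t = 0
    return dm_env.restart(np.float32(self._t))

  def _step(self, action):
    self._t += 1
    if self._t >= self._horizon:
      return dm_env.termination(reward=1.0, observation=np.float32(self._t))
    return dm_env.transition(reward=0.0, observation=np.float32(self._t))

  def observation_spec(self):
    return dm_env.specs.Array(shape=(), dtype=np.float32, name="step_count")

  def action_spec(self):
    return dm_env.specs.DiscreteArray(num_values=1, name="action")


env = CountdownEnv()
timestep = env.reset()
for _ in range(5):
  timestep = env.step(0)  # The step after a LAST timestep auto-resets.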
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A simple training loop."""

import csv

from absl import logging
from tensorflow.compat.v1.io import gfile


def _ema(base, val, decay=0.995):
  return base * decay + (1 - decay) * val


def run(env, agent, num_episodes, report_every=200, num_eval_reps=1):
  """Runs an agent on an environment.

  Args:
    env: The environment.
    agent: The agent.
    num_episodes: Number of episodes to train for.
    report_every: Frequency (in episodes) at which training progress is
      reported.
    num_eval_reps: Number of eval episodes to run per training episode.

  Returns:
    A list of dicts containing training and evaluation returns, and a list of
    reported returns smoothed by EMA.
  """
  returns = []
  logged_returns = []
  train_return_ema = 0.
  eval_return_ema = 0.
  for episode in range(num_episodes):
    returns.append(dict(episode=episode))

    # Run a training episode.
    train_episode_return = run_episode(env, agent, is_training=True)
    train_return_ema = _ema(train_return_ema, train_episode_return)
    returns[-1]["train"] = train_episode_return

    # Run an evaluation episode.
    returns[-1]["eval"] = []
    for _ in range(num_eval_reps):
      eval_episode_return = run_episode(env, agent, is_training=False)
      eval_return_ema = _ema(eval_return_ema, eval_episode_return)
      returns[-1]["eval"].append(eval_episode_return)

    if ((episode + 1) % report_every) == 0 or episode == 0:
      logged_returns.append(
          dict(episode=episode, train=train_return_ema,
               eval=[eval_return_ema]))
      logging.info("Episode %s, avg train return %.3f, avg eval return %.3f",
                   episode + 1, train_return_ema, eval_return_ema)
      if hasattr(agent, "get_logs"):
        logging.info("Episode %s, agent logs: %s", episode + 1,
                     agent.get_logs())

  return returns, logged_returns


def run_episode(environment, agent, is_training=False):
  """Run a single episode."""
  timestep = environment.reset()

  while not timestep.last():
    action = agent.step(timestep, is_training)
    new_timestep = environment.step(action)

    if is_training:
      agent.update(timestep, action, new_timestep)

    timestep = new_timestep

  episode_return = environment.episode_return

  return episode_return


def write_returns_to_file(path, returns):
  """Write returns to file."""
  with gfile.GFile(path, "w") as file:
    writer = csv.writer(file, delimiter=" ", quoting=csv.QUOTE_MINIMAL)
    writer.writerow(["episode", "train"] +
                    [f"eval_{idx}" for idx in range(len(returns[0]["eval"]))])
    for row in returns:
      writer.writerow([row["episode"], row["train"]] + row["eval"])
deepmind-research-master
option_keyboard/experiment.py
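# Usage sketch (illustrative only): `run` needs an agent exposing step() and
# update(), and an environment exposing episode_return (provided by
# EnvironmentWithLogging). `RandomAgent` is hypothetical, defined here just
# to exercise the loop.
import numpy as np

from option_keyboard import configs
from option_keyboard import environment_wrappers
from option_keyboard import experiment
from option_keyboard import scavenger


class RandomAgent(object):
  """Uniform-random agent satisfying the interface expected by `run`."""

  def __init__(self, action_spec):
    self._num_actions = action_spec.num_values

  def step(self, timestep, is_training=False):
    return int(np.random.randint(self._num_actions))

  def update(self, step_tm1, action, step_t):
    pass  # A random agent learns nothing.


env = environment_wrappers.EnvironmentWithLogging(
    scavenger.Scavenger(**configs.get_task_config()))
returns, ema_returns = experiment.run(
    env, RandomAgent(env.action_spec()), num_episodes=5)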
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Run an experiment.""" import os from absl import app from absl import flags import tensorflow.compat.v1 as tf import tensorflow_hub as hub from option_keyboard import configs from option_keyboard import dqn_agent from option_keyboard import environment_wrappers from option_keyboard import experiment from option_keyboard import keyboard_utils from option_keyboard import scavenger from option_keyboard import smart_module FLAGS = flags.FLAGS flags.DEFINE_integer("num_episodes", 10000, "Number of training episodes.") flags.DEFINE_integer("num_pretrain_episodes", 20000, "Number of pretraining episodes.") flags.DEFINE_integer("report_every", 200, "Frequency at which metrics are reported.") flags.DEFINE_string("keyboard_path", None, "Path to pretrained keyboard model.") flags.DEFINE_string("output_path", None, "Path to write out training curves.") def main(argv): del argv # Pretrain the keyboard and save a checkpoint. if FLAGS.keyboard_path: keyboard_path = FLAGS.keyboard_path else: with tf.Graph().as_default(): export_path = "/tmp/option_keyboard/keyboard" _ = keyboard_utils.create_and_train_keyboard( num_episodes=FLAGS.num_pretrain_episodes, export_path=export_path) keyboard_path = os.path.join(export_path, "tfhub") # Load the keyboard. keyboard = smart_module.SmartModuleImport(hub.Module(keyboard_path)) # Create the task environment. base_env_config = configs.get_task_config() base_env = scavenger.Scavenger(**base_env_config) base_env = environment_wrappers.EnvironmentWithLogging(base_env) # Wrap the task environment with the keyboard. additional_discount = 0.9 env = environment_wrappers.EnvironmentWithKeyboard( env=base_env, keyboard=keyboard, keyboard_ckpt_path=None, n_actions_per_dim=3, additional_discount=additional_discount, call_and_return=False) # Create the player agent. agent = dqn_agent.Agent( obs_spec=env.observation_spec(), action_spec=env.action_spec(), network_kwargs=dict( output_sizes=(64, 128), activate_final=True, ), epsilon=0.1, additional_discount=additional_discount, batch_size=10, optimizer_name="AdamOptimizer", optimizer_kwargs=dict(learning_rate=3e-4,)) _, ema_returns = experiment.run( env, agent, num_episodes=FLAGS.num_episodes, report_every=FLAGS.report_every) if FLAGS.output_path: experiment.write_returns_to_file(FLAGS.output_path, ema_returns) if __name__ == "__main__": tf.disable_v2_behavior() app.run(main)
deepmind-research-master
option_keyboard/run_ok.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for running the simple DQN agent.""" from absl import flags from absl.testing import absltest import tensorflow.compat.v1 as tf from option_keyboard import run_dqn FLAGS = flags.FLAGS class RunDQNTest(absltest.TestCase): def test_run(self): FLAGS.num_episodes = 200 run_dqn.main(None) if __name__ == '__main__': tf.disable_v2_behavior() absltest.main()
deepmind-research-master
option_keyboard/run_dqn_test.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """DQN agent.""" import numpy as np import sonnet as snt import tensorflow.compat.v1 as tf class Agent(): """A DQN Agent.""" def __init__( self, obs_spec, action_spec, network_kwargs, epsilon, additional_discount, batch_size, optimizer_name, optimizer_kwargs, ): """A simple DQN agent. Args: obs_spec: The observation spec. action_spec: The action spec. network_kwargs: Keyword arguments for snt.nets.MLP epsilon: Exploration probability. additional_discount: Discount on returns used by the agent. batch_size: Size of update batch. optimizer_name: Name of an optimizer from tf.train optimizer_kwargs: Keyword arguments for the optimizer. """ self._epsilon = epsilon self._additional_discount = additional_discount self._batch_size = batch_size self._n_actions = action_spec.num_values self._network = ValueNet(self._n_actions, network_kwargs=network_kwargs) self._replay = [] obs_spec = self._extract_observation(obs_spec) # Placeholders for policy o = tf.placeholder(shape=obs_spec.shape, dtype=obs_spec.dtype) q = self._network(tf.expand_dims(o, axis=0)) # Placeholders for update. o_tm1 = tf.placeholder(shape=(None,) + obs_spec.shape, dtype=obs_spec.dtype) a_tm1 = tf.placeholder(shape=(None,), dtype=tf.int32) r_t = tf.placeholder(shape=(None,), dtype=tf.float32) d_t = tf.placeholder(shape=(None,), dtype=tf.float32) o_t = tf.placeholder(shape=(None,) + obs_spec.shape, dtype=obs_spec.dtype) # Compute values over all options. q_tm1 = self._network(o_tm1) q_t = self._network(o_t) a_t = tf.cast(tf.argmax(q_t, axis=-1), tf.int32) qa_tm1 = _batched_index(q_tm1, a_tm1) qa_t = _batched_index(q_t, a_t) # TD error g = additional_discount * d_t td_error = tf.stop_gradient(r_t + g * qa_t) - qa_tm1 loss = tf.reduce_sum(tf.square(td_error) / 2) with tf.variable_scope("optimizer"): self._optimizer = getattr(tf.train, optimizer_name)(**optimizer_kwargs) train_op = self._optimizer.minimize(loss) # Make session and callables. 
session = tf.Session() self._update_fn = session.make_callable(train_op, [o_tm1, a_tm1, r_t, d_t, o_t]) self._value_fn = session.make_callable(q, [o]) session.run(tf.global_variables_initializer()) def _extract_observation(self, obs): return obs["arena"] def step(self, timestep, is_training=False): """Select actions according to epsilon-greedy policy.""" if is_training and np.random.rand() < self._epsilon: return np.random.randint(self._n_actions) q_values = self._value_fn( self._extract_observation(timestep.observation)) return int(np.argmax(q_values)) def update(self, step_tm1, action, step_t): """Takes in a transition from the environment.""" transition = [ self._extract_observation(step_tm1.observation), action, step_t.reward, step_t.discount, self._extract_observation(step_t.observation), ] self._replay.append(transition) if len(self._replay) == self._batch_size: batch = list(zip(*self._replay)) self._update_fn(*batch) self._replay = [] # Just a queue. class ValueNet(snt.AbstractModule): """Value Network.""" def __init__(self, n_actions, network_kwargs, name="value_network"): """Construct a value network sonnet module. Args: n_actions: Number of actions. network_kwargs: Network arguments. name: Name """ super(ValueNet, self).__init__(name=name) self._n_actions = n_actions self._network_kwargs = network_kwargs def _build(self, observation): flat_obs = snt.BatchFlatten()(observation) net = snt.nets.MLP(**self._network_kwargs)(flat_obs) net = snt.Linear(output_size=self._n_actions)(net) return net @property def num_actions(self): return self._n_actions def _batched_index(values, indices): one_hot_indices = tf.one_hot(indices, values.shape[-1], dtype=values.dtype) return tf.reduce_sum(values * one_hot_indices, axis=-1)
deepmind-research-master
option_keyboard/dqn_agent.py
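# Illustrative check of the `_batched_index` helper above, in plain numpy:
# it picks out values[i, indices[i]] for each batch row i via a one-hot
# inner product, which is how qa_tm1 and qa_t are gathered.
import numpy as np

values = np.array([[1., 2., 3.],
                   [4., 5., 6.]])
indices = np.array([2, 0])
one_hot = np.eye(values.shape[-1])[indices]
selected = np.sum(values * one_hot, axis=-1)
assert np.allclose(selected, [3., 4.])  # values[0, 2] and values[1, 0].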
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ r"""Run an experiment. Run GPE/GPI on the "balancing" task with a fixed w For example, first train a keyboard: python3 train_keyboard.py -- --logtostderr --policy_weights_name=12 Then, evaluate the keyboard with a fixed w. python3 run_true_w_fig6.py -- --logtostderr \ --keyboard_path=/tmp/option_keyboard/keyboard_12/tfhub """ import csv from absl import app from absl import flags import numpy as np import tensorflow.compat.v1 as tf from tensorflow.compat.v1.io import gfile import tensorflow_hub as hub from option_keyboard import configs from option_keyboard import environment_wrappers from option_keyboard import experiment from option_keyboard import scavenger from option_keyboard import smart_module from option_keyboard.gpe_gpi_experiments import regressed_agent FLAGS = flags.FLAGS flags.DEFINE_integer("num_episodes", 1000, "Number of training episodes.") flags.DEFINE_string("keyboard_path", None, "Path to keyboard model.") flags.DEFINE_list("test_w", None, "The w to test.") flags.DEFINE_string("output_path", None, "Path to write out returns.") def main(argv): del argv # Load the keyboard. keyboard = smart_module.SmartModuleImport(hub.Module(FLAGS.keyboard_path)) # Create the task environment. base_env_config = configs.get_task_config() base_env = scavenger.Scavenger(**base_env_config) base_env = environment_wrappers.EnvironmentWithLogging(base_env) # Wrap the task environment with the keyboard. additional_discount = 0.9 env = environment_wrappers.EnvironmentWithKeyboardDirect( env=base_env, keyboard=keyboard, keyboard_ckpt_path=None, additional_discount=additional_discount, call_and_return=False) # Create the player agent. agent = regressed_agent.Agent( batch_size=10, optimizer_name="AdamOptimizer", # Disable training. optimizer_kwargs=dict(learning_rate=0.0,), init_w=[float(x) for x in FLAGS.test_w]) returns = [] for _ in range(FLAGS.num_episodes): returns.append(experiment.run_episode(env, agent)) tf.logging.info("#" * 80) tf.logging.info( f"Avg. return over {FLAGS.num_episodes} episodes is {np.mean(returns)}") tf.logging.info("#" * 80) if FLAGS.output_path: with gfile.GFile(FLAGS.output_path, "w") as file: writer = csv.writer(file, delimiter=" ", quoting=csv.QUOTE_MINIMAL) writer.writerow(["return"]) for val in returns: writer.writerow([val]) if __name__ == "__main__": tf.disable_v2_behavior() app.run(main)
deepmind-research-master
option_keyboard/gpe_gpi_experiments/run_true_w_fig6.py
# Lint as: python3 # pylint: disable=g-bad-file-header # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Run an experiment. Run a q-learning agent on a task. """ from absl import app from absl import flags import tensorflow.compat.v1 as tf from option_keyboard import configs from option_keyboard import dqn_agent from option_keyboard import environment_wrappers from option_keyboard import experiment from option_keyboard import scavenger FLAGS = flags.FLAGS flags.DEFINE_integer("num_episodes", 10000, "Number of training episodes.") flags.DEFINE_list("test_w", None, "The w to test.") flags.DEFINE_integer("report_every", 200, "Frequency at which metrics are reported.") flags.DEFINE_string("output_path", None, "Path to write out training curves.") def main(argv): del argv # Create the task environment. test_w = [float(x) for x in FLAGS.test_w] env_config = configs.get_fig5_task_config(test_w) env = scavenger.Scavenger(**env_config) env = environment_wrappers.EnvironmentWithLogging(env) # Create the flat agent. agent = dqn_agent.Agent( obs_spec=env.observation_spec(), action_spec=env.action_spec(), network_kwargs=dict( output_sizes=(64, 128), activate_final=True, ), epsilon=0.1, additional_discount=0.9, batch_size=10, optimizer_name="AdamOptimizer", optimizer_kwargs=dict(learning_rate=3e-4,)) _, ema_returns = experiment.run( env, agent, num_episodes=FLAGS.num_episodes, report_every=FLAGS.report_every) if FLAGS.output_path: experiment.write_returns_to_file(FLAGS.output_path, ema_returns) if __name__ == "__main__": tf.disable_v2_behavior() app.run(main)
deepmind-research-master
option_keyboard/gpe_gpi_experiments/run_dqn_fig5.py
# Lint as: python3
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train a keyboard."""

from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf

from option_keyboard import keyboard_utils

FLAGS = flags.FLAGS
flags.DEFINE_integer("num_pretrain_episodes", 20000,
                     "Number of pretraining episodes.")
flags.DEFINE_string("export_path", None,
                    "Where to save the keyboard checkpoints.")
flags.DEFINE_string("policy_weights_name", None,
                    "A string representing the policy weights.")


def main(argv):
  del argv

  all_policy_weights = {
      "1": [1., 0.],
      "2": [0., 1.],
      "3": [1., -1.],
      "4": [-1., 1.],
      "5": [1., 1.],
  }
  if FLAGS.policy_weights_name:
    policy_weights = np.array(
        [all_policy_weights[v] for v in FLAGS.policy_weights_name])
    num_episodes = ((FLAGS.num_pretrain_episodes // 2) *
                    max(2, len(policy_weights)))
    export_path = FLAGS.export_path + "_" + FLAGS.policy_weights_name
  else:
    policy_weights = None
    num_episodes = FLAGS.num_pretrain_episodes
    export_path = FLAGS.export_path

  keyboard_utils.create_and_train_keyboard(
      num_episodes=num_episodes,
      policy_weights=policy_weights,
      export_path=export_path)


if __name__ == "__main__":
  tf.disable_v2_behavior()
  app.run(main)
deepmind-research-master
option_keyboard/gpe_gpi_experiments/train_keyboard.py