# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Interfaces of datasets and models that can be trained with the framework."""
import abc
from typing import NamedTuple, Optional
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
WEEK_DAY_INTEGER = "week_day_integer"
SITE_ID_INTEGER = "site_id_integer"
POPULATION = "population"
class TrainingDataset(NamedTuple):
"""A training dataset.
This is analogous to `dataset_factory.Dataset` without access to the targets
for the evaluation dates, to avoid information leakage.
"""
targets: np.ndarray # (num_dates, num_sites, num_targets)
features: np.ndarray # (num_dates, num_sites, num_features)
sum_past_targets: np.ndarray # (num_sites, num_targets)
feature_names: np.ndarray
target_names: np.ndarray
dates: np.ndarray # (num_dates,)
sites: np.ndarray # (num_sites,)
evaluation_dates: np.ndarray # (num_evaluation_dates,)
dataset_index_key: Optional[str]
cadence: int
@property
def num_dates(self):
return self.dates.size
@property
def num_sites(self):
return self.sites.size
@classmethod
def from_dataset(cls, dataset):
if not isinstance(dataset, dataset_factory.Dataset):
raise TypeError("requires `dataset_factory.Dataset` type")
return cls(
targets=dataset.training_targets,
features=dataset.training_features,
sum_past_targets=dataset.sum_past_targets,
target_names=dataset.target_names,
feature_names=dataset.feature_names,
dates=dataset.training_dates,
sites=dataset.sites,
evaluation_dates=dataset.evaluation_dates,
cadence=dataset.cadence,
dataset_index_key=dataset.dataset_index_key)
class Stats(NamedTuple):
mean: np.ndarray
std: np.ndarray
@classmethod
def from_array(cls, data: np.ndarray, reduce_axes):
return cls(
mean=np.nanmean(data, reduce_axes),
std=np.nanstd(data, reduce_axes),)
class TrainingDatasetSpec(NamedTuple):
"""Specification of a dataset.
A model trained on a `TrainingDatasetSpec` should be able to operate on any
other dataset with the same spec.
"""
feature_names: np.ndarray
target_names: np.ndarray
sites: np.ndarray
num_forecast_dates: int
cadence: int
feature_stats: Stats
target_stats: Stats
# Hints about past data that can be useful for initializing some models.
# Population per site.
population: np.ndarray # (num_sites,)
# Estimate of the average target value on the first date.
initial_targets_hint: np.ndarray # (num_sites, num_targets)
# Estimate of the summed past targets up to (and including) the first date.
initial_sum_targets_hint: np.ndarray # (num_sites, num_targets)
# The names of features that have values missing.
features_with_missing_values: np.ndarray
@classmethod
def from_dataset(cls, dataset: TrainingDataset):
"""Builds a `TrainingDatasetSpec` from a `TrainingDataset`."""
if not isinstance(dataset, TrainingDataset):
raise TypeError("requires `TrainingDataset` type.")
feature_names = list(dataset.feature_names)
if POPULATION in feature_names:
population = dataset.features[0, :, feature_names.index(POPULATION)]
else:
population = None
# Look at the average targets for some initial dates, covering at least a
# 1 week period. Technically, we could do this for just one step, but
# daily data is noisy, so it is better to average.
num_steps_to_average = int(np.ceil(7 / dataset.cadence))
initial_targets_hint = dataset.targets[:num_steps_to_average].mean(0)
initial_sum_targets_hint = (
dataset.sum_past_targets + dataset.targets[0])
return cls(
target_names=dataset.target_names,
feature_names=dataset.feature_names,
sites=dataset.sites,
num_forecast_dates=len(dataset.evaluation_dates),
cadence=dataset.cadence,
feature_stats=Stats.from_array(dataset.features, reduce_axes=(0, 1)),
target_stats=Stats.from_array(dataset.targets, reduce_axes=(0, 1)),
population=population,
initial_targets_hint=initial_targets_hint,
initial_sum_targets_hint=initial_sum_targets_hint,
features_with_missing_values=np.array(feature_names)[np.any(
np.isnan(dataset.features), axis=(0, 1))])
def assert_is_compatible(self, dataset: TrainingDataset):
# TODO(alvarosg): Maybe make into errors if we decide to keep it.
assert np.all(dataset.feature_names == self.feature_names)
assert np.all(dataset.target_names == self.target_names)
assert np.all(dataset.sites == self.sites)
assert len(dataset.evaluation_dates) == self.num_forecast_dates
assert dataset.cadence == self.cadence
class TrainableModel(metaclass=abc.ABCMeta):
"""Base class for trainable models on our training framework."""
def __init__(self, dataset_spec: TrainingDatasetSpec):
super().__init__()
self._dataset_spec = dataset_spec
def build_training_generator(self, dataset: TrainingDataset):
"""Iteratively yields batches of data given a dataset."""
self._dataset_spec.assert_is_compatible(dataset)
return self._build_training_generator(dataset)
@abc.abstractmethod
def _build_training_generator(self, dataset: TrainingDataset):
"""See `build_training_generator`."""
@abc.abstractmethod
def training_update(self, previous_state, batch, global_step):
"""Updates the model.
Args:
previous_state: Previous model state.
batch: batch of data as generated by `build_training_generator`.
global_step: global step
Returns:
A tuple with (updated_state, scalars_dict).
"""
def evaluate(self, model_state, dataset: TrainingDataset):
"""Computes a future forecast.
Args:
model_state: current model state.
dataset: input dataset for the future forecast.
Returns:
A tuple (predictions, aux_data) with a forecast of shape
[self._num_forecast_dates, num_sites, num_targets] as well as any
auxiliary data as a second argument.
"""
self._dataset_spec.assert_is_compatible(dataset)
return self._evaluate(model_state, dataset)
@abc.abstractmethod
def _evaluate(self, model_state, dataset: TrainingDataset):
"""See `evaluate`."""
# ---- end of dm_c19_modelling-main/modelling/definitions.py ----
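# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): a minimal subclass of
# `TrainableModel` showing the contract the abstract methods must satisfy.
# The "persistence" forecast below is invented purely for illustration; the
# real models live under `modelling/models/`.
import itertools

import numpy as np

from dm_c19_modelling.modelling import definitions


class PersistenceModel(definitions.TrainableModel):
  """Repeats the last observed target for every forecast date."""

  def _build_training_generator(self, dataset):
    # Nothing to learn, so yield dummy batches forever.
    return itertools.repeat(None)

  def training_update(self, previous_state, batch, global_step):
    del batch, global_step  # Unused: there are no parameters to update.
    return previous_state, {"loss": 0.0}

  def _evaluate(self, model_state, dataset):
    del model_state  # Stateless model.
    num_forecast_dates = self._dataset_spec.num_forecast_dates
    # Tile the last observed targets to the required output shape:
    # [num_forecast_dates, num_sites, num_targets].
    predictions = np.tile(
        dataset.targets[-1][None], [num_forecast_dates, 1, 1])
    return predictions, {}
# --------------------------------------------------------------------------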
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepMind COVID-19 modelling."""
# ---- end of dm_c19_modelling-main/modelling/__init__.py ----
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Executable to train and evaluate models online.
Usage:
```
runner.py --config=base_config.py:<project_directory> --mode="train"
runner.py --config=base_config.py:<project_directory> --mode="eval"
```
"""
from absl import app
from absl import flags
from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.training import checkpointing
from dm_c19_modelling.modelling.training import dataset_factory
from dm_c19_modelling.modelling.training import eval_loop
from dm_c19_modelling.modelling.training import log_writers
from dm_c19_modelling.modelling.training import model_factory
from dm_c19_modelling.modelling.training import train_loop
from ml_collections import config_flags
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
"config", help_string="Experiment configuration file.")
flags.DEFINE_enum(
"mode", "train", ["train", "eval"],
"Execution mode: `train` will run training, `eval` will run evaluation.")
flags.DEFINE_boolean(
"overfit", False,
"If True, no data is left out for validation. Useful for debugging models.")
flags.DEFINE_string(
"forecast_name", None,
"Forecast name to use for storing final predictions in the forecast index.")
def create_writers(mode):
writers = []
writers.append(log_writers.ConsoleWriter(mode))
return writers
def main(argv):
del argv
config = FLAGS.config
checkpointer = checkpointing.Checkpointer(**config.checkpointer)
writers = create_writers(FLAGS.mode)
dataset = dataset_factory.get_training_dataset(**config.dataset)
dataset_spec = definitions.TrainingDatasetSpec.from_dataset(dataset)
model = model_factory.get_model(dataset_spec, **config.model)
build_info = dict(
dataset_spec=dataset_spec,
model_factory_kwargs=config.model.to_dict(),
dataset_factory_kwargs=config.dataset.to_dict(),
)
# Get another version of the dataset with some trailing dates left out
# for validation and early stopping.
dataset_without_validation_dates, valid_forecast_targets = (
dataset_factory.remove_validation_dates(dataset))
if FLAGS.mode == "train":
train_loop.train(
dataset if FLAGS.overfit else dataset_without_validation_dates,
model, build_info, checkpointer, writers, **config.training)
elif FLAGS.mode.startswith("eval"):
eval_loop.evaluate(
dataset_without_validation_dates, valid_forecast_targets, model,
checkpointer, writers, training_steps=config.training.training_steps,
**config.eval)
if config.fine_tune.fine_tune_steps is not None:
# Fine tune the best eval checkpoint on the whole dataset, including
# validation dates.
fine_tune_writers = create_writers("fine_tune")
train_loop.fine_tune(
dataset, model, checkpointer, fine_tune_writers,
initial_checkpoint=eval_loop.BEST_EVAL, **config.fine_tune)
checkpoint_name_to_submit = train_loop.LATEST_FINE_TUNE
else:
checkpoint_name_to_submit = eval_loop.BEST_EVAL
# Make a final forecast using the full dataset (including validation dates)
# as inputs.
if not FLAGS.overfit:
eval_loop.submit_final_forecast(
dataset, model, checkpointer, forecast_name=FLAGS.forecast_name,
directory=config.dataset.directory,
dataset_name=config.dataset.dataset_name,
checkpoint_name=checkpoint_name_to_submit)
else:
raise ValueError(f"Mode {FLAGS.mode} not recognized.")
if __name__ == "__main__":
app.run(main)
# ---- end of dm_c19_modelling-main/modelling/training/runner.py ----
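# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): a fuller invocation
# using the extra flags defined above, mirroring the Usage block in the
# module docstring. Paths and names are placeholders.
#
#   runner.py --config=base_config.py:/tmp/my_project --mode="train"
#   runner.py --config=base_config.py:/tmp/my_project --mode="eval" \
#       --forecast_name="my_first_forecast"
#
# Passing --overfit trains on the full dataset (no validation split), which
# the flag description above suggests using for debugging models only.
# --------------------------------------------------------------------------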
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Config for c19 experiment."""
from dm_c19_modelling.evaluation import constants
from ml_collections import config_dict
# TODO(peterbattaglia): These are just for reference: remove later.
_ALL_FEATURES = (
"new_confirmed", "new_deceased", "new_recovered", "new_tested",
"total_confirmed", "total_deceased", "total_recovered", "total_tested",
"population", "population_male", "population_female", "rural_population",
"urban_population", "largest_city_population", "clustered_population",
"population_density", "human_development_index", "population_age_00_09",
"population_age_10_19", "population_age_20_29", "population_age_30_39",
"population_age_40_49", "population_age_50_59", "population_age_60_69",
"population_age_70_79", "population_age_80_89", "population_age_90_99",
"population_age_80_and_older", "mobility_retail_and_recreation",
"mobility_grocery_and_pharmacy", "mobility_parks",
"mobility_transit_stations", "mobility_workplaces", "mobility_residential")
def get_config(project_directory):
"""Returns the experiment config.."""
config = config_dict.ConfigDict()
# Parameters to build the model and dataset.
config.dataset = _get_dataset_config(project_directory)
config.model = _get_model_config()
# Configuration of the training, evaluation, and fine tuning loops.
config.training = _get_training_config()
config.eval = _get_eval_config()
config.fine_tune = _get_fine_tune_config(config.training)
# Parameters to checkpoint the model during training.
config.checkpointer = _get_checkpointer_config()
return config
def _get_dataset_config(project_directory):
"""Keyword arguments to `dataset_factory.get_dataset`."""
config = config_dict.ConfigDict()
config.directory = project_directory
config.dataset_name = "covid_open_data_world"
# The name(s) of the target to predict.
config.targets = [constants.Targets.DECEASED_NEW]
# The names of the features to use to make predictions.
# TODO(alvarosg): Should we add an option for models to then select subsets of
# these features only?
config.features = [
"new_confirmed",
"new_deceased",
"population",
"mobility_retail_and_recreation", "mobility_grocery_and_pharmacy",
"mobility_parks", "mobility_transit_stations", "mobility_workplaces",
"mobility_residential"
]
# The creation date of the dataset to use for training.
config.creation_date = "latest"
# The date to train up to and evaluate from.
config.last_observation_date = "2020-12-06"
# The number of dates to use for evaluation. The forecast horizon is equal
# to num_forecast_dates * cadence.
config.num_forecast_dates = 28
# The cadence in days of the forecasts.
config.cadence = 1
# Whether to allow sites to be dropped if any of the requested features aren't
# defined for that site for at least one training date.
config.allow_dropped_sites = False
return config
def _get_model_config():
"""Keyword arguments to `model_factory.get_model`."""
config = config_dict.ConfigDict()
# This is a very specific implementation of a config to work with our model
# factory. Users may change the config and factory design, as long as it is
# compatible with the factory being called as:
# `get_model(dataset_spec, **this_config)`
# Parameters that are shared by all instances of `LossMinimizerHaikuModel`.
config.init_seed = 42
config.training_seed = 42
config.optimizer_kwargs = dict(
name="adam",
b1=0.9,
b2=0.999,
eps=1e-8,)
config.learning_rate_annealing_kwargs = dict(
name="exponential",
start_value=1e-3,
end_value=1e-6,
num_steps_decay_rate=1e5,
decay_rate=0.1)
# Model name and additional specific configs for the models we support.
config.model_name = "mlp" # One of those below (e.g. mlp, lstm, seir_lstm)
config.model_specific_kwargs = dict(
mlp=_get_mlp_model_config(),
lstm=_get_lstm_model_config(),
seir_lstm=_get_seir_lstm_model_config(),
)
return config
def _get_mlp_model_config():
"""Returns MLP model config."""
config = config_dict.ConfigDict()
# Eventually this will probably stop being a model-specific parameter
# and instead live on the final base class.
config.batch_generator_seed = 42
config.layer_sizes = (128, 128,)
config.input_window = 21
config.batch_size = 128
return config
def _get_lstm_model_config():
"""Returns LSTM model config."""
# Note that currently this model only works if the model input features are
# constants (e.g. population), are trivially predictable (e.g. day of week) or
# can be built from targets (e.g. number of deceased).
config = config_dict.ConfigDict()
config.embedding_size = 32
config.hidden_size = 32
config.batch_size = 64
config.warm_up_steps = 14
config.batch_generator_seed = 42
config.training_sequence_length = 56
config.num_context_dates_evaluation = 28
return config
def _get_seir_lstm_model_config():
"""Returns LSTM model config."""
config = config_dict.ConfigDict()
config.lstm_embedding_size = 32
config.lstm_hidden_size = 32
config.condition_lstm_on_ode_state = True
config.min_exposed_fraction = 5e-6
# One of: ["rollout_features", "polynomial_fit_params"]
# Note `rollout_features` only works if the model input features are constants
# (e.g. population), are trivially predictable (e.g. day of week) or can be
# built from targets (e.g. number of deceased).
config.extrapolation_mode = "polynomial_fit_params"
config.param_extrapolation_context_steps = 21
config.param_extrapolation_poly_degree = 1
config.seir_parameters_init = dict(
S2E=0.8, # An LSTM will be used to modulate this.
E2I=0.2,
I2RD=0.5,
ifr=0.01,
)
return config
def _get_training_config():
"""Keyword arguments to `train_loop.train`."""
config = config_dict.ConfigDict()
config.training_steps = 10000
config.log_interval = 100
config.checkpoint_interval = 1000
return config
def _get_eval_config():
"""Keyword arguments to `eval_loop.evaluate`."""
config = config_dict.ConfigDict()
config.early_stop_metric_to_minimize = "eval_mean_squared_error"
return config
def _get_fine_tune_config(training_config):
"""Keyword arguments to `train_loop.fine_tune`."""
config = config_dict.ConfigDict()
config.fine_tune_steps = 4000
# By default, reuse the same intervals as during training.
config.log_interval = training_config.get_ref("log_interval")
config.checkpoint_interval = training_config.get_ref("checkpoint_interval")
# By default tell the model during fine tuning that the global step
# is the last training step. Note models sometimes use this global_step for
# learning rate annealing, so in practice this will cause the fine tuning to
# happen with the final training learning rate.
config.global_step_for_model = training_config.get_ref("training_steps")
return config
def _get_checkpointer_config():
"""Keyword arguments to `checkpointing.Checkpointer`."""
config = config_dict.ConfigDict()
config.directory = "/tmp/training_example/"
config.max_to_keep = 2
return config
# ---- end of dm_c19_modelling-main/modelling/training/base_config.py ----
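# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): building this config
# programmatically and overriding a few fields before handing it to the
# training code. The directory paths are placeholders.
from dm_c19_modelling.modelling.training import base_config

config = base_config.get_config("/tmp/my_project")
config.model.model_name = "lstm"
config.training.training_steps = 2000
config.checkpointer.directory = "/tmp/my_checkpoints"
# `config.fine_tune.log_interval` and `config.fine_tune.checkpoint_interval`
# are field references (`get_ref`) to `config.training`, so this override
# propagates to fine tuning as well:
config.training.log_interval = 50
# --------------------------------------------------------------------------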
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Evaluation loop."""
import time
from absl import logging
from dm_c19_modelling.evaluation import forecast_indexing
from dm_c19_modelling.modelling.training import log_writers
from dm_c19_modelling.modelling.training import train_loop
# Checkpoint names.
LATEST_TRAIN = train_loop.LATEST_TRAIN
LATEST_EVAL = "latest_eval"
BEST_EVAL = "best_eval"
def evaluate(dataset, next_forecast_targets, model, checkpointer, writers,
training_steps, early_stop_metric_to_minimize):
"""Main evaluation loop."""
logging.info("Start evaluation")
# Get the states.
latest_train_state = checkpointer.get_experiment_state(LATEST_TRAIN)
latest_eval_state = checkpointer.get_experiment_state(LATEST_EVAL)
best_eval_state = checkpointer.get_experiment_state(BEST_EVAL)
# Setup/restore the eval checkpoints, depending on whether `LATEST_EVAL`
# can be restored, since the existence of LATEST_EVAL guarantees the
# existence of BEST_EVAL, as the former is saved last.
# TODO(alvarosg): Consider having a single eval checkpoint, with two high
# level fields "best", "last", e.g., usages, would be:
# `train_loop.assign_state(eval_state.latest, latest_train_state)`
# `train_loop.assign_state(eval_state.best, eval_state.latest)`
# And we would only need a single:
# `checkpointer.restore(EVAL)` and `checkpointer.save(EVAL)`
if not checkpointer.can_be_restored(LATEST_EVAL):
latest_eval_state.checkpoint_path = None
best_eval_state.early_stop_metric_value = None
else:
checkpointer.restore(BEST_EVAL)
checkpointer.restore(LATEST_EVAL)
# Wait until there is a train checkpoint.
while True:
if checkpointer.can_be_restored(LATEST_TRAIN):
break
else:
logging.info("Train checkpoint not available, waiting.")
time.sleep(10)
while True:
checkpoint_path = checkpointer.restore_path(LATEST_TRAIN)
if checkpoint_path == latest_eval_state.checkpoint_path:
if latest_eval_state.global_step >= training_steps:
logging.info("Last checkpoint (iteration %d) evaluated, exiting loop.",
latest_eval_state.global_step)
break
else:
logging.info(
"Checkpoint %s already evaluated, waiting.", checkpoint_path)
time.sleep(10)
continue
# Will evaluate the latest train checkpoint available.
checkpointer.restore(LATEST_TRAIN)
predictions, _ = model.evaluate(latest_train_state.model_state, dataset)
# TODO(alvarosg): Add more eval metrics.
scalar_metrics = {
"eval_mean_squared_error": (
(next_forecast_targets - predictions) ** 2).mean(),
"step": latest_train_state.global_step,
}
# Log the eval metrics.
log_writers.multiple_write(
writers, latest_train_state.global_step, scalar_metrics)
# Store the eval metrics in the latest eval checkpoint.
train_loop.assign_state(latest_eval_state, latest_train_state)
latest_eval_state.checkpoint_path = checkpoint_path
latest_eval_state.early_stop_metric_value = scalar_metrics[
early_stop_metric_to_minimize]
# Update the best checkpoint if appropriate.
if (best_eval_state.early_stop_metric_value is None or
(latest_eval_state.early_stop_metric_value <
best_eval_state.early_stop_metric_value)):
if best_eval_state.early_stop_metric_value is None:
# Initializing best model:
logging.info("Initializing best model: %s = %g",
early_stop_metric_to_minimize,
latest_eval_state.early_stop_metric_value)
else:
logging.info("Updating best model: %s %g -> %g",
early_stop_metric_to_minimize,
best_eval_state.early_stop_metric_value,
latest_eval_state.early_stop_metric_value)
train_loop.assign_state(best_eval_state, latest_eval_state)
checkpointer.save(BEST_EVAL)
checkpointer.save(LATEST_EVAL)
def submit_final_forecast(dataset, model, checkpointer, forecast_name,
directory, dataset_name, checkpoint_name=BEST_EVAL):
"""Submits forecasts from the best checkpoint to the forecast index."""
state = checkpointer.get_experiment_state(checkpoint_name)
checkpoint_path = checkpointer.restore_path(checkpoint_name)
checkpointer.restore(checkpoint_name)
final_forecast, _ = model.evaluate(state.model_state, dataset)
model_description = {
"name": forecast_name,
"model_factory_kwargs": state.build_info[
"model_factory_kwargs"],
"checkpoint_path": checkpoint_path
}
extra_info = {
"first_training_date": dataset.dates[0]
}
predictions_df = forecast_indexing.build_predictions_df(
final_forecast, dataset.evaluation_dates, dataset.sites,
dataset.target_names)
# TODO(alvarosg): In case of cadence=1, submit weekly forecasts too, with
# `forecast_utils.pool_daily_forecasts_to_weekly`.
logging.info(
"Submitting final forecast with name '%s' for dataset with index '%s' "
"for checkpoint at %s.",
forecast_name, dataset.dataset_index_key, checkpoint_path)
logging.info("Model description:")
logging.info(model_description)
logging.info("Extra info:")
logging.info(extra_info)
if forecast_name is None:
logging.info("Empty forcast name, skipping submission.")
return
forecast_indexing.save_predictions_df(
predictions_df,
directory=directory,
last_observation_date=max(dataset.dates),
forecast_horizon=len(dataset.evaluation_dates),
model_description=model_description,
dataset_name=dataset_name,
dataset_index_key=dataset.dataset_index_key,
cadence=dataset.cadence,
features_used=list(dataset.feature_names),
extra_info=extra_info)
# ---- end of dm_c19_modelling-main/modelling/training/eval_loop.py ----
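# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): the eval metric above
# is a plain mean squared error over the held-out validation horizon, where
# both arrays have shape [num_forecast_dates, num_sites, num_targets]. Extra
# metrics (the TODO above) could be added the same way; the MAE below is just
# an example, not something the repo defines.
import numpy as np


def example_eval_metrics(next_forecast_targets, predictions):
  errors = next_forecast_targets - predictions
  return {
      "eval_mean_squared_error": float((errors ** 2).mean()),
      # Hypothetical additional metric.
      "eval_mean_absolute_error": float(np.abs(errors).mean()),
  }
# --------------------------------------------------------------------------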
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepMind COVID-19 training."""
# ---- end of dm_c19_modelling-main/modelling/training/__init__.py ----
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dataset factory."""
import datetime
from typing import Sequence, Tuple
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.modelling import definitions
import numpy as np
def get_training_dataset(
**evaluation_dataset_factory_kwargs) -> definitions.TrainingDataset:
"""Gets a dataset."""
training_dataset = definitions.TrainingDataset.from_dataset(
dataset_factory.get_dataset(**evaluation_dataset_factory_kwargs))
training_dataset = _add_site_id_feature(training_dataset)
return _add_day_of_week_feature(training_dataset)
def _add_day_of_week_feature(dataset: definitions.TrainingDataset):
"""Adds an integer day of the week feature to the data."""
integer_days_of_the_week = np.array([ # From 0 (Monday) to 6 (Sunday)
datetime.datetime.strptime(date, constants.DATE_FORMAT).weekday()
for date in dataset.dates])
# Broadcast from [num_dates] -> [num_dates, num_sites, 1]
integer_days_of_the_week = np.tile(
integer_days_of_the_week[:, None, None], [1, dataset.num_sites, 1])
return _append_features(
dataset, integer_days_of_the_week, [definitions.WEEK_DAY_INTEGER])
def _add_site_id_feature(dataset: definitions.TrainingDataset):
"""Adds an integer site id feature to the data."""
integer_site_ids = np.arange(dataset.num_sites)
# Broadcast from [num_sites] -> [num_dates, num_sites, 1]
integer_site_ids = np.tile(
integer_site_ids[None, :, None], [dataset.num_dates, 1, 1])
return _append_features(
dataset, integer_site_ids, [definitions.SITE_ID_INTEGER])
def _append_features(
dataset: definitions.TrainingDataset, new_features: np.ndarray,
feature_names: Sequence[str]):
updated_features = np.concatenate(
[dataset.features, new_features.astype(dataset.features.dtype)],
axis=-1)
updated_feature_names = np.concatenate(
[dataset.feature_names, feature_names], axis=0)
return dataset._replace(features=updated_features,
feature_names=updated_feature_names)
def remove_validation_dates(
dataset: definitions.TrainingDataset) -> Tuple[
definitions.TrainingDataset, np.ndarray]:
"""Generates training and eval datasets.
Args:
dataset: `definitions.TrainingDataset` to split.
Returns:
Tuple with:
dataset_without_validation_dates: `definitions.TrainingDataset` where
the last `num_forecast_dates` worth of data have been removed.
forecast_targets_validation: targets for the last `num_forecast_dates`
that have been removed.
"""
num_forecast_dates = len(dataset.evaluation_dates)
# We build something that looks like a dataset, but shifted
# `num_forecast_dates` into the past, keeping the targets for the last
# `num_forecast_dates` dates for validation.
forecast_targets_validation = dataset.targets[-num_forecast_dates:]
dataset_without_validation_dates = dataset._replace(
targets=dataset.targets[:-num_forecast_dates],
features=dataset.features[:-num_forecast_dates],
dates=dataset.dates[:-num_forecast_dates],
# As we remove inputs, the index key would no longer be consistent.
dataset_index_key=None,
)
return dataset_without_validation_dates, forecast_targets_validation
# ---- end of dm_c19_modelling-main/modelling/training/dataset_factory.py ----
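# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): what
# `remove_validation_dates` does to the shapes of a toy `TrainingDataset`.
# All sizes, names and dates below are arbitrary placeholders.
import numpy as np

from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.training import dataset_factory

num_dates, num_sites, num_targets, num_features, horizon = 10, 3, 1, 2, 4
toy = definitions.TrainingDataset(
    targets=np.zeros((num_dates, num_sites, num_targets)),
    features=np.zeros((num_dates, num_sites, num_features)),
    sum_past_targets=np.zeros((num_sites, num_targets)),
    feature_names=np.array(["population", "new_confirmed"]),
    target_names=np.array(["new_deceased"]),
    dates=np.array(["2020-01-%02d" % (d + 1) for d in range(num_dates)]),
    sites=np.array(["site_a", "site_b", "site_c"]),
    evaluation_dates=np.array(["2020-02-%02d" % (d + 1) for d in range(horizon)]),
    dataset_index_key="toy_key",
    cadence=1)

train_part, validation_targets = dataset_factory.remove_validation_dates(toy)
assert train_part.targets.shape == (num_dates - horizon, num_sites, num_targets)
assert validation_targets.shape == (horizon, num_sites, num_targets)
assert train_part.dataset_index_key is None  # The index key is deliberately dropped.
# --------------------------------------------------------------------------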
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training loop."""
import time
from absl import logging
from dm_c19_modelling.modelling.training import log_writers
# Checkpoint names.
LATEST_TRAIN = "latest_train"
LATEST_FINE_TUNE = "latest_fine_tune"
def train(dataset, model, build_info, checkpointer, writers, training_steps,
**train_loop_kwargs):
"""Main training loop."""
logging.info("Start training")
# Setup/restore the checkpoint.
state = checkpointer.get_experiment_state(LATEST_TRAIN)
if not checkpointer.can_be_restored(LATEST_TRAIN):
state.global_step = 0
state.model_state = None
state.build_info = build_info
else:
checkpointer.restore(LATEST_TRAIN)
batch_generator = model.build_training_generator(dataset)
_train_loop(model, batch_generator, training_steps, state,
checkpoint_save_fn=lambda: checkpointer.save(LATEST_TRAIN),
writers=writers, **train_loop_kwargs)
def fine_tune(dataset, model, checkpointer, writers, fine_tune_steps,
global_step_for_model, initial_checkpoint, **train_loop_kwargs):
"""Fine tuning training loop."""
logging.info("Start fine-tuning")
# Setup/restore the fine tune checkpoint.
state = checkpointer.get_experiment_state(LATEST_FINE_TUNE)
if not checkpointer.can_be_restored(LATEST_FINE_TUNE):
# If there is not one yet, we simply copy the initial one.
initial_state = checkpointer.get_experiment_state(initial_checkpoint)
checkpointer.restore(initial_checkpoint)
assign_state(state, initial_state)
state.global_step = 0
else:
checkpointer.restore(LATEST_FINE_TUNE)
batch_generator = model.build_training_generator(dataset)
_train_loop(model, batch_generator, fine_tune_steps, state,
override_global_step_for_model=global_step_for_model,
checkpoint_save_fn=lambda: checkpointer.save(LATEST_FINE_TUNE),
writers=writers, **train_loop_kwargs)
def _train_loop(model, batch_generator, training_steps, state,
log_interval, writers, checkpoint_interval, checkpoint_save_fn,
override_global_step_for_model=None):
"""Training loop, updating the model state at each iteration."""
logging.info("Entering training loop")
prev_timestamp = None
while state.global_step < training_steps:
batch = next(batch_generator)
if override_global_step_for_model is not None:
global_step_for_model = override_global_step_for_model
else:
global_step_for_model = state.global_step
state.model_state, scalar_outputs = model.training_update(
state.model_state, batch, global_step_for_model)
# Log scalars still using the pre-update global step, as the losses
# etc. here would correspond to metrics before the model is updated.
scalar_outputs["step"] = state.global_step
# Increase the global step before calling the saving model callbacks, so
# the state of the saved model has the correct global step, e.g.
# 1 to indicate that the model has been trained for 1 iteration.
state.global_step += 1
# Write to the loggers periodically.
if state.global_step % log_interval == 0:
# Compute steps per second.
new_timestamp = time.time()
if prev_timestamp is None:
scalar_outputs["steps_per_sec"] = float("nan")
else:
scalar_outputs["steps_per_sec"] = log_interval / (
new_timestamp - prev_timestamp)
log_writers.multiple_write(writers, state.global_step, scalar_outputs)
prev_timestamp = new_timestamp
# Checkpointing periodically (should always happen last in the loop).
if state.global_step % checkpoint_interval == 0:
checkpoint_save_fn()
logging.info("Storing checkpoint at end of loop")
checkpoint_save_fn()
def assign_state(state_dst, state_src):
for k, v in state_src.items():
setattr(state_dst, k, v)
# ---- end of dm_c19_modelling-main/modelling/training/train_loop.py ----
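# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): how `train` is driven
# by `runner.py`. The `dataset`, `model` and `dataset_spec` arguments are
# assumed to come from the dataset/model factories, and the interval values
# mirror the defaults in `base_config.py`; the checkpoint directory is a
# placeholder.
from dm_c19_modelling.modelling.training import checkpointing
from dm_c19_modelling.modelling.training import log_writers
from dm_c19_modelling.modelling.training import train_loop


def run_training_example(dataset, model, dataset_spec):
  checkpointer = checkpointing.Checkpointer(
      directory="/tmp/training_example/", max_to_keep=2)
  writers = [log_writers.ConsoleWriter("train")]
  # The runner also records the factory kwargs in `build_info`.
  build_info = dict(dataset_spec=dataset_spec)
  train_loop.train(
      dataset, model, build_info, checkpointer, writers,
      training_steps=10000, log_interval=100, checkpoint_interval=1000)
# --------------------------------------------------------------------------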
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
r"""Executable to submit a forecast for the checkpoint of a pretrained model.
Usage:
```
submit_forecast.py --checkpoint_path=/tmp/checkpoints/latest_fine_tune \
--forecast_name="tmp_model_forecast"
```
"""
from absl import app
from absl import flags
from dm_c19_modelling.modelling.training import checkpointing
from dm_c19_modelling.modelling.training import dataset_factory
from dm_c19_modelling.modelling.training import eval_loop
from dm_c19_modelling.modelling.training import model_factory
from dm_c19_modelling.modelling.training import train_loop
FLAGS = flags.FLAGS
flags.DEFINE_string(
"checkpoint_path", None, "Path to the checkpoint. E.g.. ")
flags.DEFINE_enum(
"checkpoint_name",
train_loop.LATEST_FINE_TUNE,
[train_loop.LATEST_TRAIN,
eval_loop.BEST_EVAL,
train_loop.LATEST_FINE_TUNE],
"Checkpoint name. By default, the fined tuned checkpoint.")
flags.DEFINE_string(
"forecast_name", None,
"Forecast name to use for storing predictions in the forecast index.")
def main(argv):
del argv
checkpointer = checkpointing.Checkpointer(
directory=FLAGS.checkpoint_path, max_to_keep=2)
state = checkpointer.get_experiment_state(FLAGS.checkpoint_name)
checkpointer.restore(FLAGS.checkpoint_name)
# Get dataset.
dataset = dataset_factory.get_training_dataset(
**state.build_info["dataset_factory_kwargs"])
# Get model.
# Note that code for the model must not have changed since training.
model = model_factory.get_model(
state.build_info["dataset_spec"],
**state.build_info["model_factory_kwargs"])
eval_loop.submit_final_forecast(
dataset, model, checkpointer,
forecast_name=FLAGS.forecast_name,
directory=state.build_info["dataset_factory_kwargs"]["directory"],
dataset_name=state.build_info["dataset_factory_kwargs"]["dataset_name"],
checkpoint_name=FLAGS.checkpoint_name)
if __name__ == "__main__":
app.run(main)
# ---- end of dm_c19_modelling-main/modelling/training/submit_forecast.py ----
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model factory."""
from dm_c19_modelling.modelling.models import lstm
from dm_c19_modelling.modelling.models import mlp
from dm_c19_modelling.modelling.models import seir_lstm
_MODEL_MAPPING = {
"mlp": mlp.MLPModel,
"lstm": lstm.LSTM,
"seir_lstm": seir_lstm.SEIRLSTM,
}
def get_model(dataset_spec, model_name, model_specific_kwargs, **kwargs):
"""Model factory."""
# This factory is called by the training script via
# `get_model(dataset_spec, **config.model)`
# This is a very specific implementation of a factory that is setup to build
# different models based on a specific config structure containing
# `model_name` and `model_specific_kwargs`. Users implementing new models may
# fully adjust this factory to use the model config in a different way.
# Note the `dataset_spec` passed here includes fields such as statistics of
# the data used for normalization. If reloading a pretrained model, care
# should be taken to pass the same `dataset_spec` as that used at training
# when building the model.
model_kwargs = dict(dataset_spec=dataset_spec, **kwargs)
# Add model specific options.
model_kwargs.update(model_specific_kwargs[model_name])
# Instantiate the correct class.
model_class = _MODEL_MAPPING[model_name]
return model_class(**model_kwargs)
# ---- end of dm_c19_modelling-main/modelling/training/model_factory.py ----
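# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): how a hypothetical new
# model could be wired into this factory. `MyNewModel` is a placeholder for
# any class following the `TrainableModel` interface whose constructor takes
# `dataset_spec` plus its own keyword arguments.
from dm_c19_modelling.modelling.training import model_factory


class MyNewModel:  # A real model would subclass `definitions.TrainableModel`.

  def __init__(self, dataset_spec, hidden_size, **shared_kwargs):
    del shared_kwargs  # E.g. optimizer/seed options shared by all models.
    self._dataset_spec = dataset_spec
    self._hidden_size = hidden_size


# Register it, then select it through the same config structure used above.
model_factory._MODEL_MAPPING["my_new_model"] = MyNewModel  # pylint: disable=protected-access
example_model = model_factory.get_model(
    dataset_spec=None,  # A real `TrainingDatasetSpec` would go here.
    model_name="my_new_model",
    model_specific_kwargs={"my_new_model": {"hidden_size": 64}})
# --------------------------------------------------------------------------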
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities."""
import abc
from typing import Any, Dict, Text
from absl import logging
import tree
class BaseWriter(metaclass=abc.ABCMeta):
"""Writer interface for experiment data."""
def write_scalars(self, global_step, scalars):
"""Writes the scalars returned by experiment's step()."""
class ConsoleWriter(BaseWriter):
"""Writes training data to the log."""
def __init__(self, name):
self._name = name
def write_scalars(self, global_step: int, scalars: Dict[Text, Any]):
logging.info('%s: global_step: %d, %s',
self._name, global_step,
tree.map_structure(str, scalars))
def multiple_write(writers, global_step, scalars):
for writer in writers:
writer.write_scalars(global_step, scalars)
# ---- end of dm_c19_modelling-main/modelling/training/log_writers.py ----
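# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original repo): a custom writer that
# appends scalars to a plain text file. Anything implementing
# `write_scalars(global_step, scalars)` can be added to the list returned by
# `create_writers` in `runner.py`. The output path is a placeholder.
from typing import Any, Dict, Text

from dm_c19_modelling.modelling.training import log_writers


class FileWriter(log_writers.BaseWriter):
  """Appends one line of scalars per call to a text file."""

  def __init__(self, name: Text, path: Text):
    self._name = name
    self._path = path

  def write_scalars(self, global_step: int, scalars: Dict[Text, Any]):
    values = ', '.join('%s=%s' % (k, v) for k, v in sorted(scalars.items()))
    with open(self._path, 'a') as f:
      f.write('%s: global_step: %d, %s\n' % (self._name, global_step, values))
# --------------------------------------------------------------------------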
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Checkpointing utilities."""
import os
import pickle
from absl import logging
import tensorflow as tf
class _PyWrapper(tf.train.experimental.PythonState):
"""Wraps a Python object for storage in an object-based checkpoint."""
def __init__(self, obj):
"""Specify an object to wrap.
Args:
obj: The object to save and restore (may be overwritten).
"""
self._obj = obj
@property
def object(self):
return self._obj
def serialize(self):
"""Callback to serialize the object."""
return pickle.dumps(self._obj)
def deserialize(self, string_value):
"""Callback to deserialize the array."""
self._obj = pickle.loads(string_value)
class _CheckpointState:
"""tf.Train.Checkpoint wrapper ensuring all fields are checkpointable."""
def __init__(self):
super().__setattr__('_checkpoint',
tf.train.Checkpoint(python_state=_PyWrapper({})))
@property
def checkpoint(self):
return self._checkpoint
def __setattr__(self, name, value):
self._checkpoint.python_state.object[name] = value
def __getattr__(self, name):
return self._checkpoint.python_state.object[name]
def keys(self):
return self._checkpoint.python_state.object.keys()
def items(self):
return self._checkpoint.python_state.object.items()
class Checkpointer:
"""Checkpoints python state using tf.train.Checkpoint."""
def __init__(self, directory, max_to_keep, restore_path=None):
self._directory = directory
self._max_to_keep = max_to_keep
self._first_restore_path = restore_path
self._experiment_states = {}
self._checkpoints = {}
logging.info('Storing checkpoint at: %s', directory)
def _internal_restore_path(self, checkpoint_name):
"""Returns a path to the checkpoint used for restore, or None."""
# If we have a checkpoint we own, return that.
restore_path = self.restore_path(checkpoint_name)
# Otherwise, check the read-only restore path.
if restore_path is None and self._first_restore_path is not None:
# We use the checkpoint metadata (state) to check whether the
# checkpoint we want actually exists.
# First restore path can be a directory or a specific checkpoint.
chk_state = tf.train.get_checkpoint_state(self._first_restore_path)
if chk_state is not None:
# The restore path is a directory; get the latest checkpoint from there.
restore_path = chk_state.model_checkpoint_path
else:
# Try with the parent directory.
chk_state = tf.train.get_checkpoint_state(
os.path.dirname(self._first_restore_path))
if chk_state is not None and (
self._first_restore_path in chk_state.all_model_checkpoint_paths):
restore_path = self._first_restore_path
else:
restore_path = None
return restore_path
def get_experiment_state(self, checkpoint_name):
"""Returns the experiment state."""
if checkpoint_name not in self._experiment_states:
assert checkpoint_name not in self._checkpoints
state = _CheckpointState()
self._experiment_states[checkpoint_name] = state
self._checkpoints[checkpoint_name] = tf.train.CheckpointManager(
state.checkpoint,
os.path.join(self._directory, checkpoint_name),
self._max_to_keep,
checkpoint_name=checkpoint_name)
return self._experiment_states[checkpoint_name]
def can_be_restored(self, checkpoint_name):
"""Returns True if the checkpoint with the given name can be restored."""
return self._internal_restore_path(checkpoint_name) is not None
def restore(self, checkpoint_name):
"""Restores checkpoint state."""
save_path = self._internal_restore_path(checkpoint_name)
assert save_path is not None
checkpoint_manager = self._checkpoints[checkpoint_name]
checkpoint_manager.checkpoint.restore(save_path).assert_consumed()
logging.info('Restored checkpoint from: %s', save_path)
def restore_or_save(self, checkpoint_name):
if self.can_be_restored(checkpoint_name):
self.restore(checkpoint_name)
else:
self.save(checkpoint_name)
def save(self, checkpoint_name):
"""Saves the state to file."""
self._checkpoints[checkpoint_name].save()
self._first_restore_path = None
logging.info('Saved checkpoint at: %s', self.restore_path(checkpoint_name))
def restore_path(self, checkpoint_name):
"""Returns the restore path for this checkpoint."""
# Returns None if we didn't create any checkpoint yet.
chk_state = tf.train.get_checkpoint_state(
self._checkpoints[checkpoint_name].directory)
return None if chk_state is None else chk_state.model_checkpoint_path
# ---- end of dm_c19_modelling-main/modelling/training/checkpointing.py ----
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for checkpointing.py."""
import os
import tempfile
from absl.testing import absltest
from dm_c19_modelling.modelling.training import checkpointing
import numpy as np
class CheckpointTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._checkpoint_dir = tempfile.TemporaryDirectory().name
def test_python_state(self):
chk = checkpointing.Checkpointer(self._checkpoint_dir, max_to_keep=1)
state = chk.get_experiment_state('checkpoint1')
state.int = 10
state.float = 12.34
state.string = 'test'
state.numpy = np.array([1, 2, 3, 4])
# Save the checkpoint.
self.assertFalse(chk.can_be_restored('checkpoint1'))
chk.restore_or_save('checkpoint1')
self.assertTrue(chk.can_be_restored('checkpoint1'))
# Restore the checkpoint.
chk2 = checkpointing.Checkpointer(self._checkpoint_dir, max_to_keep=1)
state2 = chk2.get_experiment_state('checkpoint1')
self.assertTrue(chk2.can_be_restored('checkpoint1'))
chk2.restore_or_save('checkpoint1')
self.assertEqual(state.int, state2.int)
self.assertEqual(state.float, state2.float)
self.assertEqual(state.string, state2.string)
np.testing.assert_array_equal(state.numpy, state2.numpy)
def test_restore_path(self):
chk1 = checkpointing.Checkpointer(
self._checkpoint_dir,
max_to_keep=1,
restore_path=os.path.join(self._checkpoint_dir, 'bad_path'))
state1 = chk1.get_experiment_state('state1')
state1.counter = np.random.randint(100)
self.assertFalse(chk1.can_be_restored('state1'))
chk1.save('state1')
self.assertTrue(chk1.can_be_restored('state1'))
chk2 = checkpointing.Checkpointer(
self._checkpoint_dir,
max_to_keep=1,
restore_path=os.path.join(self._checkpoint_dir, 'state1'))
state2 = chk2.get_experiment_state('state2')
self.assertTrue(chk2.can_be_restored('state2'))
# First restore will override the state with the values from the checkpoint.
state2.counter = state1.counter + 1
chk2.restore('state2')
self.assertEqual(state1.counter, state2.counter)
# After we save and restore, the original values are lost.
state2.counter = state1.counter + 1
chk2.save('state2')
chk3 = checkpointing.Checkpointer(
self._checkpoint_dir,
max_to_keep=1,
restore_path=chk1.restore_path('state1'))
# The restore path will be ignored because we have a checkpoint for state2
# in our main checkpoint directory.
state3 = chk3.get_experiment_state('state2')
chk3.restore('state2')
self.assertEqual(state3.counter, state1.counter + 1)
def test_restore_path_update(self):
chk1 = checkpointing.Checkpointer(self._checkpoint_dir, max_to_keep=1)
state1 = chk1.get_experiment_state('latest')
state1.counter = np.random.randint(100)
self.assertIsNone(chk1.restore_path('latest'))
chk2 = checkpointing.Checkpointer(self._checkpoint_dir, max_to_keep=1)
state2 = chk2.get_experiment_state('latest')
self.assertIsNone(chk2.restore_path('latest'))
self.assertFalse(chk2.can_be_restored('latest'))
state1.counter += 1
chk1.save('latest')
restore_path = chk2.restore_path('latest')
self.assertIsNotNone(restore_path)
self.assertTrue(chk2.can_be_restored('latest'))
state1.counter += 1
chk1.save('latest')
new_restore_path = chk2.restore_path('latest')
self.assertNotEqual(restore_path, new_restore_path)
chk2.restore('latest')
self.assertEqual(state2.counter, state1.counter)
if __name__ == '__main__':
absltest.main()
# ---- end of dm_c19_modelling-main/modelling/training/tests/checkpointing_test.py ----
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `runner.py`."""
import datetime
import tempfile
from unittest import mock
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.modelling.training import base_config
from dm_c19_modelling.modelling.training import runner
import numpy as np
_SEED = 42
_MODEL_NAMES = base_config.get_config("").model.model_specific_kwargs.keys()
FLAGS = flags.FLAGS
def get_mock_dataset(model_name):
num_training_dates = 100
num_forecast_dates = 7
num_sites = 20
dates = np.array([
(datetime.datetime(year=2020, month=1, day=1) + datetime.timedelta(days=i)
).strftime(constants.DATE_FORMAT)
for i in range(num_training_dates + num_forecast_dates)])
training_dates = dates[:num_training_dates]
evaluation_dates = dates[num_training_dates:]
sites = np.array([f"site_{i}" for i in range(num_sites)])
if model_name == "lstm":
# LSTM requires the features to either be constant, or be targets.
feature_names = np.array(["population", "target_2", "target_1"])
num_features = 3
else:
num_features = 6
feature_names = np.array([f"feature_{i}" for i in range(num_features - 1)] +
["population"])
if model_name == "seir_lstm":
# This model is only compatible with this target specifically.
target_names = np.array([constants.DECEASED_NEW])
num_targets = 1
else:
num_targets = 20
target_names = np.array([f"target_{i}" for i in range(num_targets)])
rand = np.random.RandomState(_SEED)
training_features = rand.normal(
size=[num_training_dates, num_sites, num_features])
training_targets = rand.normal(
size=[num_training_dates, num_sites, num_targets])
evaluation_targets = rand.normal(
size=[num_forecast_dates, num_sites, num_targets])
sum_past_targets = rand.normal(size=[num_sites, num_targets])
return dataset_factory.Dataset(
training_targets=training_targets,
evaluation_targets=evaluation_targets,
training_features=training_features,
sum_past_targets=sum_past_targets,
feature_names=feature_names,
target_names=target_names,
training_dates=training_dates,
evaluation_dates=evaluation_dates,
sites=sites,
dataset_index_key="dummy_key",
cadence=1,
)
class RunnerTest(parameterized.TestCase):
def setUp(self):
super().setUp()
# Need to force parsing of FLAGS.
FLAGS(["runner_test.py"])
@parameterized.parameters(
*({"model_name": model_name} for model_name in _MODEL_NAMES))
def test_smoke_test(self, model_name):
config = base_config.get_config("")
config.model.model_name = model_name
with tempfile.TemporaryDirectory() as tmp_dir:
config.checkpointer.directory = tmp_dir
config.training.log_interval = 10
config.training.checkpoint_interval = 40
config.training.training_steps = 200
config.fine_tune.fine_tune_steps = 100
FLAGS.config = config
with mock.patch.object(
dataset_factory, "get_dataset",
return_value=get_mock_dataset(model_name)):
FLAGS.mode = "train"
runner.main(None)
FLAGS.mode = "eval"
runner.main(None)
if __name__ == "__main__":
absltest.main()
# ---- end of dm_c19_modelling-main/modelling/training/tests/runner_test.py ----
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepMind COVID-19 models."""
# ---- end of dm_c19_modelling-main/modelling/models/__init__.py ----
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MLP model."""
import typing
from typing import Any, Dict, Generator, Sequence, Tuple
from dm_c19_modelling.modelling.models import base_jax
from dm_c19_modelling.modelling.models import utils
import haiku as hk
import jax.numpy as jnp
import numpy as np
if typing.TYPE_CHECKING:
from dm_c19_modelling.modelling.definitions import TrainingDataset
class MLPModel(base_jax.LossMinimizerHaikuModel):
"""MLP model."""
def __init__(self, layer_sizes: Sequence[int], input_window: int,
batch_size: int, batch_generator_seed: int,
**parent_kwargs): # pylint: disable=g-doc-args
"""Constructor."""
super().__init__(**parent_kwargs)
self._batch_generator_seed = batch_generator_seed
self._layer_sizes = layer_sizes
self._output_feature_size = len(self._dataset_spec.target_names)
self._input_window = input_window
self._batch_size = batch_size
def _build_training_generator(
self, dataset: "TrainingDataset"
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
rand = np.random.RandomState(seed=self._batch_generator_seed)
batch_gen = utils.build_training_generator(
rand,
dataset,
batch_size=self._batch_size,
window=self._input_window + self._dataset_spec.num_forecast_dates)
while True:
features, targets = next(batch_gen)
yield features[:, :self._input_window], targets[:, self._input_window:]
def _build_network(self) -> hk.Sequential:
"""Builds network."""
initial_reshape = hk.Reshape(output_shape=(-1,))
mlp = hk.nets.MLP(self._layer_sizes, activate_final=True)
output_layer = hk.Linear(self._dataset_spec.num_forecast_dates *
self._output_feature_size)
final_reshape = hk.Reshape(
output_shape=(self._dataset_spec.num_forecast_dates,
self._output_feature_size))
sequential = hk.Sequential(
[self._prepare_features, initial_reshape, mlp, output_layer,
final_reshape])
return sequential
def _loss_fn(
self, batch: Tuple[np.ndarray,
np.ndarray]) -> Tuple[np.ndarray, Dict[str, Any]]:
"""Computes loss."""
inputs, targets = batch
network = self._build_network()
normalized_predictions = network(inputs)
# Build the loss in normalized space.
normalized_targets = utils.normalize(targets,
self._dataset_spec.target_stats)
l2_error = (normalized_predictions - normalized_targets)**2
loss = jnp.mean(l2_error)
scalars_dict = {"std": jnp.std(l2_error)} # Customize this for logging.
return loss, scalars_dict
def _prepare_predict_fn_inputs(self, dataset):
# Returns the features for the last input window.
return dataset.features[-self._input_window:]
def _predict_fn(self, inputs: np.ndarray) -> Tuple[np.ndarray, Any]:
"""Makes a prediction using the inputs."""
# [num_dates, num_sites, ...] -> [num_sites, num_dates, ...]
inputs = jnp.swapaxes(inputs, 0, 1)
network = self._build_network()
normalized_predictions = network(inputs)
# Denormalize the output of the network.
predictions = utils.denormalize(normalized_predictions,
self._dataset_spec.target_stats)
# [num_sites, num_dates, ...] -> [num_dates, num_sites, ...]
aux_data = {}
return jnp.swapaxes(predictions, 0, 1), aux_data
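# The following is an illustrative, standalone sketch (not used by `MLPModel`)
# of the reshape -> MLP -> reshape pattern built in `_build_network`, with
# made-up sizes. It omits the feature preparation and normalization steps.
def _demo_mlp_shapes():
  import jax  # Local import, only needed for this illustrative sketch.
  def forward(x):
    net = hk.Sequential([
        hk.Reshape(output_shape=(-1,)),  # [batch, window, feat] -> [batch, -1]
        hk.nets.MLP([32, 32], activate_final=True),
        hk.Linear(4 * 2),  # num_forecast_dates * num_targets (made-up values).
        hk.Reshape(output_shape=(4, 2)),  # [batch, 8] -> [batch, 4, 2]
    ])
    return net(x)
  forward_fn = hk.without_apply_rng(hk.transform(forward))
  x = np.zeros([3, 5, 7], dtype=np.float32)  # [batch, input_window, features]
  params = forward_fn.init(jax.random.PRNGKey(0), x)
  return forward_fn.apply(params, x).shape  # (3, 4, 2)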
| dm_c19_modelling-main | modelling/models/mlp.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for models to use."""
from typing import Generator, Mapping, Sequence, Tuple
from dm_c19_modelling.modelling import definitions
import jax
import jax.numpy as jnp
import numpy as np
import optax
def _get_date_slice(date_indices: np.ndarray, window: int) -> np.ndarray:
date_slice = date_indices[:, None] + np.arange(window)[None]
assert date_slice.shape == (date_indices.size,
window), "Wrong date slice shape"
return date_slice
def _get_site_slice(site_indices: np.ndarray, window: int) -> np.ndarray:
site_slice = np.repeat(site_indices[:, None], window, axis=1)
assert site_slice.shape == (site_indices.size,
window), "Wrong site slice shape"
return site_slice
def _get_sequences(dataset: definitions.TrainingDataset,
date_indices: np.ndarray,
site_indices: np.ndarray,
window: int) -> Tuple[np.ndarray, np.ndarray]:
date_slice = _get_date_slice(date_indices, window) # [batch_size x window]
site_slice = _get_site_slice(site_indices, window) # [batch_size x window]
inputs = dataset.features[date_slice, site_slice]
targets = dataset.targets[date_slice, site_slice]
return inputs, targets
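# Illustrative sketch (not part of the training pipeline) of how the date and
# site slices index into arrays of shape [num_dates, num_sites, ...]: each
# row of the batch selects a contiguous window of dates at a single site.
def _demo_window_slicing():
  targets = np.arange(4 * 2).reshape([4, 2, 1])  # [num_dates, num_sites, 1]
  date_slice = _get_date_slice(np.array([0, 1]), window=3)
  site_slice = _get_site_slice(np.array([1, 0]), window=3)
  # date_slice is [[0, 1, 2], [1, 2, 3]] and site_slice is [[1, 1, 1],
  # [0, 0, 0]], so this gathers a [batch_size=2, window=3, 1] batch.
  return targets[date_slice, site_slice]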
def build_training_generator(
rand: np.random.RandomState, dataset: definitions.TrainingDataset,
batch_size: int, window: int
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
"""Yields batches of [inputs, targets] sequences for a dataset."""
  # date_range is the set of valid start indices for sampled windows. Each
  # sample spans `window` consecutive dates (an input window followed by a
  # target window), so valid start dates run from the first date up to
  # `window - 1` dates before the last one.
date_range = np.arange(dataset.num_dates - window + 1)
# site_range is the set of sites to predict over.
site_range = np.arange(dataset.num_sites)
while True:
date_indices = rand.choice(date_range, size=batch_size, replace=True)
site_indices = rand.choice(site_range, size=batch_size, replace=True)
yield _get_sequences(dataset, date_indices, site_indices, window)
def prepare_features(features: np.ndarray, feature_names: Sequence[str],
feature_stats: definitions.Stats,
categorical_features_dict: Mapping[str, int],
features_with_missing_values: Sequence[str],
dtype=np.float32) -> np.ndarray:
"""Prepares features for the input of a neural network.
Transforms categorical features into one-hots, and normalizes non-categorical
features.
Args:
features: Array of shape `leading_shape` + [num_features].
feature_names: sequence of length `num_features`, containing the names of
the features.
feature_stats: statistics of the input features.
categorical_features_dict: Dictionary mapping feature_name -> num_categories
indicating which features are categorical. Categorical features will
      be prepared as one-hots.
features_with_missing_values: List of feature names indicating which
features have missing values.
dtype: Type of the prepared features.
Returns:
Array of shape `leading_shape` + [num_prepared_features]
"""
for name in categorical_features_dict.keys():
if name not in feature_names:
raise ValueError(
f"Unrecognized categorical feature '{name}', should be one "
f"of {feature_names}")
# TODO(alvaro): Maybe avoid python loop to make it more efficient.
normalized_features = normalize(features, feature_stats)
prepared_features = []
for feature_index, name in enumerate(feature_names):
if name in categorical_features_dict:
num_categories = categorical_features_dict[name]
feature = features[..., feature_index]
prepared_feature = jax.nn.one_hot(
feature.astype(np.int32), num_categories, axis=-1)
else:
prepared_feature = normalized_features[..., feature_index][..., None]
if name in features_with_missing_values:
prepared_feature, missingness_mask = _remove_nans_and_get_mask(
prepared_feature)
prepared_features.append(missingness_mask.astype(dtype))
prepared_features.append(prepared_feature.astype(dtype))
if not prepared_features:
raise ValueError("No features available.")
return jnp.concatenate(prepared_features, axis=-1)
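# Illustrative sketch of `prepare_features` with a hypothetical feature
# layout: one categorical site id (3 sites) and one real-valued feature. The
# names and statistics below are arbitrary, not from any real dataset.
def _demo_prepare_features():
  feature_names = [definitions.SITE_ID_INTEGER, "mobility"]
  stats = definitions.Stats(mean=np.array([0., 5.]), std=np.array([1., 2.]))
  features = np.array([[0., 7.],
                       [2., 3.]])  # [num_rows=2, num_features=2]
  prepared = prepare_features(
      features, feature_names, stats,
      categorical_features_dict={definitions.SITE_ID_INTEGER: 3},
      features_with_missing_values=[])
  # The site id becomes a one-hot of size 3 and "mobility" is normalized, so
  # the output has 3 + 1 = 4 columns.
  return prepared  # Shape [2, 4].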
def _get_safe_std(std, threshold=1e-8):
safe_std = np.array(std)
mask = np.isclose(std, 0, atol=threshold)
safe_std[mask] = 1.0
return safe_std
def normalize(features: np.ndarray, stats: definitions.Stats) -> np.ndarray:
return (features - stats.mean) / _get_safe_std(stats.std)
def denormalize(normalized_features: np.ndarray,
stats: definitions.Stats) -> np.ndarray:
return normalized_features * _get_safe_std(stats.std) + stats.mean
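# Illustrative sketch of the normalize/denormalize round trip with arbitrary
# statistics. Note how a zero-std feature is left unscaled by `_get_safe_std`.
def _demo_normalization_round_trip():
  stats = definitions.Stats(mean=np.array([10., 3.]), std=np.array([4., 0.]))
  features = np.array([[18., 3.]])
  normalized = normalize(features, stats)  # [[2., 0.]]
  recovered = denormalize(normalized, stats)  # [[18., 3.]]
  return normalized, recovered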
def _remove_nans_and_get_mask(features: np.ndarray) -> Tuple[
np.ndarray, np.ndarray]:
"""Replaces NaNs in features with 0s and adds mask to indicate missingness."""
nan_feature_locations = jnp.isnan(features)
mask = jnp.where(nan_feature_locations, 1., 0.)
features = jnp.where(nan_feature_locations, 0., features)
return features, mask
DEFAULT_CONSTANT_FEATURE_NAMES = (definitions.SITE_ID_INTEGER,
definitions.POPULATION)
def rollout_features_with_predictions(
features: np.ndarray, next_steps_targets: np.ndarray,
feature_names: Sequence[str], target_names: Sequence[str],
cadence: int, constant_features: Sequence[str] = ()):
"""Augments input features with target predictions for next steps.
Args:
features: array of shape [num_dates, num_sites, num_features]
next_steps_targets: [num_future_dates, num_sites, num_targets]
feature_names: names of the input features.
target_names: names of the target features.
cadence: cadence for each step in days.
    constant_features: features that can simply be rolled out as constants.
Returns:
Array of shape `[num_dates + num_future_dates, num_sites, num_features]`
constructed by concatenating the features, with an augmented version of the
targets that includes constant features, and features that can be trivially
rolled-out (e.g. day of the week).
"""
constant_features = tuple(constant_features) + tuple(
DEFAULT_CONSTANT_FEATURE_NAMES)
assert len(feature_names) == features.shape[-1]
assert len(target_names) == next_steps_targets.shape[-1]
# Verify that we have enough information to do the rollout.
missing_features = (
set(feature_names) -
set(list(target_names) +
list(constant_features) +
[definitions.WEEK_DAY_INTEGER]))
if missing_features:
raise ValueError(f"Cannot rollout using features {missing_features} "
"which are not constant, neither targets.")
num_future_dates = next_steps_targets.shape[0]
rollout_features = []
for feature_index, name in enumerate(feature_names):
if name == definitions.WEEK_DAY_INTEGER:
# Take the last weekday and increment it for each future step.
last_weekday = features[-1, ..., feature_index].astype(np.int32)
future_day_index = np.arange(1, num_future_dates + 1) * cadence
future_weekdays = future_day_index[:, None] + last_weekday[None, :]
future_weekdays = jnp.mod(future_weekdays, 7)
rollout_feature = future_weekdays.astype(features.dtype)
elif name in target_names:
# Copy the targets.
rollout_feature = next_steps_targets[..., list(target_names).index(name)]
elif name in constant_features:
# Copy features from the last day and broadcast to all dates.
last_features = features[-1, ..., feature_index]
rollout_feature = jnp.tile(
last_features[None], [num_future_dates, 1])
else:
# This should never happen, regardless of the inputs, since we already
# have a check for missing features before the loop.
raise ValueError(f"Cannot rollout feature {name} which is not constant"
"or a target.")
rollout_features.append(rollout_feature)
rollout_features = jnp.stack(rollout_features, axis=-1)
return jnp.concatenate([features, rollout_features], axis=0)
# TODO(alvarosg): Consider removing this (which would simplify the
# `get_optimizer` method, as if we do not support annealing, there is no
# reason for optimizers to return auxiliary outputs).
def exponential_annealing(step, start_value, end_value, decay_rate,
num_steps_decay_rate):
"""Bridges the gap between start_value and end_value exponentially."""
progress = decay_rate**(step / num_steps_decay_rate)
return end_value + (start_value - end_value) * progress
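# Illustrative sketch of `exponential_annealing` with arbitrary values: every
# `num_steps_decay_rate` steps, the gap to `end_value` shrinks by `decay_rate`.
def _demo_exponential_annealing():
  kwargs = dict(start_value=1e-3, end_value=1e-5, decay_rate=0.1,
                num_steps_decay_rate=1000)
  value_at_0 = exponential_annealing(step=0, **kwargs)  # 1e-3 (start value).
  value_at_1000 = exponential_annealing(step=1000, **kwargs)  # ~1.09e-4
  return value_at_0, value_at_1000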
# TODO(alvarosg): Decide if we want to use enums, and use them throughout.
_ANNEALING_FNS_MAP = {
"exponential": exponential_annealing,
}
def get_annealing(global_step, name, **kwargs):
  return _ANNEALING_FNS_MAP[name](global_step, **kwargs)
def get_optimizer(name, **kwargs):
"""Returns init_fn, update_fn, aux_outputs for an `optax` optimizer."""
# We will return the optimizer params, so we can monitor things like
# annealing of parameters.
aux_outputs = {name + "_" + k: v for k, v in kwargs.items()}
return getattr(optax, name)(**kwargs), aux_outputs
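# Illustrative sketch of `get_optimizer` using plain SGD (any optimizer name
# exposed by `optax`, e.g. "adam", works the same way). Values are arbitrary.
def _demo_get_optimizer():
  (init_fn, update_fn), aux_outputs = get_optimizer("sgd", learning_rate=1e-2)
  params = {"w": jnp.zeros(3)}
  opt_state = init_fn(params)
  grads = {"w": jnp.ones(3)}
  updates, opt_state = update_fn(grads, opt_state)
  new_params = optax.apply_updates(params, updates)  # Each entry is now -0.01.
  return new_params, aux_outputs  # aux_outputs == {"sgd_learning_rate": 0.01}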
def get_optimizer_with_learning_rate_annealing(
global_step, optimizer_kwargs, annealing_kwargs):
learning_rate = get_annealing(global_step, **annealing_kwargs)
optimizer_kwargs = dict(learning_rate=learning_rate, **optimizer_kwargs)
return get_optimizer(**optimizer_kwargs)
def get_optimizer_params_update_step(loss_fn, optimizer_fn):
"""Returns a jittable fn to update model parameters.
Args:
loss_fn: Function that returns the scalar loss with signature:
loss_fn(trainable_params, non_trainable_state, rng, data) ->
(scalar_loss, non_trainable_state, aux_outputs)
optimizer_fn: Function that returns an `optax` optimizer with signature:
optimizer_fn(global_step) -> (optax_optimizer, aux_outputs)
Returns:
Function with signature:
update_fn(global_step, optimizer_state, trainable_params,
non_trainable_state, rng, data) ->
(updated_optimizer_state, updated_params, loss,
aux_loss_outputs, aux_optimizer_outputs)
"""
def update_fn(global_step, optimizer_state, trainable_params,
non_trainable_state, rng, data):
# `loss_fn` returns (scalar_loss, non_trainable_state, aux_outputs)
# but `jax.value_and_grad(loss_fn, has_aux=True)` requires the output to
# be (scalar_loss, rest). So we apply a small transform to pack it as:
# (scalar_loss, (non_trainable_state, aux_outputs))
def loss_fn_with_aux(*args, **kwargs):
scalar_loss, non_trainable_state, aux_outputs = loss_fn(*args, **kwargs)
return (scalar_loss, (non_trainable_state, aux_outputs))
# Compute the loss and gradients.
(loss, (updated_non_trainable_state, aux_loss_outputs)
), grads = jax.value_and_grad(loss_fn_with_aux, has_aux=True)(
trainable_params, non_trainable_state, rng, data)
# Get the optimizer fn.
(_, opt_update_fn), aux_optimizer_outputs = optimizer_fn(global_step)
# Compute the update params and optimizer state.
updates, updated_optimizer_state = opt_update_fn(grads, optimizer_state)
updated_params = optax.apply_updates(trainable_params, updates)
return (updated_optimizer_state, updated_params,
updated_non_trainable_state, loss,
aux_loss_outputs, aux_optimizer_outputs)
return update_fn
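# Minimal sketch of wiring `get_optimizer_params_update_step`, assuming a
# stateless quadratic loss and a constant learning rate. All names and values
# here are illustrative only.
def _demo_update_step():
  def loss_fn(trainable_params, non_trainable_state, rng, data):
    del rng, data  # Unused in this toy loss.
    loss = jnp.sum(trainable_params["w"] ** 2)
    return loss, non_trainable_state, {"loss_copy": loss}
  def optimizer_fn(global_step):
    del global_step  # No annealing in this sketch.
    return get_optimizer("sgd", learning_rate=0.1)
  update_fn = get_optimizer_params_update_step(loss_fn, optimizer_fn)
  params = {"w": jnp.array([1.0, -2.0])}
  (opt_init_fn, _), _ = optimizer_fn(0)
  opt_state = opt_init_fn(params)
  (opt_state, params, _, loss, _, _) = update_fn(
      0, opt_state, params, {}, jax.random.PRNGKey(0), data=None)
  return params, loss  # params["w"] is now [0.8, -1.6]; loss is 5.0.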
| dm_c19_modelling-main | modelling/models/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""LSTM model."""
import typing
from typing import Any, Dict, Generator, Tuple
from dm_c19_modelling.modelling.models import base_jax
from dm_c19_modelling.modelling.models import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
if typing.TYPE_CHECKING:
from dm_c19_modelling.modelling.definitions import TrainingDataset
class LSTM(base_jax.LossMinimizerHaikuModel):
"""LSTM model.
It makes predictions into the future by feeding its own predictions as inputs
for the next step.
"""
def __init__(self,
embedding_size: int,
hidden_size: int,
batch_size: int,
warm_up_steps: int,
batch_generator_seed: int,
training_sequence_length: int,
num_context_dates_evaluation: int,
**parent_kwargs): # pylint: disable=g-doc-args
"""Constructor.
Args:
embedding_size: Size of linear embedding of the features.
hidden_size: Size of the LSTM.
batch_size: Batch size.
      warm_up_steps: Initial number of leading steps for which no loss will
be computed (to allow the model to look at a few steps of data and
build up the state before making predictions).
batch_generator_seed: Seed for the batch generator.
training_sequence_length: Length of the training sequences.
num_context_dates_evaluation: Number of steps used to warm up the state
before making predictions during evaluation.
**parent_kwargs: Attributes for the parent class.
"""
super().__init__(**parent_kwargs)
self._embedding_size = embedding_size
self._hidden_size = hidden_size
self._batch_generator_seed = batch_generator_seed
self._warm_up_steps = warm_up_steps
self._output_feature_size = len(self._dataset_spec.target_names)
self._batch_size = batch_size
self._num_context_dates_evaluation = num_context_dates_evaluation
self._training_sequence_length = training_sequence_length
def _build_training_generator(
self, dataset: "TrainingDataset"
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
rand = np.random.RandomState(seed=self._batch_generator_seed)
# We will generate subsequences for teacher forcing, with inputs and targets
# shifted by 1 date.
batch_gen = utils.build_training_generator(
rand,
dataset,
batch_size=self._batch_size,
window=self._training_sequence_length + 1)
while True:
features, targets = next(batch_gen)
# Leading shape [training_sequence_length, batch_size]
yield (jnp.swapaxes(features[:, :-1], 0, 1),
jnp.swapaxes(targets[:, 1:], 0, 1))
def _build_network(self) -> hk.DeepRNN:
"""Builds network."""
return hk.DeepRNN([
self._prepare_features,
hk.Linear(self._embedding_size),
jax.nn.relu,
hk.LSTM(self._hidden_size),
hk.Linear(self._output_feature_size),
])
def _loss_fn(
self, batch: Tuple[np.ndarray,
np.ndarray]) -> Tuple[np.ndarray, Dict[str, Any]]:
"""Computes loss."""
inputs, targets = batch
rnn = self._build_network()
initial_state = rnn.initial_state(self._batch_size)
normalized_predictions, _ = hk.dynamic_unroll(rnn, inputs, initial_state)
    # Build the loss in normalized space.
normalized_targets = utils.normalize(targets,
self._dataset_spec.target_stats)
l2_error = (normalized_predictions - normalized_targets)**2
# Ignore loss for the first steps, until the state is built up.
l2_error = l2_error[self._warm_up_steps:]
loss = jnp.mean(l2_error)
scalars_dict = {}
return loss, scalars_dict
def _prepare_predict_fn_inputs(self, dataset):
# Returns the features.
return dataset.features
def _predict_fn(self, inputs: np.ndarray) -> Tuple[np.ndarray, Any]:
"""Makes a prediction using the inputs."""
rnn = self._build_network()
initial_state = rnn.initial_state(inputs.shape[1])
# Build up the state using teacher forcing for
# `self._num_context_dates_evaluation` steps.
inputs = inputs[-self._num_context_dates_evaluation:]
normalized_predictions_teacher_forcing, rnn_state = hk.dynamic_unroll(
rnn, inputs, initial_state)
    # Everything but the last step corresponds to one-step predictions for the
    # input dates.
predictions_for_inputs = utils.denormalize(
normalized_predictions_teacher_forcing[:-1],
self._dataset_spec.target_stats)
# Initialize the prediction for the evaluation dates to zeros.
normalized_predictions = jnp.zeros([
self._dataset_spec.num_forecast_dates,
inputs.shape[1],
self._output_feature_size,], dtype=inputs.dtype)
# Use last prediction from the teacher forcing phase, which will be the
# first prediction for the evaluation dates.
normalized_predictions = jax.lax.dynamic_update_index_in_dim(
normalized_predictions, normalized_predictions_teacher_forcing[-1],
index=0, axis=0)
# Rollout the model for `self._dataset_spec.num_forecast_dates - 1` steps.
def body(x):
rnn_state, prev_features, normalized_predictions, step = x
# Build input features for this step using the features for the last
# step, and the normalized predictions for the last step.
features = utils.rollout_features_with_predictions(
prev_features[None], # Add a time axis.
utils.denormalize(
normalized_predictions[step],
self._dataset_spec.target_stats)[None], # Add a time axis.
self._dataset_spec.feature_names,
self._dataset_spec.target_names,
self._dataset_spec.cadence,
)[-1] # Remove time axis.
# Run the model and update the corresponding slice.
normalized_predictions_step, updated_rnn_state = rnn(
features, rnn_state)
normalized_predictions = jax.lax.dynamic_update_index_in_dim(
normalized_predictions, normalized_predictions_step,
index=step+1, axis=0)
return updated_rnn_state, features, normalized_predictions, step + 1
init_values_loop = rnn_state, inputs[-1], normalized_predictions, 0
_, _, normalized_predictions, _ = jax.lax.while_loop(
lambda x: x[-1] < self._dataset_spec.num_forecast_dates - 1,
body,
init_values_loop)
# Denormalize the outputs of the network and return.
predictions = utils.denormalize(normalized_predictions,
self._dataset_spec.target_stats)
aux_data = {"predictions_for_inputs": predictions_for_inputs}
return predictions, aux_data
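# The following is an illustrative, standalone sketch (not used by `LSTM`) of
# the `hk.DeepRNN` + `hk.dynamic_unroll` pattern used in `_loss_fn`, with
# made-up sizes and all-zero inputs instead of the real feature pipeline.
def _demo_lstm_unroll():
  def unroll(inputs):
    rnn = hk.DeepRNN(
        [hk.Linear(8), jax.nn.relu, hk.LSTM(16), hk.Linear(1)])
    initial_state = rnn.initial_state(inputs.shape[1])
    outputs, _ = hk.dynamic_unroll(rnn, inputs, initial_state)
    return outputs
  unroll_fn = hk.without_apply_rng(hk.transform(unroll))
  inputs = np.zeros([10, 4, 3], dtype=np.float32)  # [time, batch, features]
  params = unroll_fn.init(jax.random.PRNGKey(0), inputs)
  return unroll_fn.apply(params, inputs).shape  # (10, 4, 1)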
| dm_c19_modelling-main | modelling/models/lstm.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Dummy model for testing."""
import abc
from typing import NamedTuple, Sequence, Union
from absl import logging
from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.models import utils
import haiku as hk
import jax
import jax.numpy as jnp
import optax
class LossMinimizerHaikuModelState(NamedTuple):
trainable_params: hk.Params
non_trainable_state: hk.State
optimizer_state: Union[optax.OptState, Sequence[optax.OptState]]
rng: jnp.ndarray # jax.random.PRNGKey
class LossMinimizerHaikuModel(definitions.TrainableModel):
"""Model based on Haiku that is trained by minimizing a loss.
Child classes have to implement the "_loss_fn" and "_predict_fn" methods.
This base class abstracts away all of the complexity around updating params
and states, as well as `hk.transform`, `hk.init` and `hk.apply`.
"""
def __init__(self, init_seed, training_seed, optimizer_kwargs,
learning_rate_annealing_kwargs, jit_predict_fn=True,
jit_update_fn=True, **parent_kwargs):
super().__init__(**parent_kwargs)
self._init_seed = init_seed
self._training_seed = training_seed
self._optimizer_kwargs = optimizer_kwargs
self._learning_rate_annealing_kwargs = learning_rate_annealing_kwargs
# Prepare the update fn.
self._transformed_loss_fn = hk.transform_with_state(self._loss_fn)
    # `hk.transform_with_state(self._loss_fn)` would have an output structured
# as:
# ((loss, scalars_dict), updated_state)
# so we change the packing to:
# (loss, updated_state, scalars_dict)
def loss_fn(*args, **kwargs):
((loss, scalars_dict),
updated_state) = self._transformed_loss_fn.apply(*args, **kwargs)
return loss, updated_state, scalars_dict
self._update_fn = utils.get_optimizer_params_update_step(
loss_fn, self._get_optimizer)
if jit_update_fn:
self._update_fn = jax.jit(self._update_fn)
# Prepare the predict fn.
self._apply_predict_fn = hk.transform_with_state(self._predict_fn).apply
if jit_predict_fn:
self._apply_predict_fn = jax.jit(self._apply_predict_fn)
@abc.abstractmethod
def _loss_fn(self, batch):
"""Computes the loss to be minimized.
Args:
batch: batch of data as generated by `build_training_generator`.
Returns:
Tuple (loss, scalars_dict) with the scalar
loss, and a dictionary of additional scalar metrics.
"""
@abc.abstractmethod
def _prepare_predict_fn_inputs(self, dataset):
"""Builds the inputs to be passed to the `predict_fn`."""
@abc.abstractmethod
def _predict_fn(self, inputs):
"""Makes a prediction using the inputs from `_prepare_predict_fn_inputs`."""
def _get_optimizer(self, global_step):
return utils.get_optimizer_with_learning_rate_annealing(
global_step,
self._optimizer_kwargs,
self._learning_rate_annealing_kwargs)
def _get_initial_state(self, sample_batch):
# Initialize the model.
rng = jax.random.PRNGKey(self._init_seed)
trainable_params, non_trainable_state = self._transformed_loss_fn.init(
rng, sample_batch)
_log_param_types_and_shapes(trainable_params, non_trainable_state)
# Initialize the optimizer.
(optimizer_init_fn, _), _ = self._get_optimizer(global_step=0)
optimizer_state = optimizer_init_fn(trainable_params)
# Initialize a random seed.
rng = jax.random.PRNGKey(self._training_seed)
return LossMinimizerHaikuModelState(
trainable_params=trainable_params,
non_trainable_state=non_trainable_state,
optimizer_state=optimizer_state,
rng=rng)
def training_update(self, previous_state, batch, global_step):
"""Updates the state (params, optimizer step, logic) of the model."""
# Obtain a state if we do not have one.
if previous_state is None:
previous_state = self._get_initial_state(sample_batch=batch)
next_rng, rng_to_use = jax.random.split(previous_state.rng)
with jax.profiler.StepTraceContext(
"training_update", step_num=global_step):
(updated_optimizer_state, updated_params, updated_non_trainable_state,
loss, aux_loss_outputs, aux_optimizer_outputs) = self._update_fn(
global_step,
previous_state.optimizer_state,
previous_state.trainable_params,
previous_state.non_trainable_state,
rng_to_use,
batch)
# Build the next module state.
updated_state = LossMinimizerHaikuModelState(
trainable_params=updated_params,
non_trainable_state=updated_non_trainable_state,
optimizer_state=updated_optimizer_state,
rng=next_rng)
# Aggregate all of the summary scalars.
scalars = {}
scalars["loss"] = loss
scalars.update(aux_optimizer_outputs)
scalars.update(aux_loss_outputs)
return updated_state, scalars
def _evaluate(self, model_state, dataset):
    # This assumes that the evaluation inputs are already something simple
    # like a numpy array. If they are something non-jittable, we may need to
    # add an additional transformation here, or change slightly how we do this.
inputs = self._prepare_predict_fn_inputs(dataset)
((predictions, aux_data), _) = self._apply_predict_fn(
model_state.trainable_params,
model_state.non_trainable_state,
model_state.rng, inputs)
return predictions, aux_data
def _prepare_features(self, features):
"""Prepares features for a neural network."""
return utils.prepare_features(
features,
self._dataset_spec.feature_names,
self._dataset_spec.feature_stats,
categorical_features_dict={
definitions.SITE_ID_INTEGER: len(self._dataset_spec.sites),
definitions.WEEK_DAY_INTEGER: 7
},
features_with_missing_values=(
self._dataset_spec.features_with_missing_values))
def _log_param_types_and_shapes(trainable_params, non_trainable_state):
logging.info("Model trainable params:")
_log_dict_recursively([], trainable_params)
logging.info("Model non-trainable state:")
_log_dict_recursively([], non_trainable_state)
def _log_dict_recursively(parent_names, param):
"""For now, we assume that they are just nested dicts or arrays."""
if not hasattr(param, "dtype"):
for name, child_param in param.items():
_log_dict_recursively(parent_names + [name], child_param)
else:
logging.info(" %s: shape %s dtype %s",
"/".join(parent_names), param.shape, param.dtype)
| dm_c19_modelling-main | modelling/models/base_jax.py |
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SEIR LSTM model.
The basic SEIR model's state variables are:
(S)usceptible_t Susceptible population as a function of time
(E)xposed_t Exposed population as a function of time
(I)nfectious_t Infectious population as a function of time
(R)ecovered_t Recovered population as a function of time
(D)eceased_t Deceased as a function of time
and parameters are:
S2E Rate of transmission (S --> E)
E2I Rate of incubation (E --> I)
I2RD Rate of infectiousness (I --> R/D)
ifr Infection fatality rate [0, 1]
See for reference- https://www.idmod.org/docs/hiv/model-seir.html
This module imposes the following strong assumptions:
* The initial conditions of the model are fitted for each site separately.
* S2E is predicted on a per-day and per-site basis, using an LSTM conditioned
on daily features, and optionally, the state variables for the previous day.
* E2I, I2RD and ifr, are shared across all sites and dates.
"""
import enum
import typing
from typing import Any, Dict, Generator, Mapping, NamedTuple, Tuple, Union
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.modelling.models import base_jax
from dm_c19_modelling.modelling.models import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
if typing.TYPE_CHECKING:
from dm_c19_modelling.modelling.definitions import TrainingDataset
POPULATION = "population"
class SEIRState(NamedTuple):
"""State of the SEIR differential equation.
The contents may have different ranks:
* Scalar floats: Used for initialization.
* Rank 1 arrays with shape [num_sites] to indicate state per site (shape [1]
for site broadcasting).
  * Rank 2 arrays with shape [num_dates, num_sites] to indicate a trajectory
    of states per site (shape [num_dates, 1] for site broadcasting).
"""
susceptible: np.ndarray
exposed: np.ndarray
infectious: np.ndarray
recovered: np.ndarray
deceased: np.ndarray
class SEIRParameters(NamedTuple):
"""Params of the SEIR differential equation. See `SEIRState` for shapes."""
S2E: np.ndarray
E2I: np.ndarray
I2RD: np.ndarray
ifr: np.ndarray
def _compute_seir_derivatives(
state: SEIRState, parameters: SEIRParameters) -> SEIRState:
"""Produces time derivatives for each field of the SEIR state."""
# Susceptible people who get exposed.
s_to_e = parameters.S2E * state.susceptible * state.infectious
# Exposed people who become infectious.
e_to_i = parameters.E2I * state.exposed
# Infectious people who either recover or die.
i_to_rd = parameters.I2RD * state.infectious
# Infectious people who recover.
i_to_r = i_to_rd * (1.0 - parameters.ifr)
# Infectious people who die.
i_to_d = i_to_rd * parameters.ifr
# Set the derivatives.
ds = -s_to_e
de = s_to_e - e_to_i
di = e_to_i - i_to_rd
dr = i_to_r
dd = i_to_d
return SEIRState(susceptible=ds,
exposed=de,
infectious=di,
recovered=dr,
deceased=dd)
def _euler_update(dt: float, state, state_derivative):
"""Integrates a nested state given its derivatives, using Euler integrator."""
return tree.map_structure(
lambda field, d_field: field + dt * d_field,
state, state_derivative)
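# Illustrative sketch of one day of SEIR dynamics for a single site, using
# arbitrary parameter values (not fitted to any data). The compartments are
# fractions of the population and still sum to 1 after the Euler update.
def _demo_seir_euler_step():
  state = SEIRState(
      susceptible=np.array([0.99]), exposed=np.array([0.005]),
      infectious=np.array([0.005]), recovered=np.array([0.]),
      deceased=np.array([0.]))
  params = SEIRParameters(
      S2E=np.array([0.4]), E2I=np.array([0.2]),
      I2RD=np.array([0.1]), ifr=np.array([0.01]))
  derivatives = _compute_seir_derivatives(state, params)
  return _euler_update(dt=1., state=state, state_derivative=derivatives)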
def _calculate_seir_initialization(dataset_spec,
seir_params_init,
min_exposed_fraction):
"""Infers initial state conditions of the SEIR model from recent stats."""
  # Initialize the SEIR state field by field. Note the SEIR state will be
# defined as a fraction of the population.
new_seir_state_init = {}
# Initialize the cumulative fraction of deceased population.
new_deceased_index = list(dataset_spec.target_names).index(
constants.DECEASED_NEW)
total_num_deceased = (
dataset_spec.initial_sum_targets_hint[..., new_deceased_index])
new_seir_state_init["deceased"] = (
total_num_deceased / dataset_spec.population)
# Calculate recovered people from the fatality rate.
ifr = seir_params_init.ifr
new_seir_state_init["recovered"] = new_seir_state_init["deceased"] / ifr * (
1 - ifr)
# Calculate infected proportion from the amount of daily deceased, assuming
# the value of `seir_params_init.I2RD`
# Note the targets are accumulated for `dataset_spec.cadence` days, but we
# need the rate, so we divide by the cadence.
new_deceased_daily = (
dataset_spec.initial_targets_hint[..., new_deceased_index] /
dataset_spec.cadence)
new_deceased_proportion_daily = (
new_deceased_daily / dataset_spec.population)
i2rd = seir_params_init.I2RD
new_seir_state_init["infectious"] = new_deceased_proportion_daily / ifr / i2rd
# Set exposed proportion to be the same number as infectious proportion, as
# these two move together.
new_seir_state_init["exposed"] = new_seir_state_init["infectious"]
# However, we still set the exposed proportion to a minimum value, as if it
# happens to be exactly 0, it would be impossible to leave that state.
new_seir_state_init["exposed"] = np.maximum(
new_seir_state_init["exposed"], min_exposed_fraction)
assert np.all(new_seir_state_init["exposed"] > 0.)
# The remaining fraction, will be population that are still susceptible.
new_seir_state_init["susceptible"] = 1 - sum(
new_seir_state_init.values())
return SEIRState(**new_seir_state_init)
class ExtrapolationModes(enum.Enum):
"""Extrapolation modes for producing ODE parameters for the horizon period."""
  # Fits a polynomial to the last values of the ODE params.
POLYNOMIAL_FIT_PARAMS = "polynomial_fit_params"
# Uses the model predictions to produce features for subsequent days.
# Only possible if only targets and constants are used as features.
ROLLOUT_FEATURES = "rollout_features"
class SEIRLSTM(base_jax.LossMinimizerHaikuModel):
"""LSTM model.
It makes predictions into the future by feeding its own predictions as inputs
for the next step.
"""
def __init__(self,
lstm_embedding_size: int,
lstm_hidden_size: int,
min_exposed_fraction: float,
seir_parameters_init: Mapping[str, float],
condition_lstm_on_ode_state: bool,
extrapolation_mode: Union[str, ExtrapolationModes],
param_extrapolation_context_steps: int,
param_extrapolation_poly_degree: int,
**parent_kwargs): # pylint: disable=g-doc-args
"""Constructor.
Args:
lstm_embedding_size: Size of linear embedding of the features.
lstm_hidden_size: Size of the LSTM.
min_exposed_fraction: Minimum initial fraction of the population exposed
to the disease. The initial SEIR state is estimated from the initial
statistics about accumulated deceased. However, if the number is
zero, the SEIR model would be initialized to 0, and it would not
        be able to leave that state. To avoid this, we set a lower bound
on the fraction of exposed population.
      seir_parameters_init: Initialization values for the SEIR parameters,
        packed in a dict with `SEIRParameters` fields.
condition_lstm_on_ode_state: Whether to feed the current ODE state
as inputs for the LSTM.
extrapolation_mode: One of `ExtrapolationModes`.
param_extrapolation_context_steps: Number of steps to fit a polynomial
to when extrapolating ODE parameters in time. Only for
`extrapolation_mode = ExtrapolationModes.POLYNOMIAL_FIT_PARAMS`.
param_extrapolation_poly_degree: Degree of the polynomial to use when
extrapolating ODE parameters in time. Only for
`extrapolation_mode = ExtrapolationModes.POLYNOMIAL_FIT_PARAMS`.
**parent_kwargs: Attributes for the parent class.
"""
super().__init__(**parent_kwargs)
self._extrapolation_mode = ExtrapolationModes(extrapolation_mode)
self._param_extrapolation_poly_degree = param_extrapolation_poly_degree
self._param_extrapolation_context_steps = param_extrapolation_context_steps
self._lstm_embedding_size = lstm_embedding_size
self._lstm_hidden_size = lstm_hidden_size
# TODO(alvarosg): Maybe support `constants.CONFIRMED_NEW`.
if tuple(self._dataset_spec.target_names) != (constants.DECEASED_NEW,):
raise ValueError(f"{constants.DECEASED_NEW} is the only supported target,"
f" got {self._dataset_spec.target_names}")
if POPULATION not in self._dataset_spec.feature_names:
raise ValueError(
f"Missing population feature, got {self._dataset_spec.feature_names}")
self._seir_parameters_init = SEIRParameters(**seir_parameters_init)
self._condition_lstm_on_ode_state = condition_lstm_on_ode_state
self._seir_state_init = _calculate_seir_initialization(
self._dataset_spec, self._seir_parameters_init,
min_exposed_fraction)
def _build_training_generator(
self, dataset: "TrainingDataset"
) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
while True:
# Leading shape [num_dates, num_sites]
yield dataset.features[:-1], dataset.targets[1:]
def _get_learnable_seir_initial_state(self) -> SEIRState:
"""Returns learnable variables for the initial state of the SEIR model.."""
num_sites = len(self._dataset_spec.sites)
logits_state_dict = {}
for name, init_value in self._seir_state_init._asdict().items():
logits_state_dict[name] = hk.get_parameter(
f"log_{name}_initial", shape=[num_sites], dtype=np.float32,
init=hk.initializers.Constant(np.log(init_value)))
# Make sure they add up to one.
logits_state = SEIRState(**logits_state_dict)
initial_state_array = jax.nn.softmax(
jnp.stack(tuple(logits_state), axis=0), axis=0)
return SEIRState(*list(initial_state_array)) # Unstack along first axis.
def _get_logit_functions(self, param_name):
if param_name == "ifr":
# To map the interval [0, 1]
return jax.scipy.special.logit, jax.nn.sigmoid
elif param_name in ["E2I", "I2RD", "S2E"]:
# To map the interval [0, +inf)
return jnp.log, jnp.exp
else:
raise ValueError(f"Param name {param_name}")
def _get_learnable_seir_params(self) -> SEIRParameters:
"""Returns learnable values for ODE parameters."""
# Get the fixed values.
params_dict = {}
for name, init_value in self._seir_parameters_init._asdict().items():
if name == "S2E":
params_dict[name] = None
else:
log_fn, exp_fn = self._get_logit_functions(name)
        # Shape [1], so it will be broadcast to all sites.
params_dict[name] = exp_fn(hk.get_parameter(
f"log_{name}_param", shape=[1], dtype=np.float32,
init=hk.initializers.Constant(log_fn(init_value))))
return SEIRParameters(**params_dict)
def _build_rnn(self, name_prefix="S2E") -> hk.DeepRNN:
"""Builds network."""
return hk.DeepRNN([
hk.Linear(self._lstm_embedding_size, name=name_prefix + "_encoder"),
jax.nn.relu,
hk.LSTM(self._lstm_hidden_size, name=name_prefix + "_lstm"),
hk.Linear(1, name=name_prefix + "_decoder"), # Predict `S2E`.
])
def _loss_fn(
self, batch: Tuple[np.ndarray,
np.ndarray]) -> Tuple[np.ndarray, Dict[str, Any]]:
"""Computes loss."""
# features_sequence: [num_training_dates, num_sites, num_features]
# targets_sequence: [num_training_dates, num_sites, num_targets]
features_sequence, targets_sequence = batch
rnn = self._build_rnn()
seir_state_sequence, _, _ = self._run_model_on_feature_sequence(
rnn, features_sequence)
# Get the number of deceased scaled by population. Since population is
# constant we can just grab it from the first step.
population = self._get_population(features_sequence[0])
deceased = seir_state_sequence.deceased * population[None]
# Go from cumulative deceased to incremental adding a trailing feature axis.
predictions_sequence = jnp.diff(deceased, axis=0)[..., None]
# Build loss in normalize space.
normalized_targets = utils.normalize(targets_sequence,
self._dataset_spec.target_stats)
normalized_predictions = utils.normalize(predictions_sequence,
self._dataset_spec.target_stats)
l2_error = (normalized_predictions - normalized_targets) ** 2
loss = jnp.mean(l2_error)
scalars_dict = {}
return loss, scalars_dict
def _get_population(self, features):
population_index = list(self._dataset_spec.feature_names).index(POPULATION)
return features[..., population_index]
def _run_model_on_feature_sequence(self, rnn, feature_sequence):
"""Runs the model using a sequence of features to feed the S2E RNN."""
def core(features, state):
seir_state, rnn_state = state
# Obtain the SEIR parameters for this step.
seir_params, updated_rnn_state = self._get_seir_params_for_step_with_rnn(
rnn, features, rnn_state, seir_state)
# Forward the SEIR state with the current SEIR params.
updated_seir_state = self._multi_step_euler_update(
seir_state, seir_params)
      # Note `hk.dynamic_unroll` expects the core to return two outputs.
# * Elements in the first output will be returned for all steps stacked
# along the first axis.
# * Elements of the second output will be passed to each subsequent
# iteration, and ultimately, only the final value will be returned.
# So in this case we return `updated_seir_state` both in the first output
# (to be able to get the full trajectory after `hk.dynamic_unroll`) and
# the second output, to be able to pass the value to the next iteration.
next_state = updated_seir_state, updated_rnn_state
return (updated_seir_state, seir_params), next_state
initial_state_seir = self._get_learnable_seir_initial_state()
initial_state_rnn = rnn.initial_state(feature_sequence.shape[1])
((seir_state_sequence, seir_params_sequence),
(_, final_rnn_state)) = hk.dynamic_unroll(
core, feature_sequence,
initial_state=(initial_state_seir, initial_state_rnn))
seir_state_sequence = tree.map_structure(
lambda first, seq: jnp.concatenate([first[None], seq], axis=0),
initial_state_seir, seir_state_sequence)
return (seir_state_sequence, seir_params_sequence, final_rnn_state)
def _multi_step_euler_update(self, seir_state, seir_params):
# To keep it comparable across values of the cadence, Euler steps will
# always have a fixed dt=1 day, but we will have `cadence` Euler steps.
for _ in range(self._dataset_spec.cadence):
seir_derivatives = _compute_seir_derivatives(seir_state, seir_params)
seir_state = _euler_update(
dt=1, state=seir_state, state_derivative=seir_derivatives)
return seir_state
def _get_seir_params_for_step_with_rnn(
self, rnn, features, rnn_state, seir_state):
"""Returns the SEIR state for a step using an RNN for `S2E`."""
# Get the parameters that are simply learned.
seir_params = self._get_learnable_seir_params()
# Build inputs for the rnn model.
rnn_inputs = self._prepare_features(features)
if self._condition_lstm_on_ode_state:
ode_state_features = jnp.stack(tuple(seir_state), axis=1)
rnn_inputs = jnp.concatenate([rnn_inputs, ode_state_features], axis=1)
# Add the RNN "S2E" value.
rnn_output, updated_rnn_state = rnn(rnn_inputs, rnn_state)
logits_s2e = jnp.squeeze(rnn_output, axis=1)
_, exp_fn = self._get_logit_functions("S2E")
    # Our prior is that S2E will be close to `self._seir_parameters_init.S2E`.
    # Properly initialized neural networks produce initial outputs with zero
    # mean and unit variance, so at the beginning of training the value of S2E
    # will be `exp(logits_s2e) * seir_parameters_init.S2E`, which for
    # `logits_s2e = 0` yields `seir_parameters_init.S2E`. This way the LSTM
    # only has to learn a correction to `seir_parameters_init.S2E`.
s2e = exp_fn(logits_s2e) * self._seir_parameters_init.S2E
seir_params = seir_params._replace(S2E=s2e)
return seir_params, updated_rnn_state
def _prepare_predict_fn_inputs(self, dataset):
# Returns the features.
return dataset.features
def _predict_fn(self, inputs: np.ndarray) -> Tuple[np.ndarray, Any]:
"""Makes a prediction using the inputs."""
# inputs: [num_training_dates, num_sites, num_features]
num_forecast_dates = self._dataset_spec.num_forecast_dates
rnn = self._build_rnn()
# Run LSTM on the input sequence.
(seir_state_sequence,
seir_params_sequence,
rnn_state) = self._run_model_on_feature_sequence(rnn, inputs)
if self._extrapolation_mode == ExtrapolationModes.POLYNOMIAL_FIT_PARAMS:
(additional_seir_state_sequence,
additional_seir_params_sequence
) = self._extrapolation_with_polynomial_fit_on_params(
seir_state=tree.map_structure(lambda x: x[-1], seir_state_sequence),
seir_params_sequence=seir_params_sequence,
num_steps=num_forecast_dates - 1)
elif self._extrapolation_mode == ExtrapolationModes.ROLLOUT_FEATURES:
(additional_seir_state_sequence,
additional_seir_params_sequence
) = self._extrapolation_with_feature_rollout(
rnn=rnn,
seir_state_t=tree.map_structure(
lambda x: x[-1], seir_state_sequence),
seir_state_tm1=tree.map_structure(
lambda x: x[-2], seir_state_sequence),
rnn_state=rnn_state,
features_tm1=inputs[-1],
num_steps=num_forecast_dates - 1)
# Build the full sequence.
seir_state_sequence = tree.map_structure(
lambda a, b: jnp.concatenate([a, b], axis=0),
seir_state_sequence, additional_seir_state_sequence)
seir_params_sequence = tree.map_structure(
lambda a, b: jnp.concatenate([a, b], axis=0),
seir_params_sequence, additional_seir_params_sequence)
# Get the number of deceased scaled by population. Since population is
# constant we can just grab it from the first step.
population = self._get_population(inputs[0])
deceased = seir_state_sequence.deceased * population[None]
# Go from cumulative deceased to incremental adding a trailing feature axis.
new_deceased = jnp.diff(deceased, axis=0)[..., None]
    # Get the final predictions of interest.
predictions = new_deceased[-num_forecast_dates:]
aux_data = {"full_seir_params_sequence": seir_params_sequence,
"full_seir_state_sequence": seir_state_sequence,
"predictions_for_inputs": new_deceased[:-num_forecast_dates]}
return predictions, aux_data
def _extrapolation_with_polynomial_fit_on_params(
self, seir_state, seir_params_sequence, num_steps):
extrapolated_seir_params_sequence = self._extrapolate_seir_params(
seir_params_sequence, num_steps)
# Run additional steps, where the initial SEIR state is the last SEIR state
# from the initial teacher forcing stage.
return self._run_model_on_seir_params_sequence(
initial_seir_state=seir_state,
seir_params_sequence=extrapolated_seir_params_sequence)
def _run_model_on_seir_params_sequence(
self, initial_seir_state, seir_params_sequence):
"""Runs the model using a sequence of seir parameters."""
def core(seir_params, seir_state):
# Forward the SEIR state with the current SEIR params.
updated_seir_state = self._multi_step_euler_update(
seir_state, seir_params)
return (updated_seir_state, seir_params), updated_seir_state
(seir_state_sequence, seir_params_sequence), _ = hk.dynamic_unroll(
core, seir_params_sequence, initial_state=initial_seir_state)
return seir_state_sequence, seir_params_sequence
def _extrapolate_seir_params(self, seir_params_sequence, num_steps):
"""Extrapolate SEIR parameters from previous values with polynomial fit."""
step_index = jnp.arange(
self._param_extrapolation_context_steps + num_steps,
dtype=tree.flatten(seir_params_sequence)[0].dtype)
x_powers = jnp.stack(
[step_index ** p
for p in range(self._param_extrapolation_poly_degree + 1)], axis=1)
# [self._num_context_steps, poly_degree + 1]
x_context = x_powers[:self._param_extrapolation_context_steps]
# [num_steps, poly_degree + 1]
x_extrapolation = x_powers[self._param_extrapolation_context_steps:]
def fn(param_sequence):
if param_sequence.shape[0] < self._param_extrapolation_context_steps:
raise ValueError(
f"Not enough inputs steps {param_sequence.shape[0]} to extrapolate "
f"with {self._param_extrapolation_context_steps} steps of context.")
# [self._num_context_steps, num_sites]
y_context = param_sequence[-self._param_extrapolation_context_steps:]
# [poly_degree + 1, num_sites]
coefficients, _, _, _ = jnp.linalg.lstsq(x_context, y_context)
# [num_steps, num_sites]
return jnp.einsum("td,db->tb", x_extrapolation, coefficients)
return tree.map_structure(fn, seir_params_sequence)
def _extrapolation_with_feature_rollout(
self, rnn, seir_state_t, seir_state_tm1, rnn_state,
features_tm1, num_steps):
"""Rollout own model predictions to produce future model ODE parameters."""
population = self._get_population(features_tm1)
def core(unused_step, state):
features_tm1, seir_state_tm1, seir_state_t, rnn_state = state
# Compute input features for the next step using the predictions based on
# previous ODE states.
# [num_sites]
new_proportion_deceased = seir_state_t.deceased - seir_state_tm1.deceased
# [num_sites, num_targets]
targets = (new_proportion_deceased * population)[..., None]
      # [num_sites, num_features]
features_t = utils.rollout_features_with_predictions(
features_tm1[None], # Add a time axis.
targets[None], # Add a time axis.
self._dataset_spec.feature_names,
self._dataset_spec.target_names,
self._dataset_spec.cadence,
)[-1] # Remove time axis.
# Obtain the SEIR parameters for this step with the RNN.
seir_params, updated_rnn_state = self._get_seir_params_for_step_with_rnn(
rnn, features_t, rnn_state, seir_state_t)
# Forward the SEIR state with the current SEIR params.
seir_state_tp1 = self._multi_step_euler_update(
seir_state_t, seir_params)
next_state = features_t, seir_state_t, seir_state_tp1, updated_rnn_state
return (seir_state_tp1, seir_params), next_state
(seir_state_sequence, seir_params_sequence), _ = hk.dynamic_unroll(
core, jnp.arange(num_steps),
initial_state=(
features_tm1, seir_state_tm1, seir_state_t, rnn_state))
return seir_state_sequence, seir_params_sequence
| dm_c19_modelling-main | modelling/models/seir_lstm.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `utils.py`."""
from absl.testing import absltest
from dm_c19_modelling.modelling import definitions
from dm_c19_modelling.modelling.models import utils
import numpy as np
class RolloutFeaturesTest(absltest.TestCase):
def test_expected_rollout(self):
feature_names = ["feature1", definitions.SITE_ID_INTEGER, "feature2",
"feature3", definitions.WEEK_DAY_INTEGER]
target_names = ["feature3", "feature1"]
constant_features = ["feature2"]
cadence = 2
features = np.array([
# First date. Day #2.
[
# First site.
[10.1, 25., 30., 40.1, 2],
# Second site.
[10.2, 27., 30., 40.2, 2],
],
# Second date. Day #4.
[
# First site.
[11.1, 25., 30., 41.1, 4],
# Second site.
[11.2, 27., 30., 41.2, 4],
],
])
next_steps_targets = np.array([
# Third date. Day #6.
[
# First site.
[42.1, 12.1],
# Second site.
[42.2, 12.2],
],
# Fourth date. Day #8.
[
# First site.
[43.1, 13.1],
# Second site.
[43.2, 13.2],
],
])
output = utils.rollout_features_with_predictions(
features=features,
next_steps_targets=next_steps_targets,
feature_names=feature_names,
target_names=target_names,
cadence=cadence,
constant_features=constant_features)
expected_additional_features = np.array([
# Third date. Day #6.
[
# First site.
[12.1, 25., 30., 42.1, 6],
# Second site.
[12.2, 27., 30., 42.2, 6],
],
# Fourth date. Day #8.
[
# First site.
[13.1, 25., 30., 43.1, 1],
# Second site.
[13.2, 27., 30., 43.2, 1],
],
])
expected_output = np.concatenate(
[features, expected_additional_features], axis=0)
np.testing.assert_allclose(output, expected_output)
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | modelling/models/tests/utils_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `seir_lstm.py`."""
from unittest import mock
from absl.testing import absltest
from dm_c19_modelling.modelling.models import seir_lstm
import numpy as np
class SEIRLSTMTest(absltest.TestCase):
def test_polynomial_extrapolation(self):
mock_seir_lstm_instance = mock.Mock()
mock_seir_lstm_instance._param_extrapolation_poly_degree = 1
mock_seir_lstm_instance._param_extrapolation_context_steps = 2
params_sequence = np.array(
[[0., -3.], # step 0 (site 0, site 1) # Should be ignored.
[7., 9.], # step 1 (site 0, site 1)
[8., 7.]], # step 2 (site 0, site 1)
)
actual = seir_lstm.SEIRLSTM._extrapolate_seir_params(
mock_seir_lstm_instance,
params_sequence,
num_steps=3)
expected = np.array(
[[9., 5.], # step 3 (site 0, site 1)
[10., 3.], # step 4 (site 0, site 1)
[11., 1.]], # step 5 (site 0, site 1)
)
np.testing.assert_allclose(actual, expected, rtol=1e-06)
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | modelling/models/tests/seir_lstm_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.baseline_models."""
import datetime
import functools
from absl.testing import absltest
from absl.testing import parameterized
from dm_c19_modelling.evaluation import baseline_models
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
def _get_dataset(num_training_dates, num_forecast_dates, num_sites):
sites = ["site_1", "site_2", "site_3"]
training_datetimes = [
datetime.datetime.strptime("2020-05-07", "%Y-%m-%d") +
datetime.timedelta(days=i) for i in range(num_training_dates)
]
eval_dates = range(num_training_dates,
num_training_dates + num_forecast_dates)
eval_datetimes = [
datetime.datetime.strptime("2020-05-07", "%Y-%m-%d") +
datetime.timedelta(days=i) for i in eval_dates
]
training_targets = np.random.randint(
0, 100, (num_training_dates, num_sites, 1))
eval_targets = np.random.randint(
0, 100, (num_forecast_dates, num_sites, 1))
sum_past_targets = np.random.randint(0, 100, (len(sites), 1))
return dataset_factory.Dataset(
training_targets=training_targets,
evaluation_targets=eval_targets,
sum_past_targets=sum_past_targets,
training_features=[],
target_names=["new_confirmed"],
feature_names=[],
training_dates=[
datetime.datetime.strftime(date, "%Y-%m-%d")
for date in training_datetimes
],
evaluation_dates=[
datetime.datetime.strftime(date, "%Y-%m-%d")
for date in eval_datetimes
],
sites=sites,
dataset_index_key="12345",
cadence=1
)
class BaselineModelsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._num_training_dates = 20
self._num_forecast_dates = 14
self._num_sites = 3
self._dataset = _get_dataset(
self._num_training_dates, self._num_forecast_dates, self._num_sites)
@parameterized.named_parameters([
("logistic", baseline_models.Logistic),
("gompertz", baseline_models.Gompertz),
("quadratic", functools.partial(
baseline_models.PolynomialFit,
polynomial_degree=2, num_context_dates=2, fit_cumulatives=False)),])
def test_curve_fitting_model_predict(self, model_class):
"""Checks that predictions are the correct shape, defined, and positive."""
model = model_class()
predictions = model.predict(self._dataset)
self.assertEqual(
predictions.shape, (self._num_forecast_dates, self._num_sites, 1))
    if isinstance(model,
                  (baseline_models.Logistic, baseline_models.Gompertz)):
self.assertFalse(np.any(predictions < 0))
self.assertFalse(np.any(np.isnan(predictions)))
def test_repeat_weekly_model_insufficient_data_raises_value_error(self):
"""Checks that the repeat weekly model fails with only 6 days of data."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(6, self._num_forecast_dates, self._num_sites)
with self.assertRaisesRegex(ValueError,
"At least 1 week of training data required *"):
model.predict(dataset)
def test_repeat_weekly_deaths_model_6_day_horizon_outputs_correctly(self):
"""Checks predictions from the repeating model with horizon < 1 week."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(self._num_training_dates, 6, self._num_sites)
predictions = model.predict(dataset)
np.testing.assert_array_equal(predictions, dataset.training_targets[-7:-1])
def test_repeat_weekly_deaths_model_12_day_horizon_outputs_correctly(self):
"""Checks predictions from the repeating model with horizon > 1 week."""
model = baseline_models.RepeatLastWeek()
dataset = _get_dataset(self._num_training_dates, 12, self._num_sites)
predictions = model.predict(dataset)
np.testing.assert_array_equal(predictions[:7],
dataset.training_targets[-7:])
np.testing.assert_array_equal(predictions[7:],
dataset.training_targets[-7:-2])
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/baseline_models_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Script to run evaluation on forecasts from one on more models."""
from absl import app
from absl import flags
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import evaluation
from dm_c19_modelling.evaluation import forecast_indexing
_PROJECT_DIR = flags.DEFINE_string(
"project_directory", None, "The directory where datasets and models are "
"saved.")
_DATASET_NAME = flags.DEFINE_string(
"dataset_name", "covid_open_data_world", "The name of the dataset to use.")
_FORECAST_IDS = flags.DEFINE_list(
"forecast_ids", None, "The IDs of the forecasts to evaluate. The forecasts "
"must be comparable: the models used to generate them must have used the "
"same training dataset, they must have the same forecast date and "
"forecast_horizon, provide forecasts for the same sites, and must predict "
"whatever is specified to be target_name")
_EVAL_DATASET_CREATION_DATE = flags.DEFINE_string(
"eval_dataset_creation_date", "latest", "The creation date of the dataset "
"to use for getting the ground truth for the evaluation dates.")
_TARGET_NAME = flags.DEFINE_string(
"target_name", None, "The name of the target to evaluate.")
_SAVE_METRICS = flags.DEFINE_bool(
"save_metrics", True, "Whether to save metrics to file.")
_SITES_PERMITTED_TO_DROP = flags.DEFINE_list(
"sites_permitted_to_drop", [], "A list of sites that may be dropped from "
"from evaluation if forecasts for that site are not defined in every "
"forecast being compared")
_NUM_FORECAST_DATES = flags.DEFINE_integer(
"num_forecast_dates", None, "The number of dates to use for evaluation. "
"This is optional: if not specified, evaluation will run on the maximum "
"number of overlapping dates available between the different forecasts.")
flags.mark_flags_as_required(["project_directory", "target_name"])
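# Example invocation (illustrative only; the module path, project directory
# and forecast IDs below are placeholders, not values from this repository):
#   python3 -m dm_c19_modelling.evaluation.run_evaluation \
#     --project_directory=/tmp/c19_project \
#     --target_name=new_deceased \
#     --forecast_ids=abc123,def456
# Omitting --forecast_ids prints the forecast IDs available for the dataset.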
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
del argv # Unused
if _FORECAST_IDS.value is None:
    # No forecast IDs were provided, so just list the available ones instead.
forecast_index = forecast_indexing.ForecastIndex(_PROJECT_DIR.value,
_DATASET_NAME.value)
available_forecast_ids = list(forecast_index._index_dict.keys()) # pylint: disable=protected-access
print("\nAvailable forecast IDs:")
print("\n".join(available_forecast_ids))
return
evaluation.evaluate(
directory=_PROJECT_DIR.value,
dataset_name=_DATASET_NAME.value,
eval_dataset_creation_date=_EVAL_DATASET_CREATION_DATE.value,
target_name=constants.Targets(_TARGET_NAME.value),
forecast_ids=_FORECAST_IDS.value,
save=_SAVE_METRICS.value,
sites_permitted_to_drop=_SITES_PERMITTED_TO_DROP.value,
num_forecast_dates=_NUM_FORECAST_DATES.value)
if __name__ == "__main__":
app.run(main)
| dm_c19_modelling-main | evaluation/run_evaluation.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Functions to calculate metrics on forecasts."""
import numpy as np
def check_shape_wrapper(func):
"""Wrapper that checks the shapes of predictions and ground truth."""
def wrapped_func(predictions, ground_truth):
assert predictions.shape == ground_truth.shape, (
f"Predictions array has shape {predictions.shape}, ground truth has "
f"shape {ground_truth.shape}")
assert predictions.ndim == 3, (
"Metrics calculation expects rank 3 predictions and ground truth.")
assert predictions.shape[-1] == 1, (
"Metrics calculation expects a single target")
return func(predictions, ground_truth)
wrapped_func.__name__ = func.__name__
return wrapped_func
@check_shape_wrapper
def rmse(predictions: np.ndarray, ground_truth: np.ndarray) -> float:
"""Gets the RMSE averaged over time and sites for the given predictions."""
squared_error = (predictions - ground_truth) ** 2
return np.sqrt(np.mean(squared_error))
@check_shape_wrapper
def mae(predictions: np.ndarray, ground_truth: np.ndarray) -> float:
"""Gets MAE averaged over time and sites for the given predictions."""
return np.mean(np.abs(predictions - ground_truth))
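# A minimal usage sketch (illustrative, not part of the library API): both
# metrics expect rank-3 arrays of shape (num_dates, num_sites, 1), i.e. a
# single target, as enforced by `check_shape_wrapper`.
def _example_metric_usage():
  """Illustrative only: evaluates RMSE and MAE on random rank-3 arrays."""
  predictions = np.random.rand(14, 3, 1)  # 14 dates, 3 sites, 1 target.
  ground_truth = np.random.rand(14, 3, 1)
  return rmse(predictions, ground_truth), mae(predictions, ground_truth)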
| dm_c19_modelling-main | evaluation/metrics.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.forecast_indexing."""
import os
from absl.testing import absltest
from dm_c19_modelling.evaluation import forecast_indexing
import numpy as np
import pandas as pd
_TEST_DATASET = "test_dataset"
_TEST_FORECASTS_FILE = "test_forecasts.csv"
def _get_test_predictions_and_arrays():
dates = np.array(["2020-05-07", "2020-05-08", "2020-05-09"])
sites = np.array(["site_1", "site_2"])
targets = np.array(["new_confirmed", "new_deceased"])
predictions = np.random.rand(len(dates), len(sites), len(targets)) * 10
return predictions, dates, sites, targets
def _get_test_entry(directory):
return {
"forecast_id": "12345",
"file_location": os.path.join(directory, _TEST_FORECASTS_FILE),
"source_data_info": ["test_dataset_1"],
"creation_timestamp": "2020-06-07_12:43:02",
"dataset_name": _TEST_DATASET,
"last_observation_date": "2020-05-04",
"cadence": 1,
"features_used": ["new_deceased"],
"extra_info": {"model_description": "test_model"}
}
def _create_test_forecasts(file_location):
target_dfs = []
for target_name in ["new_deceased", "new_confirmed"]:
target_dfs.append(pd.DataFrame({
"site_id": ["site_1", "site_2", "site_1", "site_2"],
"date": ["2020-05-05", "2020-05-05", "2020-05-06", "2020-05-06"],
"target_name": [target_name] * 4,
"prediction": np.random.rand(4) * 10
}))
df = pd.concat(target_dfs)
df.to_csv(file_location, index=False)
class ForecastIndexingTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._preds_arr, self._dates_arr, self._sites_arr, self._targets_arr = (
_get_test_predictions_and_arrays())
self._test_dir = absltest.get_default_test_tmpdir()
os.makedirs(self._test_dir, exist_ok=True)
self._key = "12345"
self._entry = _get_test_entry(self._test_dir)
_create_test_forecasts(self._entry["file_location"])
self._remove_index_if_exists()
def _remove_index_if_exists(self):
index_path = os.path.join(
self._test_dir, f"forecast_index-{_TEST_DATASET}.json")
if os.path.exists(index_path):
os.remove(index_path)
def test_prediction_df_columns(self):
"""Checks the columns in the predictions dataframe are as expected."""
predictions_df = forecast_indexing.build_predictions_df(
self._preds_arr, self._dates_arr, self._sites_arr, self._targets_arr)
np.testing.assert_array_equal(
sorted(predictions_df.columns),
["date", "prediction", "site_id", "target_name"])
def test_prediction_df_entries(self):
"""Checks that values in the predictions dataframe are as expected."""
predictions_df = forecast_indexing.build_predictions_df(
self._preds_arr, self._dates_arr, self._sites_arr, self._targets_arr)
np.testing.assert_array_equal(sorted(predictions_df.site_id.unique()),
self._sites_arr)
np.testing.assert_array_equal(sorted(predictions_df.date.unique()),
self._dates_arr)
np.testing.assert_array_equal(sorted(predictions_df.target_name.unique()),
self._targets_arr)
sample_entry = predictions_df.query(
"site_id=='site_1' & date=='2020-05-09' & target_name=='new_confirmed'"
).prediction
np.testing.assert_array_almost_equal(
sample_entry, [self._preds_arr[2][0][0]])
sample_entry = predictions_df.query(
"site_id=='site_2' & date=='2020-05-07' & target_name=='new_deceased'"
).prediction
np.testing.assert_array_almost_equal(
sample_entry, [self._preds_arr[0][1][1]])
def test_predictions_df_bad_shape(self):
"""Checks that building the dataframe fails with inconsistent shapes."""
sites_arr = np.append(self._sites_arr, ["site_3"])
with self.assertRaisesRegex(
ValueError, "Predictions have unexpected shape *"):
forecast_indexing.build_predictions_df(
self._preds_arr, self._dates_arr, sites_arr, self._targets_arr)
def test_add_to_index_and_query(self):
"""Tests that a well-formatted forecasts entry is added to the index."""
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(self._key, self._entry)
read_index = forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET)
self.assertIsNotNone(read_index.query_by_forecast_id("12345"))
def test_fails_validation_nan_predictions(self):
"""Checks that validation fails if there are NaNs in predictions."""
df = pd.read_csv(self._entry["file_location"])
df.loc[0, "prediction"] = np.nan
df.to_csv(self._entry["file_location"], index=False)
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
with self.assertRaisesWithLiteralMatch(
          ValueError, "NaNs found in forecasts"):
index.add_entry(self._key, self._entry)
def test_fails_validation_missing_predictions(self):
"""Checks that validation fails if a date is undefined for a site."""
df = pd.read_csv(self._entry["file_location"])
df.drop(0, inplace=True)
df.to_csv(self._entry["file_location"], index=False)
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
with self.assertRaisesRegex(
ValueError, "Missing data found in the forecasts*"):
index.add_entry(self._key, self._entry)
def test_fails_validation_missing_column(self):
"""Checks that validation fails when a column is missing."""
df = pd.read_csv(self._entry["file_location"])
df.drop(columns=["target_name"], inplace=True)
df.to_csv(self._entry["file_location"], index=False)
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
with self.assertRaisesRegex(
ValueError, "Forecasts must have columns*"):
index.add_entry(self._key, self._entry)
def test_fails_validation_inconsistent_cadence(self):
"""Checks that validation fails when forecasts have inconsistent cadence."""
df = pd.read_csv(self._entry["file_location"])
df_extra = df[df.date == "2020-05-06"]
df_extra.date = "2020-05-08"
df = pd.concat([df, df_extra])
df.to_csv(self._entry["file_location"], index=False)
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
with self.assertRaisesWithLiteralMatch(
ValueError, "Inconsistent cadence found in forecasts"):
index.add_entry(self._key, self._entry)
def test_fails_invalid_target_name(self):
"""Checks that validation fails when forecasts contain an invalid target."""
df = pd.read_csv(self._entry["file_location"])
df.loc[0:3, "target_name"] = "bad_target"
df.to_csv(self._entry["file_location"], index=False)
with forecast_indexing.ForecastIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
with self.assertRaisesWithLiteralMatch(
ValueError, "Invalid target in forecasts: bad_target"):
index.add_entry(self._key, self._entry)
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/forecast_indexing_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for indexing forecasts."""
import datetime
import os
from typing import Any, Dict, Sequence, Optional, Union
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_indexing
import numpy as np
import pandas as pd
# Internal imports.
class ForecastIndex(base_indexing.BaseIndex):
"""Manages loading, querying, and adding entries to an index of forecasts."""
@property
def _index_type(self):
return "forecast"
@property
def _additional_fields(self):
return ("last_observation_date", "forecast_id", "cadence", "features_used",)
def load_file_by_key(self,
key: str,
validate: bool = True) -> pd.DataFrame:
"""Loads the file contained in the index entry with the given key."""
entry = self.get_entry(key)
file_location = entry["file_location"]
if validate:
base_indexing.validate_path(file_location)
logging.info("Loading forecasts from %s", file_location)
with open(file_location, "r") as fid:
return pd.read_csv(fid, keep_default_na=False,
na_values=[""], dtype={constants.SITE_ID: str})
def _validate_file_in_entry(self,
entry: base_indexing.IndexEntryType) -> None:
"""Validates that contents of forecasts adhere to the expected format."""
file_location = entry["file_location"]
with open(file_location, "r") as fid:
df = pd.read_csv(fid, keep_default_na=False,
na_values=[""], dtype={constants.SITE_ID: str})
required_columns = set([constants.PREDICTION, constants.DATE,
constants.SITE_ID, constants.TARGET_NAME])
if set(required_columns) != set(df.columns):
raise ValueError(
f"Forecasts must have columns: {', '.join(sorted(required_columns))}."
f" Has columns: {', '.join(sorted(df.columns))}")
if pd.isnull(df[constants.PREDICTION]).any():
      raise ValueError("NaNs found in forecasts")
for _, preds_per_site_target in df.groupby(
[constants.SITE_ID, constants.TARGET_NAME]):
# Check that the diff in dates for all but the first element is always
# the same (pandas computes a backwards diff and returns NaN for the first
      # element).
date_diffs = pd.to_datetime(preds_per_site_target[constants.DATE]).diff()
if len(date_diffs) > 1 and not (
date_diffs.iloc[1:] == pd.Timedelta(entry["cadence"], "D")).all():
raise ValueError("Inconsistent cadence found in forecasts")
if pd.pivot_table(
df,
index=[constants.DATE, constants.SITE_ID, constants.TARGET_NAME],
dropna=False).isna().any().any():
raise ValueError("Missing data found in the forecasts: at least one site "
"does not have forecasts for all the evaluation dates "
"and all of the targets.")
for target_name in df["target_name"].unique():
try:
constants.Targets(target_name)
except ValueError:
raise ValueError(f"Invalid target in forecasts: {target_name}")
def query_by_forecast_id(self, forecast_id: str) -> Union[str, None]:
"""Gets the key in the index corresponding to the given forecast ID."""
if forecast_id in self._index_dict:
return forecast_id
else:
return None
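# Illustrative sketch of the cadence check performed in the index validation
# above (not used by the index itself): `diff()` yields NaT for the first row,
# so only the remaining rows are compared against the expected day offset.
def _example_cadence_is_consistent(dates=("2020-05-05", "2020-05-06",
                                          "2020-05-07"), cadence=1):
  """Illustrative only: True if `dates` are spaced `cadence` days apart."""
  date_diffs = pd.to_datetime(pd.Series(dates)).diff()
  return bool((date_diffs.iloc[1:] == pd.Timedelta(cadence, "D")).all())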
def build_predictions_df(predictions: np.ndarray, dates: np.ndarray,
sites: np.ndarray,
target_names: np.ndarray) -> pd.DataFrame:
"""Builds a dataframe of predictions per site, date and target.
Args:
predictions: an array of shape (num_forecast_dates, num_sites, num_targets)
containing model predictions for the evaluation dates.
dates: an array of shape (num_forecast_dates), specifying the evaluation
dates.
sites: an array of shape (num_sites), specifying the site IDs.
target_names: an array of shape (num_targets), specifying the names of the
targets which are being predicted.
Returns:
A dataframe with columns ("date", "site_id", "target_name", "prediction")
"""
expected_predictions_shape = (len(dates), len(sites), len(target_names))
if not np.equal(predictions.shape, expected_predictions_shape).all():
raise ValueError(f"Predictions have unexpected shape {predictions.shape}. "
f"Expected {expected_predictions_shape}")
# Construct a dataframe of predictions for each target then concatenate them
target_dfs = []
for idx, target_name in enumerate(target_names):
target_df = pd.DataFrame(data=predictions[:, :, idx], columns=sites)
target_df[constants.DATE] = dates
target_df = target_df.melt(
id_vars=constants.DATE,
value_vars=sites,
var_name=constants.SITE_ID,
value_name=constants.PREDICTION)
target_df[constants.TARGET_NAME] = target_name
target_dfs.append(target_df)
df = pd.concat(target_dfs)
return df
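# A small illustrative call (arbitrary values): three forecast dates, two
# sites and one target yield a long dataframe with one row per
# (date, site_id, target_name) combination.
def _example_build_predictions_df():
  """Illustrative only: builds a predictions dataframe from toy arrays."""
  predictions = np.zeros((3, 2, 1))
  dates = np.array(["2020-05-05", "2020-05-06", "2020-05-07"])
  sites = np.array(["site_1", "site_2"])
  target_names = np.array(["new_deceased"])
  return build_predictions_df(predictions, dates, sites, target_names)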
def build_entry(forecast_id: str, file_location: str, dataset_name: str,
last_observation_date: str, creation_timestamp: str,
dataset_index_key: str, dataset_location: str, cadence: int,
extra_info: Dict[str, Any],
features_used: Optional[Sequence[str]] = None,
) -> base_indexing.IndexEntryType:
"""Builds an entry into a forecast index.
Args:
forecast_id: the unique identifier of the forecasts.
file_location: the path to the forecasts on disk.
dataset_name: the name of the dataset that the forecasts refer to.
last_observation_date: the last date of ground truth that was used to train
the model.
creation_timestamp: the datetime at which the forecasts were created.
dataset_index_key: the key into the dataset index of the dataset that
was used to train the model.
dataset_location: the path to the dataset file that the model was trained
on.
cadence: the cadence in days of the predictions. i.e. daily predictions have
a cadence of 1, weekly predictions have a cadence of 7.
extra_info: any extra information that is useful to store alongside the
      rest of the forecast metadata. Usually includes a description of the
model.
features_used: the features that were used as inputs to produce the
forecasts.
Returns:
An entry for this forecast that can be added to the forecast index.
"""
return {
"forecast_id": forecast_id,
"file_location": file_location,
"dataset_name": dataset_name,
"last_observation_date": last_observation_date,
"cadence": cadence,
"creation_timestamp": creation_timestamp,
"source_data_info": {"dataset_key": dataset_index_key,
"dataset_location": dataset_location},
"features_used": features_used if features_used else "N/A",
"extra_info": extra_info
}
def save_predictions_df(predictions_df: pd.DataFrame,
directory: str,
last_observation_date: str,
forecast_horizon: int,
model_description: Optional[Dict[str, str]],
dataset_name: str,
dataset_index_key: str,
cadence: int,
extra_info: Optional[Dict[str, str]],
features_used: Optional[Sequence[str]] = None) -> str:
"""Saves a formatted predictions dataframe and updates a forecast indexer.
Args:
predictions_df: a dataframe of predictions, with columns ['date', 'site_id',
'prediction', 'target_name']
directory: the base directory to store indexes and forecasts.
last_observation_date: the date string corresponding to the last date of
data that the model had access to during training.
forecast_horizon: the maximum number of days into the future that the model
predicts.
model_description: optional description of the model.
dataset_name: the name of the dataset.
dataset_index_key: the unique key into the dataset index that contains the
training dataset that the model was trained on.
cadence: the cadence in days of the predictions. i.e. daily predictions have
a cadence of 1, weekly predictions have a cadence of 7.
extra_info: a dict of any additional information to store with the
forecasts.
features_used: the features that were used as inputs to produce the
forecasts.
Returns:
the unique forecast ID that this forecast is saved under.
"""
unique_key = base_indexing.get_unique_key()
forecast_directory = os.path.join(directory, "forecasts")
if not os.path.exists(forecast_directory):
os.makedirs(forecast_directory)
output_filepath = os.path.join(forecast_directory,
f"forecasts_{unique_key}.csv")
assert not os.path.exists(output_filepath), (
f"Forecasts already exist at {output_filepath}")
with open(output_filepath, "w") as fid:
predictions_df.to_csv(fid, index=False)
logging.info("Saved model forecasts with forecast ID %s to %s", unique_key,
output_filepath)
extra_info = extra_info or {}
extra_info["forecast_horizon"] = forecast_horizon
if model_description is not None:
extra_info["model_description"] = model_description
current_datetime = datetime.datetime.utcnow()
dataset_index = dataset_indexing.DatasetIndex(directory, dataset_name)
dataset_location = dataset_index.get_entry(dataset_index_key)["file_location"]
entry = build_entry(
forecast_id=unique_key,
file_location=output_filepath,
dataset_name=dataset_name,
last_observation_date=last_observation_date,
creation_timestamp=current_datetime.strftime(constants.DATETIME_FORMAT),
dataset_index_key=dataset_index_key,
dataset_location=dataset_location,
cadence=cadence,
features_used=features_used,
extra_info=extra_info)
base_indexing.open_index_and_add_entry(
directory, dataset_name, index_class=ForecastIndex, key=unique_key,
entry=entry)
return unique_key
| dm_c19_modelling-main | evaluation/forecast_indexing.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for evaluating model forecasts."""
import datetime
import itertools
import os
from typing import List, Optional, Sequence, Tuple
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.evaluation import dataset_indexing
from dm_c19_modelling.evaluation import forecast_indexing
from dm_c19_modelling.evaluation import metrics
from dm_c19_modelling.evaluation import plot_utils
import numpy as np
import pandas as pd
# Internal imports.
_METRICS_TO_CALCULATE = {"rmse": metrics.rmse, "mae": metrics.mae}
def _all_arrays_equal(arrs: Sequence[np.ndarray]) -> bool:
"""Checks whether all elements of a list of numpy arrays are equal."""
first = arrs[0]
for arr in arrs[1:]:
if arr.shape != first.shape or not np.all(arr == first):
return False
return True
def _get_sorted_intersection_of_arrays(
arrs: Sequence[np.ndarray]) -> Tuple[np.ndarray, np.ndarray]:
"""Gets the intersecting and non-intersecting elements of a list of arrays."""
sets = [set(arr) for arr in arrs]
intersecting_elts = set.intersection(*sets)
non_intersecting_elts = set.union(*sets) - set(intersecting_elts)
return np.sort(list(intersecting_elts)), np.sort(list(non_intersecting_elts))
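# Illustrative only: with site arrays ["a", "b"] and ["b", "c"] the helper
# returns (["b"], ["a", "c"]): the shared sites plus those that would have to
# be dropped before forecasts can be compared.
def _example_site_intersection():
  """Illustrative only: demonstrates the intersection helper above."""
  arrs = [np.array(["a", "b"]), np.array(["b", "c"])]
  return _get_sorted_intersection_of_arrays(arrs)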
def _load_all_entries_and_forecasts(
directory: str, dataset_name: str, forecast_ids: Sequence[str],
target_name: str
) -> Tuple[List[base_indexing.IndexEntryType], List[pd.DataFrame]]:
"""Loads all forecast index entries and dataframes for the forecast IDs."""
forecast_index = forecast_indexing.ForecastIndex(directory, dataset_name)
all_forecast_entries = []
all_forecasts = []
for forecast_id in forecast_ids:
key = forecast_index.query_by_forecast_id(forecast_id)
if key is None:
raise ValueError(f"Could not find forecast ID {forecast_id} in forecast "
f"index for dataset {dataset_name} in directory "
f"{directory}")
all_forecast_entries.append(forecast_index.get_entry(key))
forecast_df = forecast_index.load_file_by_key(key)
# Filter the forecasts for the target of interest
forecast_df_for_target = forecast_df[forecast_df[constants.TARGET_NAME] ==
target_name]
if forecast_df_for_target.empty:
raise ValueError(f"Unable to find forecasts for target {target_name} in "
f"forecast {forecast_id}")
# We only require the date, site and prediction columns; drop any others
forecast_df_for_target = forecast_df_for_target[[
constants.DATE, constants.SITE_ID, constants.PREDICTION
]]
all_forecasts.append(forecast_df_for_target)
return all_forecast_entries, all_forecasts
def _convert_forecasts_to_arrays(
forecasts: Sequence[pd.DataFrame]) -> List[dataset_factory.DataArrays]:
return [dataset_factory.df_to_arrays(forecast) for forecast in forecasts]
def _get_last_observation_date_and_validate_comparable(
forecast_entries: Sequence[base_indexing.IndexEntryType]
) -> Tuple[str, int]:
"""Checks that the forecast index entries are compatible for evaluation."""
last_observation_dates = np.array(
[entry["last_observation_date"] for entry in forecast_entries])
if not _all_arrays_equal(last_observation_dates):
raise ValueError("Models can only be compared if they have the same "
"last_observation_date. Found last_observation_dates: "
f"{last_observation_dates}")
forecast_cadences = np.array([entry["cadence"] for entry in forecast_entries])
if not _all_arrays_equal(forecast_cadences):
raise ValueError(
"Models can only be compared if they have the same forecast cadence. "
f"Found cadences: {forecast_cadences}")
forecast_sources = [
np.array(entry["source_data_info"]["dataset_key"])
for entry in forecast_entries
]
if not _all_arrays_equal(forecast_sources):
raise ValueError(
"Models can only be compared if they were trained using the same "
f"dataset. Found dataset keys: {forecast_sources}")
return str(last_observation_dates[0]), int(forecast_cadences[0])
def _get_forecast_spec_and_comparable_predictions(
forecasts_arrays: List[dataset_factory.DataArrays],
num_forecast_dates: Optional[int] = None
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""Validates that the forecast dataframes are compatible for evaluation."""
date_arrays = [arrays.dates_array for arrays in forecasts_arrays]
site_arrays = [arrays.sites_array for arrays in forecasts_arrays]
feature_arrays = [arrays.features_array for arrays in forecasts_arrays]
data_arrays = [arrays.data_array for arrays in forecasts_arrays]
feature_set = set(itertools.chain.from_iterable(feature_arrays))
assert feature_set == {
"prediction"
}, (f"Unexpected columns in forecasts: {feature_set - {'prediction'}}")
if not _all_arrays_equal(site_arrays):
overlapping_sites, sites_to_drop = _get_sorted_intersection_of_arrays(
site_arrays)
if not overlapping_sites.size:
raise ValueError("Models can only be compared if they produce "
"predictions for overlapping sites.")
updated_data_arrays = []
for site_array, data_array in zip(site_arrays, data_arrays):
site_locs = np.in1d(site_array, overlapping_sites)
updated_data_arrays.append(data_array[:, site_locs, :])
data_arrays = updated_data_arrays
else:
overlapping_sites = site_arrays[0]
sites_to_drop = np.array([])
if not _all_arrays_equal(date_arrays):
overlapping_dates, _ = _get_sorted_intersection_of_arrays(date_arrays)
if not overlapping_dates.size:
raise ValueError("Models can only be compared if they produce "
"predictions for overlapping dates.")
logging.warn(
"Using the set of dates that are valid for all "
"forecasts, from %s to %s", overlapping_dates[0], overlapping_dates[-1])
else:
overlapping_dates = date_arrays[0]
updated_data_arrays = []
if num_forecast_dates:
overlapping_dates = overlapping_dates[:num_forecast_dates]
for date_array, data_array in zip(date_arrays, data_arrays):
date_locs = np.in1d(date_array, overlapping_dates)
if not np.all(np.diff(np.where(date_locs)[0]) == 1):
raise ValueError("Overlapping dates aren't consecutive through time.")
updated_data_arrays.append(data_array[date_locs])
return overlapping_dates, overlapping_sites, sites_to_drop, np.array(
updated_data_arrays)
def _validate_eval_dataset_comparable(dataset: dataset_factory.Dataset,
dates: np.ndarray, sites: np.ndarray):
"""Checks the eval dataset contains all the sites & dates in the forecasts."""
assert np.all(np.sort(sites) == sites), "Forecast sites should be sorted"
assert np.all(np.sort(dataset.sites) == dataset.sites), (
"Dataset sites should be sorted")
forecast_sites = set(sites)
sites_available_in_dataset = set(dataset.sites)
if not forecast_sites.issubset(sites_available_in_dataset):
raise ValueError(
"Not all of the sites in the forecasts are present in the evaluation "
"dataset. Missing data for sites: "
f"{forecast_sites - sites_available_in_dataset}")
assert np.array_equal(dates, dataset.evaluation_dates), (
"Dates in forecasts differ from dates in evaluation dataset")
def _calculate_metrics(forecast_id: str, predictions: np.ndarray,
ground_truth: np.ndarray,
target_name: str) -> pd.DataFrame:
"""Calculates metrics for a given dataframe of forecasts."""
assert predictions.shape == ground_truth.shape, (
f"Predictions array has shape {predictions.shape}, ground truth has "
f"shape {ground_truth.shape}")
metrics_data = []
for metric_name, metric_fn in _METRICS_TO_CALCULATE.items():
metric_value = metric_fn(predictions=predictions, ground_truth=ground_truth)
metrics_data.append([forecast_id, metric_name, metric_value, target_name])
return pd.DataFrame(
data=metrics_data,
columns=[
"forecast_id", "metric_name", "metric_value", constants.TARGET_NAME
])
def get_recorded_creation_date(directory: str, dataset_name: str,
key: str) -> str:
"""Gets the actual creation date in case the creation date is 'latest'."""
index = dataset_indexing.DatasetIndex(directory, dataset_name)
entry = index.get_entry(key)
return str(entry["creation_date"])
def evaluate(directory: str, dataset_name: str, eval_dataset_creation_date: str,
target_name: constants.Targets, forecast_ids: Sequence[str],
save: bool,
sites_permitted_to_drop: Optional[Sequence[str]],
num_forecast_dates: Optional[int]) -> pd.DataFrame:
"""Calculates and saves metrics for model forecasts if they are comparable."""
all_forecast_entries, all_forecasts = _load_all_entries_and_forecasts(
directory, dataset_name, forecast_ids, target_name.value)
last_observation_date, forecast_cadence = (
_get_last_observation_date_and_validate_comparable(all_forecast_entries))
all_forecast_arrays = _convert_forecasts_to_arrays(all_forecasts)
dates_to_eval, sites_to_eval, sites_to_drop, all_predictions = (
_get_forecast_spec_and_comparable_predictions(all_forecast_arrays,
num_forecast_dates))
if sites_to_drop.size and set(sites_to_drop) != set(sites_permitted_to_drop):
raise ValueError(
f"Only {sites_permitted_to_drop} are allowed to be dropped but "
f"{len(sites_to_drop)} non-intersecting sites were found: "
f"{sites_to_drop}.")
elif sites_to_drop.size:
logging.warn(
"Using the set of sites that are defined for all forecasts: the "
"following sites are being dropped: %s", sites_to_drop)
forecast_horizon = (
datetime.datetime.strptime(max(dates_to_eval), constants.DATE_FORMAT) -
datetime.datetime.strptime(last_observation_date,
constants.DATE_FORMAT)).days
eval_dataset = dataset_factory.get_dataset(
directory=directory,
dataset_name=dataset_name,
creation_date=eval_dataset_creation_date,
last_observation_date=last_observation_date,
targets=[target_name],
features=[],
num_forecast_dates=len(dates_to_eval),
cadence=forecast_cadence)
_validate_eval_dataset_comparable(eval_dataset, dates_to_eval, sites_to_eval)
if np.any(np.isnan(eval_dataset.evaluation_targets)):
raise ValueError(
"NaNs found in the ground truth. A likely cause is that "
f"the dataset does not contain {forecast_horizon} days "
"of data after the last_observation_date. A later creation date "
"may be required.")
# Get the ground truth data for the required sites on the evaluation dates
available_sites = eval_dataset.sites
sites_locs = np.where(np.in1d(available_sites, sites_to_eval))[0]
available_dates = eval_dataset.evaluation_dates
dates_locs = np.where(np.in1d(available_dates, dates_to_eval))[0]
ground_truth = eval_dataset.evaluation_targets[:, sites_locs, :]
ground_truth = ground_truth[dates_locs, :, :]
metrics_dfs = []
for forecast_id, predictions in zip(forecast_ids, all_predictions):
metrics_dfs.append(
_calculate_metrics(forecast_id, predictions, ground_truth,
target_name.value))
metrics_df = pd.concat(metrics_dfs)
# Get the actual evaluation creation date in case using 'latest'
eval_dataset_creation_date = get_recorded_creation_date(
directory, dataset_name, eval_dataset.dataset_index_key)
metrics_dir = os.path.join(directory, "metrics")
if save:
filename_base = (
f"metrics_{'_'.join(forecast_ids)}_{eval_dataset_creation_date}_"
f"{forecast_horizon}d"
)
_save_metrics(metrics_dir, filename_base, eval_dataset_creation_date,
metrics_df)
_plot_metrics_and_save(
directory=metrics_dir,
filename_base=filename_base,
target_name=target_name.value,
metrics_df=metrics_df,
forecast_index_entries=all_forecast_entries,
last_observation_date=last_observation_date,
forecast_horizon=forecast_horizon,
eval_dataset_creation_date=eval_dataset_creation_date,
num_dates=len(dates_to_eval),
num_sites=len(sites_to_eval),
cadence=forecast_cadence,
dropped_sites=sites_to_drop
)
return metrics_df
def _save_metrics(directory: str, filename_base: str,
eval_dataset_creation_date: str, metrics_df: pd.DataFrame):
"""Saves metrics dataframe as a csv file in the metrics directory."""
if not os.path.exists(directory):
os.makedirs(directory)
data_filepath = os.path.join(directory, f"{filename_base}.csv")
if os.path.exists(data_filepath):
raise IOError(f"Metrics already exist at {data_filepath}")
logging.info("Saving metrics data to %s", data_filepath)
with open(data_filepath, "w") as fid:
metrics_df.to_csv(fid, index=False)
def _plot_metrics_and_save(directory: str, filename_base: str, target_name: str,
metrics_df: pd.DataFrame,
forecast_index_entries: Sequence[
base_indexing.IndexEntryType],
last_observation_date: str, forecast_horizon: int,
eval_dataset_creation_date: str, num_dates: int,
num_sites: int,
cadence: int,
dropped_sites: np.ndarray) -> None:
"""Plots metrics as a series of bar plots and saves them to file."""
plot_filepath = os.path.join(directory, f"{filename_base}.png")
fig = plot_utils.plot_metrics(
metrics_df=metrics_df,
forecast_index_entries=forecast_index_entries,
target_name=target_name,
last_observation_date=last_observation_date,
forecast_horizon=forecast_horizon,
eval_dataset_creation_date=eval_dataset_creation_date,
num_sites=num_sites,
num_dates=num_dates,
cadence=cadence,
dropped_sites=dropped_sites)
logging.info("Saving metrics plots to %s", plot_filepath)
  with open(plot_filepath, "wb") as fid:
fig.savefig(fid, format="png", bbox_inches="tight")
| dm_c19_modelling-main | evaluation/evaluation.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""General constants available in dataset and evaluation modules."""
import enum
DATE = "date"
SITE_ID = "site_id"
DATE_FORMAT = "%Y-%m-%d"
DATETIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
DECEASED_NEW = "new_deceased"
CONFIRMED_NEW = "new_confirmed"
HOSPITALISED_NEW = "new_hospitalised"
# Column names used in forecasts
PREDICTION = "prediction"
TARGET_NAME = "target_name"
# At least one target needs to be defined in a dataset.
class Targets(enum.Enum):
DECEASED_NEW = DECEASED_NEW
CONFIRMED_NEW = CONFIRMED_NEW
HOSPITALISED_NEW = HOSPITALISED_NEW
# Models that are available
class Models(enum.Enum):
LOGISTIC = "logistic" # Fit cumulative targets as a Logistic function of time
GOMPERTZ = "gompertz" # Fit cumulative targets as a Gompertz function of time
LINEAR = "linear" # Fit targets as a linear function of time
QUADRATIC = "quadratic" # Fit targets as a quadratic function of time
REPEAT_LAST_WEEK = "repeat_last_week" # Repeat the last week's data
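# Illustrative note: target and model strings coming from flags or forecast
# files are parsed via these enums, e.g. Targets("new_deceased") is
# Targets.DECEASED_NEW, and an unrecognised string raises ValueError.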
| dm_c19_modelling-main | evaluation/constants.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.dataset_factory."""
from unittest import mock
from absl.testing import absltest
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
import pandas as pd
_DEFAULT_ARGS = {"directory": "", "dataset_name": "", "creation_date": "",
"cadence": 1}
def _get_raw_dataset():
dates = ["2020-05-01",
"2020-05-02",
"2020-05-03",
"2020-05-04",
"2020-05-05",
"2020-05-06",
"2020-05-07",
"2020-05-08",
"2020-05-09"]
sites = ["site_1", "site_2"]
df = pd.DataFrame({
constants.DATE: np.repeat(dates, len(sites)),
constants.SITE_ID: np.tile(sites, len(dates)),
"new_deceased": np.random.randint(0, 5,
len(sites) * len(dates)),
"new_confirmed": np.random.randint(0, 9,
len(sites) * len(dates)),
"feature_1": np.random.rand(len(sites) * len(dates)),
"feature_2": np.random.rand(len(sites) * len(dates)),
"feature_3": np.random.rand(len(sites) * len(dates)),
"feature_4": np.random.rand(len(sites) * len(dates))
})
df[constants.DATE] = pd.to_datetime(df[constants.DATE])
return df
class DataFactoryTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._raw_dataset = _get_raw_dataset()
self._key = "12345"
self._mock_raw_dataset = self.enter_context(
mock.patch.object(
dataset_factory, "_load_dataset_by_creation_date", autospec=True))
self._mock_raw_dataset.return_value = self._raw_dataset, self._key
def test_invalid_last_observation_date(self):
"""Checks for failure when the last date is beyond the defined range."""
last_observation_date = "2020-05-10"
with self.assertRaisesRegex(
ValueError,
f"Forecast date {last_observation_date} not found in dataset. *"):
dataset_factory.get_dataset(
last_observation_date=last_observation_date,
targets=[],
features=[],
num_forecast_dates=14,
**_DEFAULT_ARGS)
def test_insufficient_num_forecast_dates(self):
"""Checks padding applied when the dataset is missing evaluation dates."""
last_observation_date = "2020-05-08"
num_forecast_dates = 2
dataset = dataset_factory.get_dataset(
last_observation_date=last_observation_date,
targets=[constants.Targets.CONFIRMED_NEW,
constants.Targets.DECEASED_NEW],
features=[],
num_forecast_dates=num_forecast_dates,
**_DEFAULT_ARGS)
np.testing.assert_equal(dataset.evaluation_dates,
["2020-05-09", "2020-05-10"])
self.assertTrue(np.isnan(dataset.evaluation_targets[1:]).all())
def test_invalid_min_num_training_dates(self):
"""Checks for failure when there's insufficient training data."""
last_observation_date = "2020-05-08"
min_num_training_dates = 9
with self.assertRaisesWithLiteralMatch(
ValueError,
f"Could not retrieve {min_num_training_dates} days of data before "
f"{last_observation_date} from dataset."):
dataset_factory.get_dataset(
last_observation_date=last_observation_date,
targets=[],
features=[],
num_forecast_dates=1,
min_num_training_dates=min_num_training_dates,
**_DEFAULT_ARGS)
def test_valid_min_num_training_dates(self):
"""Checks that a valid constraint on training data succeeds."""
last_observation_date = "2020-05-08"
min_num_training_dates = 2
dataset = dataset_factory.get_dataset(
last_observation_date=last_observation_date,
targets=[],
features=[],
num_forecast_dates=1,
min_num_training_dates=min_num_training_dates,
**_DEFAULT_ARGS)
self.assertLen(dataset.training_dates, 8)
def test_invalid_feature(self):
"""Checks for failure when a requested feature isn't present."""
features = ["feature_4", "feature_5"]
with self.assertRaisesWithLiteralMatch(
ValueError,
"Could not find requested features ['feature_5'] in dataset"):
dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[],
features=features,
num_forecast_dates=1,
**_DEFAULT_ARGS)
def test_training_data_truncation(self):
"""Checks that training data is truncated when requested."""
last_observation_date = "2020-05-08"
dataset = dataset_factory.get_dataset(
last_observation_date=last_observation_date,
targets=[constants.Targets.DECEASED_NEW,
constants.Targets.CONFIRMED_NEW],
features=[],
max_num_training_dates=1,
num_forecast_dates=1,
**_DEFAULT_ARGS)
self.assertEqual(dataset.training_dates[0], last_observation_date)
actual_sum_past_targets = self._raw_dataset[
dataset.target_names].to_numpy().reshape(9, 2, 2)[:-2].sum(0)
np.testing.assert_array_equal(
actual_sum_past_targets, dataset.sum_past_targets)
def test_dataset_shapes_and_values(self):
"""Checks the shapes and values of a valid dataset specification."""
dataset = dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[constants.Targets.CONFIRMED_NEW],
features=["feature_1", "feature_4", "feature_2"],
num_forecast_dates=1,
**_DEFAULT_ARGS)
self.assertEqual(dataset.training_targets.shape, (8, 2, 1))
self.assertEqual(dataset.evaluation_targets.shape, (1, 2, 1))
self.assertEqual(dataset.training_features.shape, (8, 2, 3))
actual_target = self._raw_dataset[dataset.target_names].to_numpy().reshape(
9, 2, 1)
np.testing.assert_array_equal(actual_target[:8], dataset.training_targets)
np.testing.assert_array_equal(actual_target[8:], dataset.evaluation_targets)
np.testing.assert_array_equal(
actual_target[0] * 0, dataset.sum_past_targets)
actual_features = (
self._raw_dataset[dataset.feature_names].to_numpy().reshape(9, 2, 3))
np.testing.assert_array_equal(actual_features[:8],
dataset.training_features)
np.testing.assert_array_equal(["2020-05-01",
"2020-05-02",
"2020-05-03",
"2020-05-04",
"2020-05-05",
"2020-05-06",
"2020-05-07",
"2020-05-08"],
dataset.training_dates)
np.testing.assert_array_equal(["2020-05-09"], dataset.evaluation_dates)
np.testing.assert_array_equal(["site_1", "site_2"], dataset.sites)
def test_nan_targets(self):
"""Checks for failure when there are rows with undefined targets."""
self._raw_dataset.loc[1, "new_deceased"] = np.nan
self._mock_raw_dataset.return_value = self._raw_dataset, self._key
with self.assertRaisesWithLiteralMatch(ValueError,
"NaNs found in the target columns."):
dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
**_DEFAULT_ARGS)
def test_missing_row(self):
"""Checks for failure when there are missing rows."""
dataset = self._raw_dataset.loc[1:]
self._mock_raw_dataset.return_value = dataset, self._key
with self.assertRaisesWithLiteralMatch(
ValueError, "Found missing rows in the dataset for a date and site."):
dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
**_DEFAULT_ARGS)
def test_duplicate_row(self):
"""Checks for failure when there are duplicate rows."""
dataset = pd.concat(
[self._raw_dataset, self._raw_dataset[self._raw_dataset.index == 0]])
self._mock_raw_dataset.return_value = dataset, self._key
with self.assertRaisesWithLiteralMatch(
ValueError, "Found duplicate rows in the dataset for a date and site."):
dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
**_DEFAULT_ARGS)
def test_shape_and_value_non_daily_cadence_eval_data(self):
"""Checks that evaluation data is downsampled to fit a required cadence."""
args = _DEFAULT_ARGS.copy()
args["cadence"] = 2
dataset = dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
**args)
np.testing.assert_array_equal(dataset.evaluation_dates, ["2020-05-10"])
np.testing.assert_array_equal(
dataset.evaluation_targets.shape,
(1, 2, 2)) # 1 evaluation date, 2 sites, 2 targets
self.assertTrue(np.all(np.isnan(dataset.evaluation_targets)))
def test_shape_and_value_non_daily_cadence_train_data(self):
"""Checks that training data is downsampled to fit a required cadence."""
args = _DEFAULT_ARGS.copy()
args["cadence"] = 2
feature_names = ["feature_1", "feature_2"]
dataset = dataset_factory.get_dataset(
last_observation_date="2020-05-09",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=feature_names,
num_forecast_dates=1,
**args)
np.testing.assert_array_equal(dataset.training_dates, [
"2020-05-03",
"2020-05-05",
"2020-05-07",
"2020-05-09"])
np.testing.assert_array_equal(
dataset.training_targets.shape, (4, 2, 2))
actual_targets = self._raw_dataset[dataset.target_names].to_numpy().reshape(
9, 2, 2)
np.testing.assert_array_equal(
dataset.training_targets[-1], np.sum(actual_targets[-2:], axis=0))
np.testing.assert_array_equal(
dataset.training_features.shape, (4, 2, 2))
actual_features = self._raw_dataset[feature_names].to_numpy().reshape(
9, 2, 2)
np.testing.assert_array_equal(
dataset.training_features[-1], np.mean(actual_features[-2:], axis=0))
    # With a cadence of 2, the first date is discarded, but it should still
    # be reflected in sum_past_targets.
actual_sum_past_targets = self._raw_dataset[
dataset.target_names].to_numpy().reshape(9, 2, 2)[:1].sum(0)
np.testing.assert_array_equal(
actual_sum_past_targets, dataset.sum_past_targets)
def test_non_daily_cadence_train_data_no_features(self):
"""Checks that downsampling works when there are no features."""
args = _DEFAULT_ARGS.copy()
args["cadence"] = 2
feature_names = []
dataset = dataset_factory.get_dataset(
last_observation_date="2020-05-09",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=feature_names,
num_forecast_dates=1,
**args)
self.assertEqual(dataset.training_features.size, 0)
def test_error_when_dropping_sites(self):
"""Checks for failure when sites have completely missing data."""
self._raw_dataset["feature_1"].loc[self._raw_dataset.site_id ==
"site_1"] = np.nan
self._mock_raw_dataset.return_value = self._raw_dataset, self._key
with self.assertRaisesRegex(
ValueError,
"Found 1 sites where at least 1 feature was entirely missing *"):
dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
**_DEFAULT_ARGS)
def test_sites_dropped_when_permitted(self):
"""Checks that sites are dropped when they completely missing data."""
self._raw_dataset["feature_1"].loc[self._raw_dataset.site_id ==
"site_1"] = np.nan
self._mock_raw_dataset.return_value = self._raw_dataset, self._key
dataset = dataset_factory.get_dataset(
last_observation_date="2020-05-08",
targets=[
constants.Targets.CONFIRMED_NEW, constants.Targets.DECEASED_NEW
],
features=["feature_1", "feature_2"],
num_forecast_dates=1,
allow_dropped_sites=True,
**_DEFAULT_ARGS)
self.assertEqual(dataset.sites, ["site_2"])
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/dataset_factory_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DeepMind COVID-19 evaluation."""
| dm_c19_modelling-main | evaluation/__init__.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Script to visualize trajectories of data and forecasts."""
import datetime
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.evaluation import evaluation
from dm_c19_modelling.evaluation import plot_utils
_PROJECT_DIR = flags.DEFINE_string(
"project_directory", None, "The directory where datasets and models are "
"saved.")
_DATASET_NAME = flags.DEFINE_string("dataset_name", "covid_open_data_world",
"The name of the dataset to use.")
_FORECAST_IDS = flags.DEFINE_list(
    "forecast_ids", None, "The IDs of the forecasts to evaluate. The forecasts "
    "must be comparable: the models used to generate them must have used the "
    "same training dataset, they must have the same forecast date and "
    "forecast_horizon, provide forecasts for the same sites, and must predict "
    "the target specified by target_name.")
_EVAL_DATASET_CREATION_DATE = flags.DEFINE_string(
"eval_dataset_creation_date", "latest", "The creation date of the dataset "
"to use for getting the ground truth for the evaluation dates.")
_TARGET_NAME = flags.DEFINE_string("target_name", None,
"The name of the target to evaluate.")
_NUM_FORECAST_DATES = flags.DEFINE_integer(
"num_forecast_dates", None, "The number of dates to use for evaluation. "
"This is optional: if not specified, evaluation will run on the maximum "
"number of overlapping dates available between the different forecasts.")
_NUM_SITES = flags.DEFINE_integer(
    "num_sites", 16, "The number of sites to plot trajectories for. "
    "This is optional: if not specified, 16 sites will be plotted.")
_OVERWRITE = flags.DEFINE_boolean(
    "overwrite", False, "Force overwriting of existing images. "
    "This is optional: if not specified, defaults to False.")
flags.mark_flags_as_required(
["project_directory", "forecast_ids", "target_name"])
def get_forecast_arrays(directory: str, dataset_name: str,
target_name: constants.Targets,
forecast_ids: Sequence[str]):
"""Get the forecasts from disk."""
(all_forecast_entries,
all_forecasts) = evaluation._load_all_entries_and_forecasts( # pylint: disable=protected-access
directory, dataset_name, forecast_ids, target_name.value)
last_observation_date, forecast_cadence = (
evaluation._get_last_observation_date_and_validate_comparable( # pylint: disable=protected-access
all_forecast_entries))
all_forecast_arrays = evaluation._convert_forecasts_to_arrays(all_forecasts) # pylint: disable=protected-access
return (last_observation_date, forecast_cadence, all_forecast_arrays,
all_forecast_entries)
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
del argv # Unused
directory = _PROJECT_DIR.value
dataset_name = _DATASET_NAME.value
eval_dataset_creation_date = _EVAL_DATASET_CREATION_DATE.value
last_observation_date = None
target_name = constants.Targets(_TARGET_NAME.value)
forecast_ids = _FORECAST_IDS.value
num_forecast_dates = _NUM_FORECAST_DATES.value
num_sites = _NUM_SITES.value
(last_observation_date, forecast_cadence, all_forecast_arrays,
all_forecast_entries) = get_forecast_arrays(
directory=directory,
dataset_name=dataset_name,
target_name=target_name,
forecast_ids=forecast_ids,
)
dates_to_eval, _, sites_to_drop, _ = (
evaluation._get_forecast_spec_and_comparable_predictions( # pylint: disable=protected-access
all_forecast_arrays, num_forecast_dates))
  if sites_to_drop.size:
    logging.warn("Sites to drop include: %s", ", ".join(sites_to_drop))
forecast_horizon = (
datetime.datetime.strptime(max(dates_to_eval), constants.DATE_FORMAT) -
datetime.datetime.strptime(last_observation_date,
constants.DATE_FORMAT)).days
eval_dataset = dataset_factory.get_dataset(
directory=directory,
dataset_name=dataset_name,
creation_date=eval_dataset_creation_date,
last_observation_date=last_observation_date,
targets=[target_name],
features=[],
num_forecast_dates=len(dates_to_eval),
cadence=forecast_cadence)
# Get the actual evaluation creation date in case using 'latest'
eval_dataset_creation_date = evaluation.get_recorded_creation_date(
directory, dataset_name, eval_dataset.dataset_index_key)
plot_utils.plot_trajectories_and_save(
directory=directory,
forecast_ids=forecast_ids,
eval_dataset_creation_date=eval_dataset_creation_date,
forecast_horizon=forecast_horizon,
save=True,
target_name=target_name,
all_forecast_entries=all_forecast_entries,
all_forecast_arrays=all_forecast_arrays,
num_sites=num_sites,
eval_dataset=eval_dataset,
overwrite=_OVERWRITE.value)
if __name__ == "__main__":
app.run(main)
| dm_c19_modelling-main | evaluation/run_trajectory_visualization.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Baseline models to predict COVID-19 deaths / cases by region."""
import abc
import math
from typing import Optional, Tuple
from dm_c19_modelling.evaluation import dataset_factory
import numpy as np
from scipy import optimize
class CurveFittingModel(metaclass=abc.ABCMeta):
"""Models that fit a single target as a function of time for each site.
A curve function is used to fit time to the target, separately for each site.
"""
def __init__(self, num_context_dates: Optional[int], fit_cumulatives: bool):
"""Gets target predictions for the evaluation dates.
Args:
num_context_dates: Number of most recent data points to fit the curve to.
If None, it will fit all available steps in the inputs.
fit_cumulatives: Whether to fit the function to raw targets, or to
cumulatives.
"""
super().__init__()
self._fit_cumulatives = fit_cumulatives
self._num_context_dates = num_context_dates
@abc.abstractmethod
def _fit_and_predict(
self, x_inputs: np.ndarray, y_inputs: np.ndarray, x_outputs: np.ndarray
) -> np.ndarray:
"""Returns the predictions for x_outputs, by fitting the inputs."""
def predict(self, dataset: dataset_factory.Dataset) -> np.ndarray:
"""Uses per-site fitted params to predict targets for evaluation dates.
Args:
dataset: The training dataset.
Returns:
Predictions for the evaluation dates, of shape:
(num_evaluation_dates, num_sites, 1)
"""
num_context_dates = self._num_context_dates
if num_context_dates is None:
num_context_dates = len(dataset.training_dates)
if num_context_dates > len(dataset.training_dates):
      raise ValueError(
          f"Not enough training dates ({len(dataset.training_dates)}) for "
          f"the required number of context dates ({num_context_dates}).")
training_date_range = np.arange(0, num_context_dates)
eval_date_range = np.arange(
num_context_dates, num_context_dates + len(dataset.evaluation_dates))
training_targets = dataset.training_targets
if self._fit_cumulatives:
# We want incremental predictions for the evaluation dates, so need to
# produce a value for the final training date as well to enable taking
# the diff of predictions.
# Alternatively we could consider using the last ground truth value to
# produce the diff for the first day.
eval_date_range = np.concatenate(
[[eval_date_range[0] - 1], eval_date_range])
training_targets = np.cumsum(
training_targets, axis=0) + dataset.sum_past_targets
# Clip after calculating the `cumsum` so the totals are still correct.
training_targets = training_targets[-num_context_dates:] # pylint: disable=invalid-unary-operand-type
predictions_all_sites = np.full(dataset.evaluation_targets.shape, np.nan)
for target_idx in range(len(dataset.target_names)):
for site_idx in range(len(dataset.sites)):
train_targets_for_site = training_targets[:, site_idx, target_idx]
prediction_targets_for_site = self._fit_and_predict(
x_inputs=training_date_range,
y_inputs=train_targets_for_site,
x_outputs=eval_date_range)
if self._fit_cumulatives:
prediction_targets_for_site = np.diff(
prediction_targets_for_site, axis=0)
predictions_all_sites[:, site_idx, target_idx] = (
prediction_targets_for_site)
return predictions_all_sites
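# Worked example for `CurveFittingModel.predict` with `fit_cumulatives=True`
# (illustrative numbers): with incremental targets [2, 3, 5] and
# `sum_past_targets == 10`, the curve is fitted to cumulatives [12, 15, 20].
# Because `eval_date_range` is extended one step back, the curve is also
# evaluated at the final training date; if it predicts cumulatives
# [20, 26, 33], `np.diff` recovers incremental forecasts [6, 7] for the two
# evaluation dates.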
class PolynomialFit(CurveFittingModel):
  """Extrapolates by fitting a polynomial to the most recent data."""
  def __init__(
      self, polynomial_degree: int, num_context_dates: Optional[int],
      fit_cumulatives: bool = False):
    """Initializes the polynomial extrapolation baseline.
Args:
polynomial_degree: Degree of the polynomial to fit.
num_context_dates: See base class.
      fit_cumulatives: If True, fit the cumulative targets instead of the raw
        incremental targets.
"""
super().__init__(
num_context_dates=num_context_dates, fit_cumulatives=fit_cumulatives)
self._polynomial_degree = polynomial_degree
def _fit_and_predict(
self, x_inputs: np.ndarray, y_inputs: np.ndarray, x_outputs: np.ndarray
) -> np.ndarray:
"""Returns the predictions for x_outputs, by fitting the inputs."""
fit_coefficients = np.polyfit(x_inputs, y_inputs, self._polynomial_degree)
return np.polyval(fit_coefficients, x_outputs)
class ScipyCurveFittingModel(CurveFittingModel):
"""Model that fits an arbitrary function using scipy.optimize.curve_fit."""
@abc.abstractmethod
def _curve_function(self, *params):
"""The function to use to fit the target to time for each site."""
@abc.abstractmethod
def _get_initial_params(self, x_inputs: np.ndarray,
y_inputs: np.ndarray) -> Tuple[float, ...]:
"""Gets initialisation values for the parameters in the curve function."""
def _fit_and_predict(
self, x_inputs: np.ndarray, y_inputs: np.ndarray, x_outputs: np.ndarray
) -> np.ndarray:
"""Returns the predictions for x_outputs, by fitting the inputs."""
params, _ = optimize.curve_fit(
self._curve_function,
x_inputs, y_inputs,
maxfev=int(1e5),
p0=self._get_initial_params(x_inputs, y_inputs))
return self._curve_function(x_outputs, *params)
class Logistic(ScipyCurveFittingModel):
"""Fits a logistic function to the cumulative sum of the target."""
  def __init__(self, num_context_dates: Optional[int] = None):
super().__init__(fit_cumulatives=True, num_context_dates=num_context_dates)
def _curve_function(self, t: np.ndarray, a: float, b: float,
c: float) -> np.ndarray:
return a / (1.0 + np.exp(-b * (t - c)))
def _get_initial_params(
self, x_inputs: np.ndarray,
y_inputs: np.ndarray) -> Tuple[float, float, float]:
return (max(y_inputs), 1, np.median(x_inputs))
class Gompertz(ScipyCurveFittingModel):
"""Fits a Gompertz function to the cumulative sum of the target."""
def __init__(self, num_context_dates: Optional[int] = None):
super().__init__(fit_cumulatives=True, num_context_dates=num_context_dates)
def _curve_function(self, t: np.ndarray, a: float, b: float,
c: float) -> np.ndarray:
return a * np.exp(-b * np.exp(-c * t))
def _get_initial_params(
self, x_inputs: np.ndarray,
y_inputs: np.ndarray) -> Tuple[float, float, float]:
return (max(y_inputs), np.median(x_inputs), 1)
class RepeatLastWeek:
"""Repeats the last week's data to predict targets for evaluation dates."""
def predict(self, dataset: dataset_factory.Dataset) -> np.ndarray:
"""Gets target predictions for the evaluation dates.
Args:
dataset: The training dataset.
Returns:
Predictions for the evaluation dates, of shape:
(num_forecast_dates, num_sites, num_targets)
"""
if dataset.cadence == 1:
repeat_period = 7
elif dataset.cadence == 7:
repeat_period = 1
else:
raise ValueError(
"Repeating the last week of data is only valid with a daily or "
f"weekly cadence. Found cadence of {dataset.cadence}.")
if len(dataset.training_dates) < repeat_period:
raise ValueError(
"At least 1 week of training data required to repeat weekly targets. "
f"Found {len(dataset.training_dates)} days.")
last_week_of_observed_targets = dataset.training_targets[-repeat_period:]
num_forecast_dates = len(dataset.evaluation_dates)
num_forecast_weeks = math.ceil(num_forecast_dates / repeat_period)
predictions = np.concatenate([
last_week_of_observed_targets for _ in range(num_forecast_weeks)
])[:num_forecast_dates]
return predictions
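# Minimal usage sketch: fits the linear and logistic baselines to a single
# synthetic trajectory by calling `_fit_and_predict` directly. All numbers are
# illustrative, and calling the private method is only for demonstration.
if __name__ == "__main__":
  x_context = np.arange(0, 10)
  y_context = 2.0 * x_context + 1.0  # Synthetic incremental daily targets.
  x_future = np.arange(10, 14)
  linear_model = PolynomialFit(polynomial_degree=1, num_context_dates=10)
  print("linear:", linear_model._fit_and_predict(  # pylint: disable=protected-access
      x_inputs=x_context, y_inputs=y_context, x_outputs=x_future))
  logistic_model = Logistic(num_context_dates=10)
  print("logistic:", logistic_model._fit_and_predict(  # pylint: disable=protected-access
      x_inputs=x_context, y_inputs=np.cumsum(y_context), x_outputs=x_future))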
| dm_c19_modelling-main | evaluation/baseline_models.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Downloads and saves data from the COVID-19 Open Data repository."""
from absl import app
from absl import flags
from dm_c19_modelling.evaluation import dataset_indexing
from dm_c19_modelling.evaluation import download_data
_PROJECT_DIR = flags.DEFINE_string(
"project_directory", default=None, help="The output directory where the "
"dataset index and dataset should be saved.")
_DATASET_NAME = flags.DEFINE_enum(
"dataset_name", default=None, enum_values=download_data.VALID_DATASETS,
help="The name of the dataset to download.")
flags.mark_flags_as_required(["project_directory", "dataset_name"])
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
del argv # Unused
df, source_data_info = download_data.fetch_data(_DATASET_NAME.value)
dataset_indexing.save_dataset(
df,
directory=_PROJECT_DIR.value,
dataset_name=_DATASET_NAME.value,
source_data_info=source_data_info)
if __name__ == "__main__":
app.run(main)
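# Example invocation (a sketch; the output directory is illustrative):
#   python3 -m dm_c19_modelling.evaluation.run_download_data \
#     --project_directory=/tmp/c19_project \
#     --dataset_name=covid_open_data_world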
| dm_c19_modelling-main | evaluation/run_download_data.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Script to fit a baseline model on a given dataset."""
from absl import app
from absl import flags
from dm_c19_modelling.evaluation import baseline_models
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.evaluation import forecast_indexing
from dm_c19_modelling.evaluation import forecast_utils
_PROJECT_DIR = flags.DEFINE_string(
"project_directory", None, "The directory where data and models are saved.")
_DATASET_NAME = flags.DEFINE_string(
"dataset_name", "covid_open_data_world", "The name of the dataset to use.")
_DATASET_CREATION_DATE = flags.DEFINE_string(
"creation_date", "latest", "The creation date of the dataset to use for "
"training")
_TARGET_NAME = flags.DEFINE_enum("target_name", "new_deceased",
[target.value for target in constants.Targets],
"The name of the target to predict.")
_LAST_OBSERVATION_DATE = flags.DEFINE_string(
"last_observation_date", None, "The date to train up to and evaluate from")
_NUM_FORECAST_DATES = flags.DEFINE_integer(
"num_forecast_dates", 14, "The number of dates to use for evaluation. "
"The forecast horizon in days is equal to num_forecast_dates * cadence")
_MODEL_NAME = flags.DEFINE_enum(
"model_name", "logistic", [model.value for model in constants.Models],
"The model to fit to the data")
_MODEL_DESCRIPTION = flags.DEFINE_string(
"model_description", None, "Optional description to associate with the "
"forecasts output by the model in the forecast index.")
_CADENCE = flags.DEFINE_integer(
"cadence", 1, "The cadence in days of the predictions. i.e. daily "
"predictions have a cadence of 1, weekly predictions have a cadence of 7.")
_WEEKLY_CONVERSION_END_DAY = flags.DEFINE_string(
"weekly_conversion_end_day", None, "Whether to convert predictions to "
"weekly predictions, and if so, what day the week should end on. e.g. A "
"value of Sunday would aggregate through normal weeks, while a value of "
"Saturday would aggregate through epidemiological weeks. This can only be "
"used with a daily cadence.")
_NUM_CONTEXT_DATES = flags.DEFINE_integer(
"num_context_dates", None,
"The number of most recent dates that the baseline will be fitted to. "
"The context horizon in days is equal to num_context_dates * cadence")
flags.mark_flags_as_required(["project_directory", "last_observation_date"])
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
dataset = dataset_factory.get_dataset(
directory=_PROJECT_DIR.value,
dataset_name=_DATASET_NAME.value,
creation_date=_DATASET_CREATION_DATE.value,
last_observation_date=_LAST_OBSERVATION_DATE.value,
targets=[constants.Targets(_TARGET_NAME.value)],
features=[],
num_forecast_dates=_NUM_FORECAST_DATES.value,
cadence=_CADENCE.value)
if _MODEL_NAME.value == constants.Models.LOGISTIC.value:
model_kwargs = dict(num_context_dates=_NUM_CONTEXT_DATES.value)
model = baseline_models.Logistic(**model_kwargs)
elif _MODEL_NAME.value == constants.Models.GOMPERTZ.value:
model_kwargs = dict(num_context_dates=_NUM_CONTEXT_DATES.value)
model = baseline_models.Gompertz(**model_kwargs)
elif _MODEL_NAME.value == constants.Models.LINEAR.value:
model_kwargs = dict(num_context_dates=_NUM_CONTEXT_DATES.value,
polynomial_degree=1)
model = baseline_models.PolynomialFit(**model_kwargs)
elif _MODEL_NAME.value == constants.Models.QUADRATIC.value:
model_kwargs = dict(num_context_dates=_NUM_CONTEXT_DATES.value,
polynomial_degree=2)
model = baseline_models.PolynomialFit(**model_kwargs)
elif _MODEL_NAME.value == constants.Models.REPEAT_LAST_WEEK.value:
model_kwargs = {}
model = baseline_models.RepeatLastWeek(**model_kwargs)
predictions = model.predict(dataset)
cadence = _CADENCE.value
if _WEEKLY_CONVERSION_END_DAY.value:
if cadence != 1:
raise ValueError("Only daily cadence predictions can be pooled to "
"weekly predictions")
predictions, evaluation_dates = (
forecast_utils.pool_daily_forecasts_to_weekly(
predictions, dataset.evaluation_dates,
_WEEKLY_CONVERSION_END_DAY.value))
cadence = 7
else:
evaluation_dates = dataset.evaluation_dates
model_description = {
"name": _MODEL_NAME.value,
"model_kwargs": model_kwargs,
"model_description": _MODEL_DESCRIPTION.value,
}
predictions_df = forecast_indexing.build_predictions_df(
predictions, evaluation_dates, dataset.sites,
dataset.target_names)
forecast_indexing.save_predictions_df(
predictions_df,
directory=str(_PROJECT_DIR.value),
last_observation_date=str(_LAST_OBSERVATION_DATE.value),
forecast_horizon=_NUM_FORECAST_DATES.value,
model_description=model_description,
dataset_name=_DATASET_NAME.value,
dataset_index_key=dataset.dataset_index_key,
cadence=cadence,
features_used=[_TARGET_NAME.value],
extra_info={"first_training_date": dataset.training_dates[0]})
if __name__ == "__main__":
app.run(main)
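# Example invocation (a sketch; the directory and dates are illustrative):
#   python3 -m dm_c19_modelling.evaluation.fit_baseline_model \
#     --project_directory=/tmp/c19_project \
#     --last_observation_date=2020-10-01 \
#     --model_name=logistic \
#     --num_context_dates=28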
| dm_c19_modelling-main | evaluation/fit_baseline_model.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for constructing datasets for modelling."""
import datetime
from typing import NamedTuple, Optional, Sequence, Tuple
from absl import logging
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_indexing
import numpy as np
import pandas as pd
class Dataset(NamedTuple):
"""A dataset with specific training / eval dates, targets & features."""
training_targets: np.ndarray # (num_train_dates, num_sites, num_targets)
evaluation_targets: np.ndarray # (num_forecast_dates, num_sites, num_targets)
training_features: np.ndarray # (num_train_dates, num_sites, num_features)
# Number of summed targets up to the beginning of the training dates.
# Since targets are incremental numbers some models may use this.
sum_past_targets: np.ndarray # (num_sites, num_targets)
feature_names: np.ndarray
target_names: np.ndarray
training_dates: np.ndarray
evaluation_dates: np.ndarray
sites: np.ndarray
dataset_index_key: str
cadence: int
class DataArrays(NamedTuple):
"""Internal helper structure for arrays of data."""
data_array: np.ndarray # Shape (num_dates, num_sites, num_features)
dates_array: np.ndarray # Array of dates for the data
sites_array: np.ndarray # Array of site IDs for the data
features_array: np.ndarray # Array of available feature names for the data
def _load_dataset_by_creation_date(
directory: str, dataset_name: str,
creation_date: str) -> Tuple[pd.DataFrame, str]:
"""Loads a dataset according to its creation date."""
index = dataset_indexing.DatasetIndex(directory, dataset_name)
key = index.query_by_creation_date(creation_date)
dataset = index.load_file_by_key(key)
dataset[constants.DATE] = pd.to_datetime(dataset[constants.DATE])
return dataset, key
def _load_dataset_by_key(directory: str, dataset_name: str,
dataset_index_key: str) -> pd.DataFrame:
index = dataset_indexing.DatasetIndex(directory, dataset_name)
return index.get_entry(dataset_index_key)
def df_to_arrays(df: pd.DataFrame) -> DataArrays:
"""Converts dataframe into a 3D array with axes: (time, site, feature).
Args:
df: Dataframe containing site, date and feature columns.
Returns:
DataArrays containing:
* data_array: the dataset transformed into a 3D numpy array with axes
(time, site, feature)
      * dates_array: the dates that are present in the dataset
      * sites_array: the sites that are present in the dataset
      * features_array: the available features / targets in the dataset
"""
num_sites = len(df[constants.SITE_ID].unique())
num_dates = len(df[constants.DATE].unique())
# The total number of feature columns is the total columns, minus the site
# and date columns.
total_num_columns = len(df.columns) - 2
# For each new grouping of the table, we will take:
# * The count, so we can verify later that there are zero, or 1 elements at
# most per data and per site.
# * The first value corresponding to the only element.
pivoted_df = pd.pivot_table(
df,
index=constants.DATE,
columns=[constants.SITE_ID],
dropna=False,
aggfunc=["first", len]).swaplevel(axis=1)
# Assert that we had no extra rows. The pivot table replaces missing values
# with NaNs, so we replace the NaN lengths by zeros, and then check that at
# most one row contributed to each value.
if pivoted_df["len"].isna().any().any():
raise ValueError("Found missing rows in the dataset for a date and site.")
if not np.all(pivoted_df["len"].values == 1.):
raise ValueError("Found duplicate rows in the dataset for a date and site.")
# Get the first (and only) value for each feature, date and site.
pivoted_df = pivoted_df["first"].swaplevel(axis=1)
# Convert pivot to a numpy array of shape (num_dates, num_sites, num_columns)
data_array = pivoted_df.to_numpy().reshape(num_dates, total_num_columns,
num_sites).swapaxes(2, 1)
dates_array = pivoted_df.index
features_array, sites_array = pivoted_df.columns.levels
return DataArrays(
data_array=data_array,
dates_array=np.array(dates_array),
sites_array=np.array(sites_array),
features_array=np.array(features_array))
def _get_train_and_eval_date_slices(
dates_arr: np.ndarray, last_observation_date: str,
min_num_days_in_training: Optional[int],
max_num_days_in_training: Optional[int],
num_days_in_evaluation_period: int) -> Tuple[slice, slice]:
"""Gets the slices along the date axis for the training and eval dates."""
last_observation_datetime = np.datetime64(last_observation_date)
forecast_index = np.where(dates_arr == last_observation_datetime)[0]
if not forecast_index.size:
raise ValueError(
f"Forecast date {last_observation_date} not found in dataset. "
f"The following dates are available: {dates_arr}")
assert len(forecast_index) == 1, "Found duplicate dates."
forecast_index = forecast_index[0]
first_training_index = 0
if max_num_days_in_training:
first_training_index = max(
forecast_index - max_num_days_in_training + 1, first_training_index)
if min_num_days_in_training is None:
min_num_days_in_training = 0
if forecast_index - first_training_index + 1 < min_num_days_in_training:
raise ValueError(
f"Could not retrieve {min_num_days_in_training} days of data before "
f"{last_observation_date} from dataset.")
last_eval_index = forecast_index + num_days_in_evaluation_period
if last_eval_index >= len(dates_arr):
logging.info(
"Could not retrieve %s days of data after %s from dataset. "
"Evaluation data will be padded with NaNs",
num_days_in_evaluation_period, last_observation_date)
return (slice(first_training_index, forecast_index + 1),
slice(forecast_index + 1, last_eval_index + 1))
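# Worked example for `_get_train_and_eval_date_slices` (illustrative dates):
# if `dates_arr` spans 2020-03-01 to 2020-03-31, `last_observation_date` is
# "2020-03-20" and `num_days_in_evaluation_period` is 7, the last observation
# sits at index 19, so the function returns slice(0, 20) for training and
# slice(20, 27) for evaluation.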
def get_dataset(directory: Optional[str],
dataset_name: str,
creation_date: Optional[str],
last_observation_date: Optional[str],
targets: Sequence[constants.Targets],
features: Sequence[str],
cadence: int,
num_forecast_dates: int,
allow_dropped_sites: bool = False,
min_num_training_dates: Optional[int] = None,
max_num_training_dates: Optional[int] = None) -> Dataset:
"""Gets a dataset.
Args:
directory: The directory where the dataset index is stored.
dataset_name: The name of the dataset (typically the region).
creation_date: The creation date of the dataset to be used. To use the
most recently available dataset, pass 'latest'.
last_observation_date: The last date to include in the training data
(inclusive).
targets: The names of the targets to be modelled.
features: The names of the features to use to predict the targets.
cadence: The cadence of the data to retrieve. All datasets have daily
cadence by default. If the cadence is greater than 1, then incremental
target values are summed over the cadence period, and other features are
averaged.
num_forecast_dates: The number of dates after the last_observation_date to
use for evaluation.
allow_dropped_sites: Whether to allow sites to be dropped if any of the
requested features aren't defined for that site for at least one training
date.
min_num_training_dates: Optional requirement for a minimum number of dates
that must be included in the training data. An error is raised if there is
insufficient data available to satisfy this for the given forecast date.
max_num_training_dates: Optional setting for the maximum number of dates
that can be included in the training data up to and including the
last_observation_date. The training data is truncated to at most this
number of dates.
Returns:
Dataset, containing the following fields:
training_targets: the targets for the training dates, shape
(time, site, target)
training_features: the features for the training dates, shape
(time, site, feature)
evaluation_targets: the targets for the evaluation dates, shape
(time, site, target)
target_names: the list of target names, corresponding to the final axis
in the target arrays.
feature_names: the list of feature names, corresponding to the final axis
in the feature arrays.
training_dates: the list of training dates, corresponding to the first
axis in the training target & feature arrays.
evaluation_dates: the list of evaluation dates, corresponding to the first
axis in the evaluation target array.
sites: the list of site IDs, corresponding to the second axis in the
target & feature arrays.
"""
if num_forecast_dates <= 0:
raise ValueError("At least one future evaluation date must be specified.")
if cadence < 1:
raise ValueError("Cadence must be at least daily.")
targets = sorted([target.value for target in targets])
features = sorted(features)
raw_dataset, dataset_index_key = _load_dataset_by_creation_date(
directory, dataset_name, creation_date)
data_arrays = df_to_arrays(raw_dataset)
missing_features = [
feat for feat in features + targets
if feat not in data_arrays.features_array
]
if missing_features:
raise ValueError(
f"Could not find requested features {missing_features} in dataset")
num_days_in_evaluation_period = num_forecast_dates * cadence
max_num_days_in_training = (
max_num_training_dates
if not max_num_training_dates else max_num_training_dates * cadence)
min_num_days_in_training = (
min_num_training_dates
if not min_num_training_dates else min_num_training_dates * cadence)
train_date_slice, eval_date_slice = _get_train_and_eval_date_slices(
data_arrays.dates_array, last_observation_date,
min_num_days_in_training, max_num_days_in_training,
num_days_in_evaluation_period)
target_indices = np.where(np.in1d(data_arrays.features_array, targets))[0]
assert len(target_indices) == len(targets)
feature_indices = np.where(np.in1d(data_arrays.features_array, features))[0]
assert len(feature_indices) == len(features)
if pd.isnull(data_arrays.data_array[:, :, target_indices]).any():
raise ValueError("NaNs found in the target columns.")
training_features = data_arrays.data_array[
train_date_slice, :, feature_indices].astype(np.float64)
dates_str_arr = np.array([
datetime.datetime.strftime(
pd.to_datetime(str(date)), constants.DATE_FORMAT)
for date in data_arrays.dates_array
])
training_targets = data_arrays.data_array[train_date_slice, :,
target_indices].astype(np.float64)
# We assume our source of data had data from the beginning of pandemic, so
# sum of past targets is zero.
sum_past_targets = np.zeros_like(training_targets[0])
# Plus any discarded initial dates.
if train_date_slice.start: # pylint: disable=using-constant-test
sum_past_targets += data_arrays.data_array[
:train_date_slice.start, :, target_indices].astype(np.float64).sum(0)
evaluation_targets = _maybe_pad_data(
data_arrays.data_array[eval_date_slice, :, target_indices],
num_days_in_evaluation_period).astype(np.float64)
evaluation_dates = _get_evaluation_dates(last_observation_date,
num_days_in_evaluation_period)
# A site is 'valid' to be included in the dataset if all of the requested
  # features are defined for at least one date in the training dates.
if features:
features_and_sites_with_at_least_one_date = ~np.all(
np.isnan(training_features), axis=0)
# There must be at least one date defined for all features.
valid_site_indices = np.where(
np.all(features_and_sites_with_at_least_one_date, axis=1))[0]
else:
valid_site_indices = np.arange(len(data_arrays.sites_array))
if len(valid_site_indices) != len(data_arrays.sites_array):
sites_to_drop = set(data_arrays.sites_array) - set(
np.take(data_arrays.sites_array, valid_site_indices))
if not allow_dropped_sites:
raise ValueError(f"Found {len(sites_to_drop)} sites where at least 1 "
"feature was entirely missing for the requested "
f"features {features}. Set allow_dropped_sites to True "
"if you want to allow these sites to be dropped.")
logging.warn(
"Found %s sites where at least 1 feature was entirely missing "
"for the requested features %s. These sites are being dropped: %s",
len(sites_to_drop), ",".join(features), "\n".join(sites_to_drop))
dataset = Dataset(
training_targets=np.take(training_targets, valid_site_indices, axis=1),
training_features=np.take(training_features, valid_site_indices, axis=1),
evaluation_targets=np.take(
evaluation_targets, valid_site_indices, axis=1),
sum_past_targets=np.take(
sum_past_targets, valid_site_indices, axis=0),
target_names=np.array(targets),
feature_names=np.array(features),
training_dates=dates_str_arr[train_date_slice],
evaluation_dates=evaluation_dates,
sites=np.take(data_arrays.sites_array, valid_site_indices),
dataset_index_key=dataset_index_key,
cadence=1)
if cadence > 1:
dataset = _downsample_dataset_in_time(dataset, cadence)
return dataset
def _downsample_dataset_in_time(dataset: Dataset, cadence: int) -> Dataset:
"""Downsamples a dataset in time according to the required cadence."""
# Crop remainder, removing data from the beginning.
remainder = len(dataset.training_dates) % cadence
training_dates = dataset.training_dates[remainder:]
# Select the last date from each group as the data to be used as label for
# the downsample data. For example, if data is downsampled by putting
# data from Tuesday to Monday together, Monday will be used as the date for
# the data point.
training_dates = training_dates[cadence - 1::cadence]
evaluation_dates = dataset.evaluation_dates[cadence - 1::cadence]
training_features = _downsample_features(
dataset.training_features[remainder:], training_dates,
dataset.feature_names, cadence)
training_targets = _downsample_features(
dataset.training_targets[remainder:], training_dates,
dataset.target_names, cadence)
evaluation_targets = _downsample_features(
dataset.evaluation_targets, evaluation_dates,
dataset.target_names, cadence)
sum_past_targets = dataset.sum_past_targets
if remainder:
sum_past_targets += dataset.training_targets[:remainder].sum(0)
return Dataset(training_targets=training_targets,
evaluation_targets=evaluation_targets,
training_features=training_features,
sum_past_targets=sum_past_targets,
feature_names=dataset.feature_names,
target_names=dataset.target_names,
training_dates=training_dates,
evaluation_dates=evaluation_dates,
sites=dataset.sites,
dataset_index_key=dataset.dataset_index_key,
cadence=cadence)
def _downsample_features(features: np.ndarray, downsampled_dates: np.ndarray,
feature_names: np.ndarray, cadence: int):
"""Downsamples an array of features according to the downsampled dates."""
if not feature_names.size:
return features[::cadence]
# Reshape the features into [downsampled_dates, cadence, ....]
reshaped_features = features.reshape(
[len(downsampled_dates), cadence, -1, len(feature_names)])
output_values = []
for feature_index, feature_name in enumerate(feature_names):
feature_values = reshaped_features[..., feature_index]
if feature_name in [target.value for target in constants.Targets]:
# Accumulate incremental target features.
summarized_values = feature_values.sum(axis=1)
else:
# Take the mean otherwise.
summarized_values = feature_values.mean(axis=1)
output_values.append(summarized_values)
# Rebuild the data array.
output_values = np.stack(output_values, axis=-1)
return output_values
def _maybe_pad_data(data: np.ndarray, required_num_days: int) -> np.ndarray:
  """Pads the date axis of a data array up to the required number of days."""
num_dates, num_sites, num_targets = data.shape
padding = np.full((required_num_days - num_dates, num_sites, num_targets),
np.nan)
return np.concatenate([data, padding], axis=0)
def _get_evaluation_dates(last_observation_date: str,
num_days_in_evaluation_period: int) -> np.ndarray:
"""Gets num_days_in_evaluation_period post last_observation_date."""
last_observation_datetime = datetime.datetime.strptime(
last_observation_date, constants.DATE_FORMAT)
eval_datetimes = [
last_observation_datetime + datetime.timedelta(days=1 + eval_days_ahead)
for eval_days_ahead in range(num_days_in_evaluation_period)
]
return np.array([
datetime.datetime.strftime(eval_datetime, constants.DATE_FORMAT)
for eval_datetime in eval_datetimes
])
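# Minimal usage sketch of `df_to_arrays` on a tiny synthetic dataframe; the
# values are illustrative and "new_deceased" is one of the standard targets.
if __name__ == "__main__":
  _demo_df = pd.DataFrame({
      constants.SITE_ID: ["A", "A", "B", "B"],
      constants.DATE: pd.to_datetime(
          ["2020-05-01", "2020-05-02", "2020-05-01", "2020-05-02"]),
      "new_deceased": [1.0, 2.0, 3.0, 4.0],
  })
  _demo_arrays = df_to_arrays(_demo_df)
  # data_array has shape (num_dates=2, num_sites=2, num_features=1).
  print(_demo_arrays.data_array.shape)
  print(_demo_arrays.sites_array, _demo_arrays.features_array)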
| dm_c19_modelling-main | evaluation/dataset_factory.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Library to download and save data from the COVID-19 Open Data repository."""
import functools
import itertools
from typing import Dict, List, Tuple
from absl import logging
from dm_c19_modelling.evaluation import constants
import pandas as pd
_WORLD_DATASET_NAME = "covid_open_data_world"
_US_STATES_DATASET_NAME = "covid_open_data_us_states"
VALID_DATASETS = [_WORLD_DATASET_NAME, _US_STATES_DATASET_NAME]
_BASE_PATH = "https://storage.googleapis.com/covid19-open-data/v2/"
_TABLES_OF_INTEREST = ("index", "epidemiology", "demographics", "mobility")
_TARGETS = [constants.Targets.DECEASED_NEW, constants.Targets.CONFIRMED_NEW]
# Start date for which target data will be kept.
_FIRST_DATE = {
_WORLD_DATASET_NAME: "2020-01-05",
_US_STATES_DATASET_NAME: "2020-01-22"
}
# Up to this date any missing target data is assumed to be zero.
# This is a grace period to be able to align in time sites that start
# reporting later than the first date, for which our investigations indicate
# the lack of reporting is related to the lack of cases.
_END_GRACE_PERIOD_DATE = {
_WORLD_DATASET_NAME: None,
_US_STATES_DATASET_NAME: "2020-03-15"
}
_DATASET_FILTERS = {
_WORLD_DATASET_NAME: lambda df: df.query("aggregation_level == 0"),
_US_STATES_DATASET_NAME: (
lambda df: df.query("aggregation_level == 1 and country_code == 'US'")
)
}
_SITES_FORCED_DROPPED = {
_WORLD_DATASET_NAME: [],
# Drop US territories. This leaves 50 states + the District of Columbia.
_US_STATES_DATASET_NAME: ["AS", "PR", "VI", "GU", "MP"],
}
def _load_table_data(table_name: str) -> pd.DataFrame:
"""Loads a dataframe from the COVID-19 Open Data repository."""
# Avoid parsing NaNs from strings
df = pd.read_csv(
_BASE_PATH + table_name + ".csv", keep_default_na=False, na_values=[""])
return df
def _get_column_rename_map(dataset_name: str) -> Dict[str, str]:
rename_map = {"date": constants.DATE}
if dataset_name == _WORLD_DATASET_NAME:
rename_map["country_code"] = constants.SITE_ID
elif dataset_name == _US_STATES_DATASET_NAME:
rename_map["subregion1_code"] = constants.SITE_ID
else:
raise ValueError(f"Unknown dataset name {dataset_name}")
return rename_map
def _join_two(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
"""Merges dataframes on the "key" field, and the "date" field if present."""
join_fields = ["key"]
if "date" in df1 and "date" in df2:
join_fields.append("date")
return pd.merge(df1, df2, on=join_fields, how="outer")
def _add_missing_grace_dates_rows(
input_df, first_required_date, end_grace_period_date):
"""Adds potentially missing rows for the grace period."""
all_keys = input_df["key"].unique()
grace_dates = list(
pd.date_range(first_required_date, end_grace_period_date).strftime(
constants.DATE_FORMAT))
primary_key_columns = ["key", constants.DATE]
grace_dates_df = pd.DataFrame(
itertools.product(all_keys, grace_dates), columns=primary_key_columns)
merged = input_df.merge(grace_dates_df, how="outer", on=primary_key_columns)
return merged.sort_values(constants.DATE)
def _drop_sites_with_insufficient_data(df: pd.DataFrame,
dataset_name: str,
target_name: str) -> pd.DataFrame:
"""Drops sites from the dataset due to missing or insufficient data.
Sites are dropped if they meet any of the following:
1. The target_name feature is always missing for that site.
2. The target_name feature isn't defined from _FIRST_DATE (
or _END_GRACE_PERIOD_DATE if applicable) for that site.
3. The target_name feature is missing at any point in the range where it is
defined.
Args:
df: dataframe, the merged data table.
dataset_name: the name of the dataset.
target_name: str, the name of a feature which may be used as a prediction
target.
Returns:
df: dataframe, with sites with insufficient data dropped, and dates
truncated to the valid range.
"""
first_date = _FIRST_DATE[dataset_name]
sites_forced_dropped = _SITES_FORCED_DROPPED[dataset_name]
sites_to_drop_and_reason = {}
max_dates_with_target = []
for site_id, site_df in df.groupby(constants.SITE_ID):
if site_id in sites_forced_dropped:
sites_to_drop_and_reason[site_id] = "forced removed"
continue
if site_df[target_name].isna().all():
sites_to_drop_and_reason[site_id] = "all nans"
continue
min_date_with_target = min(
site_df[~site_df[target_name].isna()][constants.DATE])
max_date_with_target = max(
site_df[~site_df[target_name].isna()][constants.DATE])
if min_date_with_target > first_date:
sites_to_drop_and_reason[site_id] = "missing date rows before first date"
continue
site_df_valid = site_df.query(
f"date >= '{first_date}' and date <= '{max_date_with_target}'"
)
if site_df_valid[target_name].isna().any():
sites_to_drop_and_reason[site_id] = "nan target values"
continue
# Verify that there is exactly one row for each day in the available range
if not (pd.to_datetime(
site_df_valid[constants.DATE]).diff().iloc[1:] == pd.Timedelta(
1, "D")).all():
sites_to_drop_and_reason[site_id] = "non-daily cadence"
continue
max_dates_with_target.append(max_date_with_target)
if sites_to_drop_and_reason:
logging.info("Removing the following sites due to insufficient data for "
"target %s: %s", target_name, sites_to_drop_and_reason)
sites_to_drop = list(sites_to_drop_and_reason.keys())
df = df.query(f"{constants.SITE_ID} not in {sites_to_drop}")
if not max_dates_with_target:
raise ValueError("All sites have been dropped.")
max_available_date = min(max_dates_with_target)
df = df.query(
f"date >= '{first_date}' and date <= '{max_available_date}'")
if df[target_name].isna().any():
raise ValueError(f"NaNs found for {target_name}.")
return df
def _maybe_set_zero_epidemiology_targets_in_grace_period(df, dataset_name):
"""Sets epidemiology targets to 0 for grace period."""
first_date = _FIRST_DATE[dataset_name]
end_grace_period_date = _END_GRACE_PERIOD_DATE[dataset_name]
if (end_grace_period_date is not None and
first_date <= end_grace_period_date):
# Add missing row combinations of dates and sites for the grace period.
df = _add_missing_grace_dates_rows(
df, first_date, end_grace_period_date)
# Replace any nan targets by zeros in between the first target date, and
# the end of the grace period.
for target in _TARGETS:
mask = (df[target.value].isna() &
(df[constants.DATE] >= first_date) &
(df[constants.DATE] <= end_grace_period_date))
df.loc[mask, target.value] = 0
return df
def fetch_data(dataset_name: str) -> Tuple[pd.DataFrame, List[str]]:
"""Download and process data from the COVID-19 Open Data repository.
Args:
dataset_name: The name of the dataset to download and process. Valid options
are 'covid_open_data_world' and 'covid_open_data_us_states'
Returns:
    A tuple of (dataframe, dataset_sources). The dataframe has target,
    feature, site and date columns, and no entries are missing in any target
    column. The dataset_sources indicate where the data was downloaded from.
"""
# Filter the table according to the dataset requirements.
if dataset_name not in VALID_DATASETS:
raise ValueError(f"Unrecognised dataset name {dataset_name} specified. "
f"Valid dataset names are {VALID_DATASETS}")
tables = {name: _load_table_data(name) for name in _TABLES_OF_INTEREST}
# Get the keys that need to be filtered, and filter all tables to those.
keys_to_keep = _DATASET_FILTERS[dataset_name](tables["index"])["key"]
for name in tables.keys():
table = tables[name]
tables[name] = table[table["key"].isin(keys_to_keep)]
# Handle initial grace period for missing epidemiology targets.
tables["epidemiology"] = _maybe_set_zero_epidemiology_targets_in_grace_period(
tables["epidemiology"], dataset_name)
df = functools.reduce(_join_two, tables.values())
df.rename(columns=_get_column_rename_map(dataset_name), inplace=True)
df = _DATASET_FILTERS[dataset_name](df)
# Drop rows without population or date data
df = df[~df.population.isna()]
df = df[~df[constants.DATE].isna()]
# Drop sites with insufficient data for the possible prediction targets
for target in _TARGETS:
df = _drop_sites_with_insufficient_data(df, dataset_name, target.value)
source_data = [_BASE_PATH + table for table in _TABLES_OF_INTEREST]
return df, source_data
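# Example usage (a sketch): `fetch_data` downloads and joins the upstream
# tables, returning the merged dataframe and the list of source table URLs.
#   df, sources = fetch_data("covid_open_data_world")
#   print(df[[constants.SITE_ID, constants.DATE, "new_deceased"]].head())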
| dm_c19_modelling-main | evaluation/download_data.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tools for indexing datasets."""
import datetime
import os
from typing import Any, Dict, Optional, Sequence
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
import pandas as pd
# Internal imports.
class DatasetIndex(base_indexing.BaseIndex):
"""Manages loading, querying, and adding entries to an index of datasets."""
@property
def _index_type(self):
return "dataset"
@property
def _additional_fields(self):
return ("creation_date",)
def query_by_creation_date(
self, creation_date: str) -> str:
"""Gets the key in the index corresponding to the given creation_date.
Args:
creation_date: The required creation date. May be 'latest' which
defaults to the most recent creation date available.
Returns:
      The key of the most recent entry in the index with the required
      creation date. A ValueError is raised if no match is found.
"""
if creation_date == "latest":
# Get the maximum creation date in the dataset
creation_date = max(
[entry["creation_date"] for entry in self._index_dict.values()])
# Get the entry with the required creation date. If there are duplicates
# then take the entry that was most recently created.
matches = {
key: entry["creation_timestamp"]
for key, entry in self._index_dict.items()
if entry["creation_date"] == creation_date
}
if matches:
key, _ = max(matches.items(), key=lambda elt: elt[1])
return key
else:
raise ValueError(
f"Unable to find a dataset with creation date: {creation_date}."
)
def load_file_by_key(self,
key: str,
validate: bool = True) -> pd.DataFrame:
"""Loads the file contained in the index entry with the given key."""
entry = self.get_entry(key)
file_location = entry["file_location"]
if validate:
base_indexing.validate_path(file_location)
logging.info("Loading dataset from %s", file_location)
return pd.read_csv(open(file_location, "r"), keep_default_na=False,
na_values=[""], dtype={constants.SITE_ID: str})
def _validate_file_in_entry(self,
entry: base_indexing.IndexEntryType) -> None:
# Avoid parsing NaNs from strings
file_location = entry["file_location"]
df = pd.read_csv(open(file_location, "r"), keep_default_na=False,
na_values=[""], dtype={constants.SITE_ID: str})
target_names = [target.value for target in constants.Targets]
target_columns = [col for col in df.columns if col in target_names]
if not target_columns:
raise ValueError(
f"No column found for any of the targets: {target_names}")
required_columns = (
constants.DATE,
constants.SITE_ID,
) + tuple(target_columns)
# Validate that all required fields are present and fully defined.
for required_column in required_columns:
if required_column not in df.columns:
raise ValueError(
f"{required_column} missing from dataset at {file_location}")
if df[required_column].isna().any():
raise ValueError(
f"NaNs found in {required_column} for dataset at {file_location}"
)
for site_id, site_df in df.groupby(constants.SITE_ID):
# Check that the diff in dates for all but the first element is always
# 1 day (pandas computes a backwards diff and returns NaN for the first
# element.
if not (pd.to_datetime(
site_df[constants.DATE]).diff().iloc[1:] == pd.Timedelta(
1, "D")).all():
raise ValueError(f"Non-daily cadence found in data for {site_id}")
def build_entry(
file_location: str, dataset_name: str, creation_date: str,
creation_timestamp: str, source_data_info: Sequence[str],
extra_info: Dict[str, Any]) -> base_indexing.IndexEntryType:
"""Builds an entry into a dataset index.
Args:
file_location: the path to the dataset (may be a URL).
dataset_name: the name of the dataset.
creation_date: the date upon which the data was reported.
creation_timestamp: the datetime at which the dataset was created.
source_data_info: a list of the sources used to create this dataset.
extra_info: any extra information that is useful to store alongside the
rest of the dataset metadata.
Returns:
An entry for this dataset that can be added to the dataset index.
"""
return {
"file_location": file_location,
"dataset_name": dataset_name,
"creation_date": creation_date,
"creation_timestamp": creation_timestamp,
"source_data_info": source_data_info,
"extra_info": extra_info
}
def save_dataset(df: pd.DataFrame,
directory: str,
dataset_name: str,
source_data_info: Sequence[str],
creation_date: Optional[str] = None) -> None:
"""Saves the dataset and updates the dataset indexer with its metadata."""
# Create a unique key into the index based on the current time.
index_key = base_indexing.get_unique_key()
datasets_directory = os.path.join(directory, "datasets")
current_datetime = datetime.datetime.utcnow()
if not creation_date:
creation_date = current_datetime.strftime(constants.DATE_FORMAT)
output_filepath = os.path.join(
datasets_directory, f"{dataset_name}_{creation_date}_{index_key}.csv")
if not os.path.exists(datasets_directory):
os.makedirs(datasets_directory)
assert not os.path.exists(output_filepath), (
f"A dataset already exists at {output_filepath}.")
df.to_csv(open(output_filepath, "w"), index=False)
logging.info("Saved dataset to %s", output_filepath)
extra_dataset_info = {
"first_data_date": df[constants.DATE].min(),
"late_data_date": df[constants.DATE].max(),
"number_of_sites": len(df[constants.SITE_ID].unique())
}
entry = build_entry(
file_location=output_filepath,
dataset_name=dataset_name,
creation_date=creation_date,
creation_timestamp=current_datetime.strftime(constants.DATETIME_FORMAT),
source_data_info=source_data_info,
extra_info=extra_dataset_info
)
base_indexing.open_index_and_add_entry(
directory, dataset_name, index_class=DatasetIndex, key=index_key,
entry=entry)
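# Example usage (a sketch; the directory is illustrative and `df` stands for a
# dataframe with site, date and target columns):
#   save_dataset(
#       df,
#       directory="/tmp/c19_project",
#       dataset_name="covid_open_data_world",
#       source_data_info=[
#           "https://storage.googleapis.com/covid19-open-data/v2/"])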
| dm_c19_modelling-main | evaluation/dataset_indexing.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for dataset and evaluation modules."""
import os # pylint: disable=unused-import
import time
# Internal imports.
class FileLock(object):
"""Creates a file lock."""
def __init__(self,
lock_path: str,
timeout: int,
retry_interval: int = 1) -> None:
self._lock_path = lock_path
self._timeout = timeout
self._retry_interval = retry_interval
def acquire(self) -> None:
time_elapsed = 0
while os.path.exists(self._lock_path):
if time_elapsed > self._timeout:
raise IOError(f"Unable to acquire lock {self._lock_path}.")
time_elapsed += self._retry_interval
time.sleep(self._retry_interval)
open(self._lock_path, "w").close()
def release(self) -> None:
os.remove(self._lock_path)
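# Minimal usage sketch of `FileLock`; the lock path is illustrative.
if __name__ == "__main__":
  lock = FileLock("/tmp/c19_demo.lock", timeout=5)
  lock.acquire()
  try:
    pass  # Critical section: e.g. read or update a shared index file.
  finally:
    lock.release()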
| dm_c19_modelling-main | evaluation/utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for plotting metrics."""
import pathlib
from typing import Any, List, Sequence
from absl import logging
from dm_c19_modelling.evaluation import base_indexing
from dm_c19_modelling.evaluation import constants
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import pandas as pd
def plot_metrics(metrics_df: pd.DataFrame, target_name: str,
last_observation_date: str, eval_dataset_creation_date: str,
forecast_horizon: int,
forecast_index_entries: Sequence[base_indexing.IndexEntryType],
num_dates: int, num_sites: int, cadence: int,
dropped_sites: np.ndarray) -> plt.Figure:
"""Plots metrics dataframe as a series of bar charts.
Args:
metrics_df: Dataframe of metrics, with columns [forecast_id, metric_name,
metric_value, target_name].
target_name: the target being predicted.
last_observation_date: the last date in the training data.
eval_dataset_creation_date: the creation date of the dataset used for
evaluation.
forecast_horizon: the number of days into the future that the forecasts
extend to.
forecast_index_entries: the entries in the forecast index for each of the
forecasts that are included in the metrics dataframe.
num_dates: the number of dates included in this evaluation.
num_sites: the number of sites included in this evaluation.
cadence: the cadence of the forecasts i.e. a cadence of 1 corresponds to
daily forecasts, a cadence of 7 corresponds to weekly forecasts.
dropped_sites: optional list of sites that were dropped during evaluation
from at least one forecast to ensure that all forecasts are for the same
sites.
Returns:
A series of bar plots, one for each metric calculated in the dataframe,
evaluating different forecasts against each other.
"""
fig = plt.figure(figsize=(4, 3))
plot_width = 2
offset = 0
column_width = 0.8
axes = []
metric_names = metrics_df.metric_name.unique()
for _ in metric_names:
ax = fig.add_axes([offset, 0.1, plot_width, 1.])
ax.grid(axis="y", alpha=0.3, which="both", zorder=0)
axes.append(ax)
offset += plot_width * 1.2
colour_map = plt.get_cmap("tab20c")(
np.linspace(0, 1.0, len(forecast_index_entries)))
x_centers = np.arange(len(forecast_index_entries))
for ax_idx, metric_name in enumerate(metric_names):
x_offset = ax_idx * column_width - plot_width / 2 + column_width / 2
x_values = x_centers + x_offset
ax = axes[ax_idx]
for bar_idx, forecast_entry in enumerate(forecast_index_entries):
forecast_id = forecast_entry["forecast_id"]
row = metrics_df.query(
f"forecast_id=='{forecast_id}' and metric_name=='{metric_name}'")
assert len(row) == 1, (
"Duplicate entries found in metrics dataframe. "
f"Found {len(row)} entries for {forecast_id} and {metric_name}")
row = row.iloc[0]
metric_value = row.metric_value
ax.bar(
x_values[bar_idx],
metric_value,
width=column_width,
zorder=2,
color=colour_map[bar_idx],
label=_get_model_label(forecast_entry))
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_ylabel(metric_name)
axes[0].legend(
ncol=len(forecast_index_entries),
loc="center left",
bbox_to_anchor=[0., 1.07],
frameon=False)
fig.text(0, 0, _get_plot_footnote(num_sites, num_dates, dropped_sites,
cadence))
fig.suptitle(
_get_plot_title(target_name, last_observation_date,
eval_dataset_creation_date, forecast_horizon),
y=1.35,
x=1)
return fig
def _get_model_label(forecast_entry: base_indexing.IndexEntryType) -> str:
"""Gets a description of a model from its entry in the forecast index."""
description = str(forecast_entry["forecast_id"])
if "model_description" in forecast_entry["extra_info"]:
description += f": {forecast_entry['extra_info']['model_description']}"
return description
def _get_plot_title(target_name: str, last_observation_date: str,
eval_dataset_creation_date: str,
forecast_horizon: int) -> str:
"""Gets the title of the plot."""
return (
f"Comparison of metrics for predicting {target_name}. Forecast date: "
f"{last_observation_date}, forecast horizon: {forecast_horizon} days, "
f"evaluation reporting date: {eval_dataset_creation_date}.")
def _get_plot_footnote(num_sites: int, num_dates: int,
dropped_sites: np.ndarray, cadence: int):
"""Gets the footnote to be added to the plot."""
footnote = (
f"Forecasts evaluated in this plot have a cadence of {cadence} days. "
f"{num_dates} dates and {num_sites} sites were included in the "
"evaluation that produced this plot.")
if dropped_sites.size:
    footnote += (
        " Note that the following sites were dropped from some forecasts "
        "during evaluation to achieve an overlapping set of sites: "
        f"{dropped_sites}")
return footnote
def _plot_trajectories(
all_forecast_entries: List[Any],
all_forecast_arrays: List[Any],
target_name: constants.Targets,
num_sites: int,
eval_dataset: Any = None
) -> plt.Figure:
"""Plots trajectories.
Args:
    all_forecast_entries: forecast index entries (one per forecast), each
      containing at least a "forecast_id" field.
    all_forecast_arrays: per-forecast arrays, each with a `data_array` of
      shape (num_dates, num_sites, 1) plus `dates_array` and `sites_array`.
target_name: the target being predicted.
num_sites: number of sites to plot
eval_dataset: evaluation dataset
Returns:
Figure.
"""
fig = plt.figure(figsize=(16, 16))
dates = all_forecast_arrays[0].dates_array
num_dates = len(dates)
forecast_x = np.arange(num_dates)
x = forecast_x.copy()
x_stride = 14 # Weekly x tick strides.
previous_x = None
avg_values = []
for fa in all_forecast_arrays:
avg_values.append(np.squeeze(fa.data_array, axis=2).mean(axis=0))
site_indices = np.argsort(np.max(avg_values, axis=0))[::-1][:num_sites]
site_names = all_forecast_arrays[0].sites_array[site_indices]
n = len(site_names)
nrows = int(np.ceil(np.sqrt(n)))
ncols = int(np.ceil(n / nrows))
axes = fig.subplots(nrows, ncols)
fig.subplots_adjust(hspace=0.35)
flat_axes = sum(map(list, axes), [])
for _ in range(nrows * ncols - n):
ax = flat_axes.pop()
fig.delaxes(ax)
num_colors = len(all_forecast_entries) + 1
colormap = plt.get_cmap("tab20")
colors = [colormap(i / num_colors) for i in range(num_colors)]
if eval_dataset is not None:
num_previous_dates = num_dates
previous_dates = eval_dataset.training_dates[-num_previous_dates:]
previous_x = np.arange(num_previous_dates)
previous_true_ys = eval_dataset.training_targets[-num_previous_dates:, :, 0]
forecast_true_ys = eval_dataset.evaluation_targets[-num_previous_dates:, :,
0]
forecast_x += num_previous_dates
dates = np.concatenate([previous_dates, dates])
x = np.concatenate([previous_x, forecast_x])
num_dates = len(dates)
x_idx = np.arange(num_dates)[::-1][::x_stride][::-1]
# Center the x axis date ticks around the forecast date.
diffs = x_idx - forecast_x[0]
smallest_diff = np.argmin(np.abs(diffs))
x_idx -= diffs[smallest_diff]
x_idx = np.clip(x_idx, 0, len(x) - 1)
for ax, site_name in zip(flat_axes, site_names):
title = f'site_name="{site_name}"'
ax.set_title(title)
site_idx = all_forecast_arrays[0].sites_array.tolist().index(site_name)
if previous_x is not None:
previous_true_y = previous_true_ys[:, site_idx]
forecast_true_y = forecast_true_ys[:, site_idx]
# Plot vertical forecast date line.
combined_y = np.concatenate([previous_true_y, forecast_true_y])
mn = np.min(combined_y)
mx = np.max(combined_y)
ax.plot(
[forecast_x[0] - 0.5] * 2, [mn, mx],
color=(0.5, 0.5, 0.5),
linestyle="--",
label=f"(forecast date={dates[forecast_x[0]]})")
# Plot past and future true data.
ax.plot(previous_x, previous_true_y, color="k")
ax.plot(forecast_x, forecast_true_y, color="k", label="true_data")
# Plot the forecast trajectories.
ax.axes.set_prop_cycle(color=colors) # Color forecast curves differently.
for forecast_entry, forecast_array in zip(all_forecast_entries,
all_forecast_arrays):
y = forecast_array.data_array[:, site_idx, 0]
ax.plot(
forecast_x, y, label=f"forecast_id={forecast_entry['forecast_id']}")
ax.set_xticks(x[x_idx])
ax.set_xticklabels(dates[x_idx], rotation=30)
if ax.is_last_row():
ax.set_xlabel("Date")
if ax.is_first_col():
ax.set_ylabel(target_name.value)
if ax.is_first_col() and ax.is_first_row():
ax.legend(loc="upper left")
return fig
def plot_trajectories_and_save(directory: str, forecast_ids: Sequence[str],
eval_dataset_creation_date: str,
forecast_horizon: int, save: bool,
target_name: constants.Targets,
all_forecast_entries: List[Any],
all_forecast_arrays: List[Any],
num_sites: int = 16,
eval_dataset: Any = None,
overwrite: bool = False) -> None:
"""Plots trajectories and saves them to file."""
fig = _plot_trajectories(all_forecast_entries, all_forecast_arrays,
target_name, num_sites, eval_dataset=eval_dataset)
if save:
trajectories_dir = pathlib.Path(directory) / "trajectories"
filename_base = (
f"trajectories_{'_'.join(forecast_ids)}_{eval_dataset_creation_date}_"
f"{forecast_horizon}d")
plot_filepath = trajectories_dir / f"{filename_base}.png"
if not trajectories_dir.exists():
trajectories_dir.mkdir(parents=True)
if not overwrite and plot_filepath.exists():
raise IOError(f"Trajectories already exist at {plot_filepath}")
logging.info("Saving trajectory plots to %s", plot_filepath)
fig.savefig(plot_filepath, format="png", bbox_inches="tight")
| dm_c19_modelling-main | evaluation/plot_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.forecast_utils."""
import datetime
from absl.testing import absltest
from absl.testing import parameterized
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import forecast_utils
import numpy as np
class ForecastUtilsTest(parameterized.TestCase):
@parameterized.named_parameters([
("saturday_start_friday_end", "2020-10-17", 13, ["2020-10-24"],
[slice(1, 8)]),
("saturday_start_saturday_end", "2020-10-17", 14,
["2020-10-24", "2020-10-31"], [slice(1, 8), slice(8, 15)]),
("sunday_start_saturday_end", "2020-10-18", 13,
["2020-10-24", "2020-10-31"], [slice(0, 7), slice(7, 14)]),
("sunday_start_sunday_end", "2020-10-18", 14,
["2020-10-24", "2020-10-31"], [slice(0, 7), slice(7, 14)])
])
def test_epiweekly_pooling_dates(self, first_eval_date, num_eval_dates,
expected_eval_dates, expected_summed_slices):
"""Checks that evaluation dates and predictions are pooled correctly."""
eval_datetimes = [
datetime.datetime.strptime(first_eval_date, constants.DATE_FORMAT) +
datetime.timedelta(days=i) for i in range(num_eval_dates + 1)
]
eval_dates = [
date.strftime(constants.DATE_FORMAT) for date in eval_datetimes
]
predictions = np.random.randint(0, 10, (len(eval_dates), 3, 2))
epiweekly_preds, epiweekly_eval_dates = (
forecast_utils.pool_daily_forecasts_to_weekly(
predictions, eval_dates, "Saturday"))
# Ensure that the pooled evaluation dates are the Saturdays in range for
# which there is a full week of predictions available.
np.testing.assert_array_equal(np.array(epiweekly_eval_dates),
expected_eval_dates)
expected_preds = [
np.sum(predictions[expected_slice], axis=0)
for expected_slice in expected_summed_slices
]
# Ensure that the pooled predictions are summed over epiweeks.
np.testing.assert_array_equal(epiweekly_preds, np.array(expected_preds))
def test_epiweekly_pooling_insufficient_data(self):
"""Checks that pooling fails when there's insufficient data."""
eval_dates = ["2020-10-17", "2020-10-18"]
predictions = np.random.randint(0, 10, (2, 2, 2))
with self.assertRaises(ValueError):
forecast_utils.pool_daily_forecasts_to_weekly(predictions, eval_dates,
"Saturday")
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/forecast_utils_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for manipulating forecasts."""
import datetime
import time
from typing import List, Tuple
from dm_c19_modelling.evaluation import constants
import numpy as np
def _get_weekly_slices(datetimes: np.ndarray, week_end_day: str) -> List[slice]:
"""Gets slices from consecutive datetimes corresponding to specified weeks.
Args:
datetimes: 1D array of datetimes from which we want to extract weeks.
week_end_day: the required day of the week that the extracted weeks should
end on. Specified as the full weekday name e.g. 'Saturday'.
Returns:
the slices in the datetimes array that correspond to the specified weeks.
"""
  # The week starts the day after `week_end_day`; the modulo wraps a Sunday
  # week end around to a Monday week start.
  week_start_day = (time.strptime(week_end_day, "%A").tm_wday + 1) % 7
week_starts = np.where(
[date.weekday() == week_start_day for date in datetimes])[0]
return [
slice(start, start + 7)
for start in week_starts
if (start + 7) <= len(datetimes)
]
def pool_daily_forecasts_to_weekly(
predictions: np.ndarray, evaluation_dates: np.ndarray,
week_end_day: str) -> Tuple[np.ndarray, np.ndarray]:
"""Sums daily forecasts up to complete epiweekly forecasts.
Args:
predictions: shape(num_dates, num_sites, num_targets).
evaluation_dates: strings, shape (num_dates).
week_end_day: the desired day of the week to pool up to. E.g. a value of
Sunday would pool predictions over normal weeks; a value of Saturday would
pool predictions over epidemiological weeks.
Returns:
predictions summed across weeks ending on week_end_day, and the end dates
corresponding to the end of those weeks. Where there are insufficient
predictions to construct a full week, these are discarded.
"""
evaluation_datetimes = np.array([
datetime.datetime.strptime(evaluation_date, constants.DATE_FORMAT)
for evaluation_date in evaluation_dates
])
if not np.all(np.diff(evaluation_datetimes) == datetime.timedelta(days=1)):
raise ValueError("Pooling forecasts to epiweekly is only available for "
"daily predictions.")
week_slices = _get_weekly_slices(evaluation_datetimes, week_end_day)
if not week_slices:
raise ValueError("Insufficient predictions to pool to weekly cadence.")
incremental_weekly_preds = [
np.sum(predictions[week_slice], axis=0) for week_slice in week_slices
]
# Get the last date in the pooled week for each week in the evaluation dates.
weekly_evaluation_datetimes = [
evaluation_datetimes[week_slice][-1] for week_slice in week_slices
]
condition = np.all([
date.strftime("%A") == week_end_day
for date in weekly_evaluation_datetimes
])
assert condition, "Incorrect day found in evaluation datetimes"
weekly_evaluation_dates = [
date.strftime(constants.DATE_FORMAT)
for date in weekly_evaluation_datetimes
]
return np.array(incremental_weekly_preds), np.array(weekly_evaluation_dates)
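# Illustrative usage sketch, not part of the original module; the shapes and
# dates below are assumed. Fourteen consecutive daily forecasts starting on a
# Sunday pool into two epiweeks ending on the following two Saturdays.
def _example_pool_daily_forecasts_to_weekly():
  """Demonstrates epiweekly pooling on synthetic daily forecasts."""
  daily_preds = np.ones((14, 3, 2))  # (num_dates, num_sites, num_targets).
  eval_dates = np.array(
      ["2020-10-%02d" % day for day in range(18, 32)])  # Sunday 2020-10-18 on.
  weekly_preds, weekly_dates = pool_daily_forecasts_to_weekly(
      daily_preds, eval_dates, "Saturday")
  # weekly_preds has shape (2, 3, 2); each entry sums 7 daily predictions.
  # weekly_dates is ["2020-10-24", "2020-10-31"], the Saturdays in range.
  return weekly_preds, weekly_dates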
| dm_c19_modelling-main | evaluation/forecast_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.evaluation."""
import datetime
import itertools
from absl.testing import absltest
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import dataset_factory
from dm_c19_modelling.evaluation import evaluation
from dm_c19_modelling.evaluation import forecast_indexing
import numpy as np
import pandas as pd
_TEST_DATASET = "test_dataset"
def _get_forecasts(dates, sites):
return pd.DataFrame({
constants.DATE: np.repeat(dates, len(sites)),
constants.SITE_ID: np.tile(sites, len(dates)),
constants.PREDICTION: np.random.rand(len(dates) * len(sites)) * 10,
})
def _get_entry(last_observation_date="2020-05-07",
dataset_index_key="dset_index_1", cadence=1):
return forecast_indexing.build_entry(
forecast_id="",
file_location="",
dataset_name=_TEST_DATASET,
creation_timestamp="",
dataset_index_key=dataset_index_key,
dataset_location="",
last_observation_date=last_observation_date,
cadence=cadence,
extra_info={})
def _get_dataset(eval_dates, sites, target="new_confirmed", cadence=1):
training_datetime = (
datetime.datetime.strptime(eval_dates[0], "%Y-%m-%d") -
datetime.timedelta(days=1))
return dataset_factory.Dataset(
training_targets=np.random.randint(0, 10, (1, len(sites), 1)),
training_features=[],
evaluation_targets=np.random.randint(0, 10,
(len(eval_dates), len(sites), 1)),
sum_past_targets=np.random.randint(0, 10, (len(sites), 1)),
target_names=[target],
feature_names=[],
training_dates=[
datetime.datetime.strftime(training_datetime, "%Y-%m-%d")
],
evaluation_dates=eval_dates,
sites=np.array(sites),
dataset_index_key="test_dset_index_1",
cadence=cadence)
class EvaluationTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._default_dates = ["2020-05-07", "2020-05-08", "2020-05-09"]
self._default_sites = ["site_1", "site_2"]
def test_comparable_forecasts_subset_sites(self):
"""Check that validation fails for forecasts with different sites."""
forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
forecasts_2 = _get_forecasts(self._default_dates, ["site_1", "site_3"])
all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
[forecasts_1, forecasts_2])
_, sites_to_eval, sites_to_drop, updated_data_arrays = (
evaluation._get_forecast_spec_and_comparable_predictions(
all_forecast_arrays))
np.testing.assert_array_equal(sites_to_eval, ["site_1"])
np.testing.assert_array_equal(sites_to_drop, ["site_2", "site_3"])
np.testing.assert_array_equal(
updated_data_arrays, np.array(
[arr.data_array for arr in all_forecast_arrays])[:, :, 0:1, :])
np.testing.assert_array_equal(updated_data_arrays[0].shape,
updated_data_arrays[1].shape)
def test_incomparable_forecasts_subset_dates(self):
"""Check that validation fails for forecasts with different start dates."""
forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
forecasts_2 = _get_forecasts(["2020-05-06", "2020-05-07"],
self._default_sites)
all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
[forecasts_1, forecasts_2])
dates_to_eval, _, _, updated_data_arrays = (
evaluation._get_forecast_spec_and_comparable_predictions(
all_forecast_arrays))
np.testing.assert_array_equal(dates_to_eval, ["2020-05-07"])
np.testing.assert_array_equal(
updated_data_arrays[0], all_forecast_arrays[0].data_array[0:1])
np.testing.assert_array_equal(
updated_data_arrays[1], all_forecast_arrays[1].data_array[1:2])
np.testing.assert_array_equal(updated_data_arrays[0].shape,
updated_data_arrays[1].shape)
def test_valid_different_forecast_horizons(self):
"""Check that validation passes for forecasts with different horizons."""
forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
forecasts_2 = _get_forecasts(["2020-05-07", "2020-05-08"],
self._default_sites)
all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
[forecasts_1, forecasts_2])
evaluation._get_forecast_spec_and_comparable_predictions(
all_forecast_arrays)
def test_badly_formatted_forecasts(self):
"""Checks that forecasts with unexpected format fail evaluation."""
forecasts = _get_forecasts(self._default_dates, self._default_sites)
forecasts["extra_column"] = ""
all_forecast_arrays = evaluation._convert_forecasts_to_arrays([forecasts])
with self.assertRaisesRegex(AssertionError,
"Unexpected columns in forecasts:*"):
evaluation._get_forecast_spec_and_comparable_predictions(
all_forecast_arrays)
def test_incomparable_last_observation_dates(self):
"""Checks that validation fails for different last_observation_dates."""
entry_1 = _get_entry(last_observation_date="2020-05-06")
entry_2 = _get_entry(last_observation_date="2020-05-07")
with self.assertRaisesRegex(
ValueError,
"Models can only be compared if they have the same "
"last_observation_date. *"):
evaluation._get_last_observation_date_and_validate_comparable(
[entry_1, entry_2])
def test_incomparable_forecast_cadences(self):
"""Checks that validation fails for forecasts with different cadences."""
entry_1 = _get_entry(cadence=1)
entry_2 = _get_entry(cadence=7)
with self.assertRaisesRegex(
ValueError,
"Models can only be compared if they have the same forecast cadence *"):
evaluation._get_last_observation_date_and_validate_comparable(
[entry_1, entry_2])
def test_incomparable_forecast_sources(self):
"""Checks validation fails for forecasts trained on different datasets."""
entry_1 = _get_entry(dataset_index_key="dset_index_1")
entry_2 = _get_entry(dataset_index_key="dset_index_2")
with self.assertRaisesRegex(
ValueError,
"Models can only be compared if they were trained using the same "
"dataset.*"):
evaluation._get_last_observation_date_and_validate_comparable(
[entry_1, entry_2])
def test_incomparable_eval_dset_missing_sites(self):
"""Checks that validation fails when the dataset is missing sites."""
dataset = _get_dataset(self._default_dates, ["site_1", "site_3"])
with self.assertRaisesRegex(
ValueError, "Not all of the sites in the forecasts are present in the "
"evaluation dataset*"):
evaluation._validate_eval_dataset_comparable(dataset, self._default_dates,
self._default_sites)
def test_incomparable_eval_dset_missing_dates(self):
"""Checks that validation fails when the dataset is missing dates."""
dataset = _get_dataset(["2020-05-08", "2002-05-09"], self._default_sites)
with self.assertRaisesWithLiteralMatch(
AssertionError, "Dates in forecasts differ from dates in evaluation "
"dataset"):
evaluation._validate_eval_dataset_comparable(dataset, self._default_dates,
self._default_sites)
def test_calculate_metrics(self):
"""Checks that metric calculations yield the correct values."""
predictions = np.array([[[1.], [2.], [3.]], [[1.], [2.], [2.]]])
ground_truth = np.array([[[1.], [1.], [1.]], [[1.], [1.], [2.]]])
metrics_df = evaluation._calculate_metrics("", predictions, ground_truth,
"new_confirmed")
self.assertAlmostEqual(
1.,
metrics_df.query("metric_name=='rmse'").metric_value.values[0])
def test_convert_forecasts_to_arrays(self):
"""Checks all data is preserved when converting DataFrames to arrays."""
forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
forecasts_2 = _get_forecasts(self._default_dates, self._default_sites[::-1])
forecast_dfs = [forecasts_1, forecasts_2]
forecast_arrays = evaluation._convert_forecasts_to_arrays(forecast_dfs)
for array, df in zip(forecast_arrays, forecast_dfs):
np.testing.assert_array_equal(array.dates_array, self._default_dates)
np.testing.assert_array_equal(array.sites_array, self._default_sites)
np.testing.assert_array_equal(array.features_array,
[constants.PREDICTION])
for date, site in itertools.product(self._default_dates,
self._default_sites):
date_idx = list(array.dates_array).index(date)
site_idx = list(array.sites_array).index(site)
array_entry = array.data_array[date_idx][site_idx][0]
df_entry = df.query(
f"site_id=='{site}' and date=='{date}'").prediction.values[0]
self.assertAlmostEqual(df_entry, array_entry)
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/evaluation_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.dataset_indexing."""
import os
from absl.testing import absltest
from dm_c19_modelling.evaluation import dataset_indexing
import pandas as pd
_TEST_DATASET = "test_dataset"
_TEST_DATASET_FILE = "test_dataset.csv"
def _get_test_entry(directory):
return {
"file_location": os.path.join(directory, _TEST_DATASET_FILE),
"source_data_info": ["test_source_1", "test_source_2"],
"creation_timestamp": "2020-06-07_12:43:02",
"dataset_name": _TEST_DATASET,
"creation_date": "2020-06-07",
"extra_info": {}
}
def _create_dataset(file_location):
df = pd.DataFrame({"site_id": ["A"], "date": ["2020-05-07"],
"new_deceased": [0], "new_confirmed": [0]})
df.to_csv(file_location, index=False)
class DatasetIndexingTest(absltest.TestCase):
def setUp(self):
super().setUp()
self._test_dir = absltest.get_default_test_tmpdir()
os.makedirs(self._test_dir, exist_ok=True)
self._key = "12345"
self._entry = _get_test_entry(self._test_dir)
self._index_path = os.path.join(
self._test_dir, f"dataset_index-{_TEST_DATASET}.json")
_create_dataset(self._entry["file_location"])
self._remove_index_if_exists()
def _remove_index_if_exists(self):
if os.path.exists(self._index_path):
os.remove(self._index_path)
def test_write_operation_not_in_read_only(self):
"""Test that opening the index in read-only mode prevents writing."""
index = dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET)
with self.assertRaisesWithLiteralMatch(
IOError,
"Attempting to write to the index when it is in read-only mode."):
index.add_entry(self._key, {})
def test_write_operation_not_in_context(self):
"""Tests that the index can't be used in write mode outside of a context."""
index = dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False)
with self.assertRaisesWithLiteralMatch(
IOError, ("Index has not been loaded. The index should be used as a "
"context when not in read-only mode")):
index.add_entry(self._key, {})
def test_create_new_index_and_add_entry(self):
"""Tests that an index can get created and an entry added."""
with dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(self._key, self._entry)
assert os.path.exists(self._index_path)
def test_create_new_index_add_entry_with_missing_field(self):
"""Tests that adding an entry with missing fields fails."""
del self._entry["creation_timestamp"]
with dataset_indexing.DatasetIndex(
self._test_dir, _TEST_DATASET, read_only=False) as index:
with self.assertRaisesRegex(ValueError, "Entry must have fields *"):
index.add_entry(self._key, self._entry)
def test_add_duplicate_entry(self):
"""Tests that adding an entry with a duplicated key fails."""
with dataset_indexing.DatasetIndex(
self._test_dir, _TEST_DATASET, read_only=False) as index:
index.add_entry(self._key, self._entry)
with self.assertRaisesWithLiteralMatch(
ValueError,
("Found entry for given key. Index keys must be unique.")):
index.add_entry(self._key, self._entry)
def test_create_new_index_add_invalid_creation_timestamp(self):
"""Tests creation timestamp format validation."""
self._entry["creation_timestamp"] = "2020-06-07"
with dataset_indexing.DatasetIndex(
self._test_dir, _TEST_DATASET, read_only=False) as index:
with self.assertRaisesWithLiteralMatch(ValueError,
"Cannot parse creation_timestamp"):
index.add_entry(self._key, self._entry)
def test_create_new_index_add_non_existent_file(self):
"""Tests filepath validation."""
bad_file_location = os.path.join(self._test_dir, "bad_file")
self._entry["file_location"] = bad_file_location
with dataset_indexing.DatasetIndex(
self._test_dir, _TEST_DATASET, read_only=False) as index:
with self.assertRaisesRegex(IOError,
f"Path {bad_file_location} not found *"):
index.add_entry(self._key, self._entry)
def test_add_to_existing_index(self):
"""Tests that an entry can be added to an existing index."""
entry_2 = self._entry.copy()
entry_2["creation_date"] = "2020-06-08"
key_2 = "123456"
with dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(self._key, self._entry)
with dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(key_2, entry_2)
read_index = dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET)
self.assertIsNotNone(read_index.query_by_creation_date("2020-06-07"))
self.assertIsNotNone(read_index.query_by_creation_date("2020-06-08"))
def test_get_latest_creation_date(self):
"""Tests that querying 'latest' creation date returns the correct key."""
with dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(self._key, self._entry)
read_index = dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET)
self.assertEqual(read_index.query_by_creation_date("latest"), self._key)
def test_query_by_creation_date_duplicates(self):
"""Tests that querying a duplicated creation date gets the latest entry."""
entry_2 = self._entry.copy()
key_2 = "123456"
entry_2["creation_timestamp"] = "2020-06-07_16:43:02"
with dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET,
read_only=False) as index:
index.add_entry(self._key, self._entry)
index.add_entry(key_2, entry_2)
read_index = dataset_indexing.DatasetIndex(self._test_dir, _TEST_DATASET)
self.assertEqual(read_index.query_by_creation_date("latest"), key_2)
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/dataset_indexing_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Base class for indexers that store dataset and forecast metadata."""
import abc
import datetime
import json
import os
import shutil # pylint: disable=unused-import
from typing import Any, Callable, Dict, Optional, Sequence, Set, Union
from absl import logging
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import utils
# Internal imports.
_JSON_DUMP_KWARGS = dict(sort_keys=True, indent=4, separators=(",", ": "))
IndexEntryType = Dict[str, Union[str, int, Dict[str, Any], Sequence[str]]]
def get_unique_key() -> str:
"""Gets a unique key based on the current time to use in indexing."""
return str(int(datetime.datetime.utcnow().timestamp() * int(1e6)))
def validate_path(path: str) -> None:
"""Validates that a path exists."""
if not os.path.exists(path):
existing = "\n".join(sorted(os.listdir(os.path.dirname(path))))
raise IOError(f"Path {path} not found. Found:\n{existing}")
def _create_index_path_from_name(directory: str,
dataset_name: str,
index_type: str) -> str:
"""Gets the index's path for a given directory and dataset name."""
filename = f"{index_type}_index-{dataset_name}.json"
return os.path.join(directory, filename)
def _load_index(path: str) -> Dict[str, IndexEntryType]:
"""Loads index given its path ."""
logging.info("Index file found, loading")
with open(path, "r") as infile:
index_dict = json.load(infile)
return index_dict
def _save_index(path: str, index: Dict[str, IndexEntryType]) -> None:
"""Saves an index as JSON to file, creating a backup of the previous index."""
if os.path.exists(path):
backup_path = path + ".backup"
if os.path.exists(backup_path):
os.remove(backup_path)
shutil.copy2(path, backup_path)
with open(path, "w") as outfile:
json.dump(index, outfile, **_JSON_DUMP_KWARGS)
logging.info("Saved index dict at %s", path)
def validate_entry(entry: IndexEntryType,
required_fields: Set[str],
validate_file_location: bool = True) -> None:
"""Checks that an entry's required fields exist and have valid values."""
if ("creation_timestamp" not in required_fields or
"file_location" not in required_fields):
raise ValueError(
"Required fields must include creation_timestamp and file_location")
if set(entry) != required_fields:
raise ValueError(
f"Entry must have fields: {', '.join(sorted(required_fields))}. "
f"Has fields: {', '.join(sorted(entry))}")
try:
datetime.datetime.strptime(entry["creation_timestamp"],
constants.DATETIME_FORMAT)
except ValueError:
raise ValueError("Cannot parse creation_timestamp")
if validate_file_location:
validate_path(entry["file_location"])
class BaseIndex(metaclass=abc.ABCMeta):
"""Manages loading, querying, and adding entries to an index.
Minimum required entry fields:
"file_location": `str` with absolute file path/url of the file
"source_data_info": `dict` with info about the file's source data
"creation_timestamp": `str` that can be parsed with "%Y-%m-%d_%H:%M:%S"
"extra_info": `dict` with any additional relevant information
Indexes may have additional required fields, specified in '_additional_fields'
"""
def __init__(self, directory: str, dataset_name: str, read_only: bool = True):
self._index_path = _create_index_path_from_name(
directory, dataset_name, self._index_type)
if not read_only:
self._lock = utils.FileLock(f"{self._index_path}.lock", timeout=100)
else:
self._lock = None
self._index_dict = None
if read_only:
self._index_dict = self._load_index()
def __repr__(self):
    return json.dumps(self._index_dict, **_JSON_DUMP_KWARGS)
def __enter__(self):
if self._lock:
self._lock.acquire()
self._index_dict = self._load_index()
return self
def __exit__(self, *args):
if self._lock:
self._store()
self._lock.release()
@property
def _fields(self):
"""The required fields in every index entry."""
return set(self._additional_fields) | {
"dataset_name", "file_location", "source_data_info",
"creation_timestamp", "extra_info"
}
@abc.abstractproperty
def _index_type(self):
"""The name of the type of index."""
@abc.abstractproperty
def _additional_fields(self):
"""The names of additional fields specific to the index."""
@abc.abstractmethod
def load_file_by_key(self, key: str) -> Any:
"""Loads the file contained in the index entry with the given key."""
@abc.abstractmethod
def _validate_file_in_entry(self, entry: IndexEntryType) -> None:
"""Checks that the file in the entry loads / is formatted correctly."""
def _load_index(self) -> Dict[str, IndexEntryType]:
"""Loads an index from file."""
try:
validate_path(self._index_path)
except IOError:
logging.info("No existing index found, creating a new one.")
return {}
return _load_index(self._index_path)
def _store(self) -> None:
"""Stores an index at index_path."""
if not self._lock:
raise IOError(
"Attempting to write to the index when it is in read-only mode.")
if self._index_dict is None:
raise IOError(
"Index has not been loaded. The index should be used as a context "
"when not in read-only mode")
_save_index(self._index_path, self._index_dict)
def add_entry(self,
key: Optional[str],
entry: IndexEntryType,
validate_file_location: bool = True,
validate_file_in_entry: bool = True) -> None:
"""Adds an entry to an index, validating its fields."""
key = key or get_unique_key()
if not self._lock:
raise IOError(
"Attempting to write to the index when it is in read-only mode.")
if self._index_dict is None:
raise IOError(
"Index has not been loaded. The index should be used as a context "
"when not in read-only mode")
if key in self._index_dict:
raise ValueError("Found entry for given key. Index keys must be unique.")
validate_entry(entry, self._fields, validate_file_location)
if validate_file_in_entry:
self._validate_file_in_entry(entry)
self._index_dict[key] = entry
def remove_entry(self, key: str) -> None:
"""Removes an entry from the index given its key."""
if not self._lock:
raise IOError(
"Attempting to modify the index when it is in read-only mode.")
if self._index_dict is None:
raise IOError(
"Index has not been loaded. The index should be used as a context "
"when not in read-only mode")
if key not in self._index_dict:
raise ValueError(f"Could not find entry with key {key} to delete.")
del self._index_dict[key]
def get_entry(self, key: str) -> IndexEntryType:
"""Gets an entry from the index given a key."""
if key not in self._index_dict:
raise ValueError(f"Invalid key {key} not found in index.")
return self._index_dict[key]
def open_index_and_add_entry(directory: str,
dataset_name: str,
index_class: Callable[..., BaseIndex],
key: Optional[str],
entry: IndexEntryType,
validate_file_location: bool = True,
validate_file_in_entry: bool = False) -> None:
"""Opens an index and adds an entry into it.
Args:
directory: the directory where the index is or will be stored.
dataset_name: the name given to the dataset to which the index relates.
index_class: the class of index to add to.
key: optional unique identifier.
entry: the entry into the index, returned from build_entry.
validate_file_location: whether to validate that the file location in the
entry exists on disk.
validate_file_in_entry: whether to validate that the file referenced in the
entry adheres to specific formatting requirements.
"""
# Create a unique key into the index based on the current time.
key = key or get_unique_key()
with index_class(directory, dataset_name, read_only=False) as index:
index.add_entry(key, entry, validate_file_location=validate_file_location,
validate_file_in_entry=validate_file_in_entry)
| dm_c19_modelling-main | evaluation/base_indexing.py |
# pylint: disable=g-bad-file-header
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for dm_c19_modelling.evaluation.download_data."""
from absl.testing import absltest
from dm_c19_modelling.evaluation import constants
from dm_c19_modelling.evaluation import download_data
import numpy as np
import pandas as pd
class DownloadDataTest(absltest.TestCase):
def test_join_dataframes_by_key_and_date(self):
df1 = {
"key": ["a", "b"],
"date": ["2020-06-07", "2020-07-07"],
"values_1": [1, 2]
}
df2 = {
"key": ["a", "c"],
"date": ["2020-06-07", "2020-07-07"],
"values_2": [2, 3]
}
df_joined = download_data._join_two(pd.DataFrame(df1), pd.DataFrame(df2))
np.testing.assert_array_equal(df_joined.columns,
["key", "date", "values_1", "values_2"])
np.testing.assert_array_equal(df_joined["key"], ["a", "b", "c"])
np.testing.assert_array_equal(df_joined["date"],
["2020-06-07", "2020-07-07", "2020-07-07"])
np.testing.assert_array_equal(df_joined["values_1"], [1, 2, np.nan])
np.testing.assert_array_equal(df_joined["values_2"], [2, np.nan, 3])
def test_join_dataframes_by_key(self):
df1 = {"key": ["a", "c"], "values_1": [1, 2]}
df2 = {
"key": ["a", "a", "c"],
"date": ["2020-06-07", "2020-07-07", "2020-07-07"],
"values_2": [2, 3, 4]
}
df_joined = download_data._join_two(pd.DataFrame(df1), pd.DataFrame(df2))
np.testing.assert_array_equal(df_joined.columns,
["key", "values_1", "date", "values_2"])
np.testing.assert_array_equal(df_joined["key"], ["a", "a", "c"])
np.testing.assert_array_equal(df_joined["date"],
["2020-06-07", "2020-07-07", "2020-07-07"])
np.testing.assert_array_equal(df_joined["values_1"], [1, 1, 2])
np.testing.assert_array_equal(df_joined["values_2"], [2, 3, 4])
def test_non_daily_cadence(self):
df = {
"site_id": ["a", "a", "b"],
"date": ["2020-01-05", "2020-01-15", "2020-01-05"],
"new_confirmed": [10, 20, 30]
}
df = download_data._drop_sites_with_insufficient_data(
pd.DataFrame(df), "covid_open_data_world", "new_confirmed")
np.testing.assert_array_equal(df["site_id"], ["b"])
def test_all_sites_dropped(self):
df = {
"site_id": ["a"],
        # The first required date for the world data is 2020-01-05, so this
# should be dropped.
"date": ["2020-01-15"],
"new_confirmed": [10]
}
with self.assertRaisesWithLiteralMatch(
ValueError, "All sites have been dropped."):
download_data._drop_sites_with_insufficient_data(
pd.DataFrame(df), "covid_open_data_world", "new_confirmed")
def test_missing_early_data_dropped(self):
df = {
"site_id": ["a", "b", "b"],
"date": ["2020-01-05", "2020-01-06", "2020-01-07"],
"new_confirmed": [10, 30, 20]
}
df = download_data._drop_sites_with_insufficient_data(
pd.DataFrame(df), "covid_open_data_world", "new_confirmed")
np.testing.assert_array_equal(df["site_id"], ["a"])
def test_grace_period(self):
df = {
"key": ["a", "a", "a",
"b", "b", "b",
],
"date": ["2020-03-16", "2020-03-17", "2020-03-18",
"2020-03-15", "2020-03-16", "2020-03-17",],
"new_confirmed": [1, 2, 3,
4, 5, 6],
"new_deceased": [10, 20, 30,
40, 50, 60]
}
df = download_data._maybe_set_zero_epidemiology_targets_in_grace_period(
pd.DataFrame(df), "covid_open_data_us_states")
self.assertSetEqual({"a", "b"}, set(df["key"].unique()))
# Check values and dates for "a":
a_df = df.query("key=='a'").sort_values("date")
expected_dates = list(pd.date_range("2020-01-22", "2020-03-18").strftime(
constants.DATE_FORMAT))
np.testing.assert_array_equal(a_df["date"], expected_dates)
expected_confirmed = [0] * 54 + [1, 2, 3]
np.testing.assert_array_equal(a_df["new_confirmed"], expected_confirmed)
expected_deceased = [0] * 54 + [10, 20, 30]
np.testing.assert_array_equal(a_df["new_deceased"], expected_deceased)
# Check values and dates for "b":
b_df = df.query("key=='b'").sort_values("date")
expected_dates = list(pd.date_range("2020-01-22", "2020-03-17").strftime(
constants.DATE_FORMAT))
np.testing.assert_array_equal(b_df["date"], expected_dates)
expected_confirmed = [0] * 53 + [4, 5, 6]
np.testing.assert_array_equal(b_df["new_confirmed"], expected_confirmed)
expected_deceased = [0] * 53 + [40, 50, 60]
np.testing.assert_array_equal(b_df["new_deceased"], expected_deceased)
def test_missing_data_dropped(self):
df = {
"site_id": ["a", "a", "a", "b"],
"date": ["2020-01-05", "2020-01-06", "2020-01-07", "2020-01-05"],
"new_confirmed": [10, np.NaN, 10, 20]
}
df = download_data._drop_sites_with_insufficient_data(
pd.DataFrame(df), "covid_open_data_world", "new_confirmed")
np.testing.assert_array_equal(df["site_id"], ["b"])
def test_data_truncation(self):
df = {
"site_id": ["a", "a", "a", "b", "b"],
"date": [
"2020-01-05", "2020-01-06", "2020-01-07", "2020-01-05", "2020-01-06"
],
"new_confirmed": [10, 10, 10, 20, 30]
}
df = download_data._drop_sites_with_insufficient_data(
pd.DataFrame(df), "covid_open_data_world", "new_confirmed")
self.assertEqual(df.date.max(), "2020-01-06")
if __name__ == "__main__":
absltest.main()
| dm_c19_modelling-main | evaluation/download_data_test.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CLEVR (with masks) dataset reader."""
import tensorflow.compat.v1 as tf
COMPRESSION_TYPE = tf.io.TFRecordOptions.get_compression_type_string('GZIP')
IMAGE_SIZE = [240, 320]
# The maximum number of foreground and background entities in the provided
# dataset. This corresponds to the number of segmentation masks returned per
# scene.
MAX_NUM_ENTITIES = 11
BYTE_FEATURES = ['mask', 'image', 'color', 'material', 'shape', 'size']
# Create a dictionary mapping feature names to `tf.Example`-compatible
# shape and data type descriptors.
features = {
'image': tf.FixedLenFeature(IMAGE_SIZE+[3], tf.string),
'mask': tf.FixedLenFeature([MAX_NUM_ENTITIES]+IMAGE_SIZE+[1], tf.string),
'x': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'y': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'z': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'pixel_coords': tf.FixedLenFeature([MAX_NUM_ENTITIES, 3], tf.float32),
'rotation': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'size': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.string),
'material': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.string),
'shape': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.string),
'color': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.string),
'visibility': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
}
def _decode(example_proto):
# Parse the input `tf.Example` proto using the feature description dict above.
single_example = tf.parse_single_example(example_proto, features)
for k in BYTE_FEATURES:
single_example[k] = tf.squeeze(tf.decode_raw(single_example[k], tf.uint8),
axis=-1)
return single_example
def dataset(tfrecords_path, read_buffer_size=None, map_parallel_calls=None):
"""Read, decompress, and parse the TFRecords file.
Args:
tfrecords_path: str. Path to the dataset file.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
return raw_dataset.map(_decode, num_parallel_calls=map_parallel_calls)
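# Illustrative input pipeline, not part of the original module; the TFRecords
# path and batch size are placeholders. Each decoded element carries a uint8
# 'image' of shape [240, 320, 3], a uint8 'mask' of shape [11, 240, 320, 1],
# and per-object generative factors.
def _example_input_pipeline(tfrecords_path, batch_size=32):
  """Returns a shuffled, batched dataset of CLEVR (with masks) scenes."""
  scenes = dataset(tfrecords_path)
  return scenes.shuffle(buffer_size=1024).repeat().batch(batch_size)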
| multi_object_datasets-master | clevr_with_masks.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tetrominoes dataset reader."""
import tensorflow.compat.v1 as tf
COMPRESSION_TYPE = tf.io.TFRecordOptions.get_compression_type_string('GZIP')
IMAGE_SIZE = [35, 35]
# The maximum number of foreground and background entities in the provided
# dataset. This corresponds to the number of segmentation masks returned per
# scene.
MAX_NUM_ENTITIES = 4
BYTE_FEATURES = ['mask', 'image']
# Create a dictionary mapping feature names to `tf.Example`-compatible
# shape and data type descriptors.
features = {
'image': tf.FixedLenFeature(IMAGE_SIZE+[3], tf.string),
'mask': tf.FixedLenFeature([MAX_NUM_ENTITIES]+IMAGE_SIZE+[1], tf.string),
'x': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'y': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'shape': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
'color': tf.FixedLenFeature([MAX_NUM_ENTITIES, 3], tf.float32),
'visibility': tf.FixedLenFeature([MAX_NUM_ENTITIES], tf.float32),
}
def _decode(example_proto):
# Parse the input `tf.Example` proto using the feature description dict above.
single_example = tf.parse_single_example(example_proto, features)
for k in BYTE_FEATURES:
single_example[k] = tf.squeeze(tf.decode_raw(single_example[k], tf.uint8),
axis=-1)
return single_example
def dataset(tfrecords_path, read_buffer_size=None, map_parallel_calls=None):
"""Read, decompress, and parse the TFRecords file.
Args:
tfrecords_path: str. Path to the dataset file.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
return raw_dataset.map(_decode, num_parallel_calls=map_parallel_calls)
| multi_object_datasets-master | tetrominoes.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Multi-dSprites dataset reader."""
import functools
import tensorflow.compat.v1 as tf
COMPRESSION_TYPE = tf.io.TFRecordOptions.get_compression_type_string('GZIP')
IMAGE_SIZE = [64, 64]
# The maximum number of foreground and background entities in each variant
# of the provided datasets. The values correspond to the number of
# segmentation masks returned per scene.
MAX_NUM_ENTITIES = {
'binarized': 4,
'colored_on_grayscale': 6,
'colored_on_colored': 5
}
BYTE_FEATURES = ['mask', 'image']
def feature_descriptions(max_num_entities, is_grayscale=False):
"""Create a dictionary describing the dataset features.
Args:
max_num_entities: int. The maximum number of foreground and background
entities in each image. This corresponds to the number of segmentation
masks and generative factors returned per scene.
is_grayscale: bool. Whether images are grayscale. Otherwise they're assumed
to be RGB.
Returns:
A dictionary which maps feature names to `tf.Example`-compatible shape and
data type descriptors.
"""
num_channels = 1 if is_grayscale else 3
return {
'image': tf.FixedLenFeature(IMAGE_SIZE+[num_channels], tf.string),
'mask': tf.FixedLenFeature(IMAGE_SIZE+[max_num_entities, 1], tf.string),
'x': tf.FixedLenFeature([max_num_entities], tf.float32),
'y': tf.FixedLenFeature([max_num_entities], tf.float32),
'shape': tf.FixedLenFeature([max_num_entities], tf.float32),
'color': tf.FixedLenFeature([max_num_entities, num_channels], tf.float32),
'visibility': tf.FixedLenFeature([max_num_entities], tf.float32),
'orientation': tf.FixedLenFeature([max_num_entities], tf.float32),
'scale': tf.FixedLenFeature([max_num_entities], tf.float32),
}
def _decode(example_proto, features):
# Parse the input `tf.Example` proto using a feature description dictionary.
single_example = tf.parse_single_example(example_proto, features)
for k in BYTE_FEATURES:
single_example[k] = tf.squeeze(tf.decode_raw(single_example[k], tf.uint8),
axis=-1)
# To return masks in the canonical [entities, height, width, channels] format,
# we need to transpose the tensor axes.
single_example['mask'] = tf.transpose(single_example['mask'], [2, 0, 1, 3])
return single_example
def dataset(tfrecords_path, dataset_variant, read_buffer_size=None,
map_parallel_calls=None):
"""Read, decompress, and parse the TFRecords file.
Args:
tfrecords_path: str. Path to the dataset file.
dataset_variant: str. One of ['binarized', 'colored_on_grayscale',
'colored_on_colored']. This is used to identify the maximum number of
entities in each scene. If an incorrect identifier is passed in, the
TFRecords file will not be read correctly.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
if dataset_variant not in MAX_NUM_ENTITIES:
raise ValueError('Invalid `dataset_variant` provided. The supported values'
' are: {}'.format(list(MAX_NUM_ENTITIES.keys())))
max_num_entities = MAX_NUM_ENTITIES[dataset_variant]
is_grayscale = dataset_variant == 'binarized'
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
features = feature_descriptions(max_num_entities, is_grayscale)
partial_decode_fn = functools.partial(_decode, features=features)
return raw_dataset.map(partial_decode_fn,
num_parallel_calls=map_parallel_calls)
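# Illustrative usage, not part of the original module; the path is a
# placeholder. The variant determines the number of masks per scene, e.g.
# 'colored_on_colored' yields a uint8 'mask' of shape [5, 64, 64, 1] per
# element after the transpose in `_decode`.
def _example_colored_on_colored(tfrecords_path, batch_size=16):
  """Returns batches of the colored-sprites-on-colored-background variant."""
  sprites = dataset(tfrecords_path, 'colored_on_colored')
  return sprites.batch(batch_size)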
| multi_object_datasets-master | multi_dsprites.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Installation script for setuptools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
EXTRA_PACKAGES = {
'tf': ['tensorflow>=1.14'],
'tf_gpu': ['tensorflow-gpu>=1.14'],
}
setup(
name='multi_object_datasets',
version='1.0.0',
author='DeepMind',
license='Apache License, Version 2.0',
    description=('Multi-object image datasets with '
'ground-truth segmentation masks and generative factors.'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords=['datasets', 'machine learning', 'representation learning'],
url='https://github.com/deepmind/multi_object_datasets',
packages=['multi_object_datasets'],
package_dir={'multi_object_datasets': '.'},
extras_require=EXTRA_PACKAGES,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| multi_object_datasets-master | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""CATER (with masks) dataset reader."""
import functools
import tensorflow as tf
COMPRESSION_TYPE = 'ZLIB'
IMAGE_SIZE = [64, 64]
SEQUENCE_LENGTH = 33
MAX_NUM_ENTITIES = 11
BYTE_FEATURES = ['image', 'mask']
def feature_descriptions(
sequence_length=SEQUENCE_LENGTH,
max_num_entities=MAX_NUM_ENTITIES):
return {
'camera_matrix': tf.io.FixedLenFeature(
[sequence_length, 4, 4], tf.float32),
'image': tf.io.FixedLenFeature([], tf.string),
'mask': tf.io.FixedLenFeature([], tf.string),
'object_positions': tf.io.FixedLenFeature(
[max_num_entities, sequence_length, 3], tf.float32)
}
def _decode(example_proto, features,
sequence_length=SEQUENCE_LENGTH,
max_num_entities=MAX_NUM_ENTITIES):
"""Parse the input `tf.Example` proto using a feature description dictionary.
Args:
example_proto: the serialized example.
features: feature descriptions to deserialize `example_proto`.
sequence_length: the length of each video in timesteps.
max_num_entities: the maximum number of entities in any frame of the video.
Returns:
A dict containing the following tensors:
- 'image': a sequence of RGB frames.
- 'mask': a mask for all entities in each frame.
- 'camera_matrix': a 4x4 matrix describing the camera pose in each frame.
- 'object_positions': 3D position for all entities in each frame.
"""
single_example = tf.io.parse_single_example(example_proto, features=features)
for key in BYTE_FEATURES:
single_example[key] = tf.io.decode_raw(single_example[key], tf.uint8)
single_example['image'] = tf.reshape(
single_example['image'],
[sequence_length] + IMAGE_SIZE + [3])
single_example['mask'] = tf.reshape(
single_example['mask'],
[sequence_length, max_num_entities] + IMAGE_SIZE + [1])
single_example['object_positions'] = tf.transpose(
single_example['object_positions'], [1, 0, 2])
return single_example
def dataset(tfrecords_path, read_buffer_size=None, map_parallel_calls=None):
"""Read, decompress, and parse TFRecords.
Args:
tfrecords_path: str or Sequence[str]. Path or paths to dataset files.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
features = feature_descriptions()
partial_decode_fn = functools.partial(_decode, features=features)
return raw_dataset.map(
partial_decode_fn, num_parallel_calls=map_parallel_calls)
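# Illustrative usage, not part of the original module; the file paths are
# placeholders. Each decoded element is a video: 'image' has shape
# [33, 64, 64, 3], 'mask' has shape [33, 11, 64, 64, 1], 'camera_matrix' is
# [33, 4, 4], and 'object_positions' is [33, 11, 3] after the transpose in
# `_decode`.
def _example_batched_videos(tfrecords_paths, batch_size=8):
  """Returns batches of CATER videos with per-frame entity masks."""
  videos = dataset(tfrecords_paths)
  return videos.batch(batch_size).prefetch(2)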
| multi_object_datasets-master | cater_with_masks.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of the adjusted Rand index."""
import tensorflow.compat.v1 as tf
def adjusted_rand_index(true_mask, pred_mask, name='ari_score'):
r"""Computes the adjusted Rand index (ARI), a clustering similarity score.
This implementation ignores points with no cluster label in `true_mask` (i.e.
those points for which `true_mask` is a zero vector). In the context of
segmentation, that means this function can ignore points in an image
corresponding to the background (i.e. not to an object).
Args:
true_mask: `Tensor` of shape [batch_size, n_points, n_true_groups].
The true cluster assignment encoded as one-hot.
pred_mask: `Tensor` of shape [batch_size, n_points, n_pred_groups].
The predicted cluster assignment encoded as categorical probabilities.
This function works on the argmax over axis 2.
name: str. Name of this operation (defaults to "ari_score").
Returns:
ARI scores as a tf.float32 `Tensor` of shape [batch_size].
Raises:
ValueError: if n_points <= n_true_groups and n_points <= n_pred_groups.
We've chosen not to handle the special cases that can occur when you have
one cluster per datapoint (which would be unusual).
References:
Lawrence Hubert, Phipps Arabie. 1985. "Comparing partitions"
https://link.springer.com/article/10.1007/BF01908075
Wikipedia
https://en.wikipedia.org/wiki/Rand_index
Scikit Learn
http://scikit-learn.org/stable/modules/generated/\
sklearn.metrics.adjusted_rand_score.html
"""
with tf.name_scope(name):
_, n_points, n_true_groups = true_mask.shape.as_list()
n_pred_groups = pred_mask.shape.as_list()[-1]
if n_points <= n_true_groups and n_points <= n_pred_groups:
# This rules out the n_true_groups == n_pred_groups == n_points
# corner case, and also n_true_groups == n_pred_groups == 0, since
# that would imply n_points == 0 too.
# The sklearn implementation has a corner-case branch which does
# handle this. We chose not to support these cases to avoid counting
# distinct clusters just to check if we have one cluster per datapoint.
raise ValueError(
"adjusted_rand_index requires n_groups < n_points. We don't handle "
"the special cases that can occur when you have one cluster "
"per datapoint.")
true_group_ids = tf.argmax(true_mask, -1)
pred_group_ids = tf.argmax(pred_mask, -1)
# We convert true and predicted clusters to one-hot ('oh') representations.
true_mask_oh = tf.cast(true_mask, tf.float32) # already one-hot
pred_mask_oh = tf.one_hot(pred_group_ids, n_pred_groups) # returns float32
n_points = tf.cast(tf.reduce_sum(true_mask_oh, axis=[1, 2]), tf.float32)
nij = tf.einsum('bji,bjk->bki', pred_mask_oh, true_mask_oh)
a = tf.reduce_sum(nij, axis=1)
b = tf.reduce_sum(nij, axis=2)
rindex = tf.reduce_sum(nij * (nij - 1), axis=[1, 2])
aindex = tf.reduce_sum(a * (a - 1), axis=1)
bindex = tf.reduce_sum(b * (b - 1), axis=1)
expected_rindex = aindex * bindex / (n_points*(n_points-1))
max_rindex = (aindex + bindex) / 2
ari = (rindex - expected_rindex) / (max_rindex - expected_rindex)
# The case where n_true_groups == n_pred_groups == 1 needs to be
# special-cased (to return 1) as the above formula gives a divide-by-zero.
# This might not work when true_mask has values that do not sum to one:
both_single_cluster = tf.logical_and(
_all_equal(true_group_ids), _all_equal(pred_group_ids))
return tf.where(both_single_cluster, tf.ones_like(ari), ari)
def _all_equal(values):
"""Whether values are all equal along the final axis."""
return tf.reduce_all(tf.equal(values, values[..., :1]), axis=-1)
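# Illustrative check, not part of the original module: with identical one-hot
# true and predicted assignments the score is 1. Inputs have shape
# [batch_size, n_points, n_groups], and n_points must exceed both group counts.
def _example_perfect_ari():
  """Returns a [1]-shaped tensor equal to 1.0 for a perfect segmentation."""
  group_ids = tf.constant([[0, 0, 1, 1, 2, 2]])  # [batch_size=1, n_points=6].
  one_hot_mask = tf.one_hot(group_ids, depth=3)  # [1, 6, 3].
  return adjusted_rand_index(true_mask=one_hot_mask, pred_mask=one_hot_mask)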
| multi_object_datasets-master | segmentation_metrics.py |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Objects Room dataset reader."""
import functools
import tensorflow.compat.v1 as tf
COMPRESSION_TYPE = tf.io.TFRecordOptions.get_compression_type_string('GZIP')
IMAGE_SIZE = [64, 64]
# The maximum number of foreground and background entities in each variant
# of the provided datasets. The values correspond to the number of
# segmentation masks returned per scene.
MAX_NUM_ENTITIES = {
'train': 7,
'six_objects': 10,
'empty_room': 4,
'identical_color': 10
}
BYTE_FEATURES = ['mask', 'image']
def feature_descriptions(max_num_entities):
"""Create a dictionary describing the dataset features.
Args:
max_num_entities: int. The maximum number of foreground and background
entities in each image. This corresponds to the number of segmentation
masks returned per scene.
Returns:
A dictionary which maps feature names to `tf.Example`-compatible shape and
data type descriptors.
"""
return {
'image': tf.FixedLenFeature(IMAGE_SIZE+[3], tf.string),
'mask': tf.FixedLenFeature([max_num_entities]+IMAGE_SIZE+[1], tf.string),
}
def _decode(example_proto, features):
# Parse the input `tf.Example` proto using a feature description dictionary.
single_example = tf.parse_single_example(example_proto, features)
for k in BYTE_FEATURES:
single_example[k] = tf.squeeze(tf.decode_raw(single_example[k], tf.uint8),
axis=-1)
return single_example
def dataset(tfrecords_path, dataset_variant, read_buffer_size=None,
map_parallel_calls=None):
"""Read, decompress, and parse the TFRecords file.
Args:
tfrecords_path: str. Path to the dataset file.
dataset_variant: str. One of ['train', 'six_objects', 'empty_room',
'identical_color']. This is used to identify the maximum number of
entities in each scene. If an incorrect identifier is passed in, the
TFRecords file will not be read correctly.
read_buffer_size: int. Number of bytes in the read buffer. See documentation
for `tf.data.TFRecordDataset.__init__`.
map_parallel_calls: int. Number of elements decoded asynchronously in
parallel. See documentation for `tf.data.Dataset.map`.
Returns:
An unbatched `tf.data.TFRecordDataset`.
"""
if dataset_variant not in MAX_NUM_ENTITIES:
raise ValueError('Invalid `dataset_variant` provided. The supported values'
' are: {}'.format(list(MAX_NUM_ENTITIES.keys())))
max_num_entities = MAX_NUM_ENTITIES[dataset_variant]
raw_dataset = tf.data.TFRecordDataset(
tfrecords_path, compression_type=COMPRESSION_TYPE,
buffer_size=read_buffer_size)
features = feature_descriptions(max_num_entities)
partial_decode_fn = functools.partial(_decode, features=features)
return raw_dataset.map(partial_decode_fn,
num_parallel_calls=map_parallel_calls)
| multi_object_datasets-master | objects_room.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the formalisation pipeline."""
import contextlib
import functools
import os
from typing import Generator, Mapping, Optional, Tuple
from absl import flags
from absl import logging
import chex
from distribution_shift_framework.core import checkpointing
from distribution_shift_framework.core.datasets import data_utils
from distribution_shift_framework.core.metrics import metrics
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import utils
import ml_collections
import numpy as np
import optax
from six.moves import cPickle as pickle
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
def get_per_device_batch_size(total_batch_size: int) -> int:
num_devices = jax.device_count()
per_device_batch_size, ragged = divmod(total_batch_size, num_devices)
if ragged:
raise ValueError(
f'Global batch size {total_batch_size} must be divisible by the '
f'total number of devices {num_devices}')
return per_device_batch_size
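# Worked example (illustrative, added comment): with 8 devices,
# get_per_device_batch_size(128) returns 16, while a global batch of 100
# raises a ValueError because divmod(100, 8) leaves a remainder of 4.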
class Experiment(experiment.AbstractExperiment):
"""Formalisation experiment."""
CHECKPOINT_ATTRS = {
'_params': 'params',
'_state': 'state',
'_opt_state': 'opt_state',
'_d_params': 'd_params',
'_d_state': 'd_state',
'_d_opt_state': 'd_opt_state',
'_adapt_params': 'adapt_params',
'_adapt_state': 'adapt_state'
}
def __init__(self, mode: str, init_rng: chex.PRNGKey,
config: ml_collections.ConfigDict):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode, init_rng=init_rng)
self.mode = mode
self.config = config
self.init_rng = init_rng
# Set up discriminator parameters.
self._d_params = None
self._d_state = None
self._d_opt_state = None
# Double transpose trick to improve performance on TPUs.
self._should_transpose_images = (
config.enable_double_transpose and
jax.local_devices()[0].platform == 'tpu')
self._params = None # params
self._state = None # network state for stats like batchnorm
self._opt_state = None # optimizer state
self._adapt_params = None
self._adapt_state = None
self._label = config.data.label
with utils.log_activity('transform functions'):
self.forward = hk.transform_with_state(self._forward_fn)
self.eval_batch = jax.pmap(self._eval_batch, axis_name='i')
self.learner_fn = hk.transform_with_state(self._learner_fn)
self.adversarial_fn = hk.transform_with_state(self._adversarial_fn)
self.adapt_fn = self._adapt_fn
self.adaptor = None
self._update_func = jax.pmap(
self._update_func, axis_name='i', donate_argnums=(0, 1, 2))
if mode == 'train':
with utils.log_activity('initialize training'):
self._init_train(init_rng)
if getattr(self.config.training.learn_adapt, 'fn', None):
learner_adapt_fn = self.config.training.learn_adapt.fn
learner_adapt_kwargs = self.config.training.learn_adapt.kwargs
self._train_adapter = learner_adapt_fn(**learner_adapt_kwargs)
if self._adapt_params is None:
self._adapt_params = self._params
self._adapt_state = self._state
self._train_adapter.set(self._adapt_params, self._adapt_state)
else:
self._train_adapter = None
def optimizer(self) -> optax.GradientTransformation:
optimizer_fn = getattr(optax, self.config.optimizer.name)
return optimizer_fn(**self.config.optimizer.kwargs)
def _maybe_undo_transpose_images(self, images: chex.Array) -> chex.Array:
if self._should_transpose_images:
return jnp.transpose(images, (1, 2, 3, 0)) # NHWC -> HWCN.
return images
def _maybe_transpose_images(self, images: chex.Array) -> chex.Array:
if self._should_transpose_images:
# We use the double transpose trick to improve performance for TPUs.
# Note that there is a matching NHWC->HWCN transpose in the data pipeline.
# Here we reset back to NHWC like our model expects. The compiler cannot
# make this optimization for us since our data pipeline and model are
# compiled separately.
images = jnp.transpose(images, (3, 0, 1, 2)) # HWCN -> NHWC.
return images
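  # Shape sketch of the double-transpose trick (illustrative, added comment):
  # the host pipeline would ship a (32, 64, 64, 3) NHWC batch as its HWCN
  # transpose, e.g. images_hwcn = np.transpose(images_nhwc, (1, 2, 3, 0)),
  # giving shape (64, 64, 3, 32); the transpose above restores (32, 64, 64, 3)
  # on device before the model consumes the batch.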
def _postprocess_fn(
self,
inputs: data_utils.Batch,
rng: chex.PRNGKey
) -> data_utils.Batch:
if not hasattr(self.config, 'postprocessing'):
return inputs
postprocessing = getattr(self.config.postprocessing, 'fn', None)
if postprocessing is None:
return inputs
postprocess_fn = functools.partial(postprocessing,
**self.config.postprocessing.kwargs)
images = inputs['image']
labels = inputs['one_hot_label']
postprocessed_images, postprocessed_labels = postprocess_fn(
images, labels, rng=rng)
postprocessed_inputs = dict(**inputs)
postprocessed_inputs['image'] = postprocessed_images
postprocessed_inputs['one_hot_label'] = postprocessed_labels
return postprocessed_inputs
def _learner_fn(self, inputs: data_utils.Batch,
reduction='mean') -> Tuple[data_utils.ScalarDict, chex.Array]:
logits = self._forward_fn(inputs, is_training=True)
if getattr(self.config.data, 'label_property', '') in inputs.keys():
property_vs = inputs[self.config.data.label_property]
property_onehot = hk.one_hot(property_vs, self.config.data.n_properties)
else:
property_onehot = None
algorithm_fn = self.config.training.algorithm.fn
kwargs = self.config.training.algorithm.kwargs
scalars, logits = algorithm_fn(**kwargs)(
logits, inputs['one_hot_label'], property_vs=property_onehot,
reduction=reduction)
predicted_label = jnp.argmax(logits, axis=-1)
top1_acc = jnp.equal(predicted_label,
inputs[self._label]).astype(jnp.float32)
scalars['top1_acc'] = top1_acc.mean()
return scalars, logits
def learner_adapt_weights_fn(
self, params: optax.Params, state: optax.OptState,
old_params: optax.Params, old_state: optax.OptState,
inputs: data_utils.Batch, rng: chex.PRNGKey,
global_step: chex.Array
) -> Tuple[Tuple[data_utils.ScalarDict, chex.Array], optax.OptState]:
(scalars, logits), g_state = self._train_adapter(
fn=functools.partial(self.learner_fn.apply, reduction=None),
params=params, state=state, inputs=inputs, global_step=global_step,
rng=rng, old_params=old_params, old_state=old_state)
return (scalars, logits), g_state
def _adversarial_fn(self, logits: chex.Array,
inputs: data_utils.Batch) -> data_utils.ScalarDict:
if getattr(self.config.data, 'label_property', '') in inputs.keys():
property_vs = inputs[self.config.data.label_property]
property_onehot = hk.one_hot(property_vs, self.config.data.n_properties)
else:
property_onehot = None
one_hot_labels = inputs['one_hot_label']
algorithm_fn = self.config.training.algorithm.fn
kwargs = self.config.training.algorithm.kwargs
return algorithm_fn(**kwargs).adversary(
logits, property_vs=property_onehot, reduction='mean',
targets=one_hot_labels)
def _adapt_fn(self, params: optax.Params, state: optax.OptState,
rng: chex.PRNGKey, is_final_eval: bool = False):
adapt_fn = getattr(self.config.adapter, 'fn')
adapt_kwargs = getattr(self.config.adapter, 'kwargs')
forward_fn = functools.partial(self.forward.apply, is_training=True,
test_local_stats=False)
self.adaptor = adapt_fn(init_params=params,
init_state=state,
forward=jax.pmap(forward_fn, axis_name='i'),
**adapt_kwargs)
per_device_batch_size = get_per_device_batch_size(
self.config.training.batch_size)
ds = self._load_data(per_device_batch_size=per_device_batch_size,
is_training=False,
data_kwargs=self.config.data.test_kwargs)
for step, batch in enumerate(ds, 1):
logging.info('Updating using an adaptor function.')
self.adaptor.update(batch, batch[self.config.data.label_property], rng)
if (not is_final_eval and
step > getattr(self.config.adapter, 'num_adaptation_steps')):
break
def _forward_fn(self,
inputs: data_utils.Batch,
is_training: bool,
test_local_stats: bool = False) -> chex.Array:
model_constructor = self.config.model.constructor
model_instance = model_constructor(**self.config.model.kwargs.to_dict())
images = inputs['image']
images = self._maybe_transpose_images(images)
images = self.config.model.preprocess(images)
if isinstance(model_instance, hk.nets.MLP):
return model_instance(images)
return model_instance(images, is_training=is_training)
def _d_loss_fn(
self, d_params: optax.Params, d_state: optax.OptState, inputs: chex.Array,
logits: chex.Array,
rng: chex.PRNGKey
) -> Tuple[chex.Array, Tuple[data_utils.ScalarDict, optax.OptState]]:
d_scalars, d_state = self.adversarial_fn.apply(d_params, d_state, rng,
logits, inputs)
if not d_scalars:
# No adversary.
return 0., (d_scalars, d_state)
scaled_loss = d_scalars['loss'] / jax.device_count()
d_scalars = {f'adv_{k}': v for k, v in d_scalars.items()}
return scaled_loss, (d_scalars, d_state)
def _run_postprocess_fn(self,
rng: chex.PRNGKey,
inputs: data_utils.Batch) -> data_utils.Batch:
inputs = self._postprocess_fn(inputs, rng)
return inputs
def _loss_fn(
self, g_params: optax.Params,
g_state: optax.OptState,
d_params: optax.Params,
d_state: optax.OptState,
inputs: chex.Array,
rng: chex.PRNGKey,
global_step: chex.Array,
old_g_params: Optional[optax.Params] = None,
old_g_state: Optional[optax.OptState] = None
) -> Tuple[chex.Array, Tuple[
data_utils.ScalarDict, chex.Array, data_utils.Batch, optax.OptState]]:
# Find the loss according to the generator.
if getattr(self.config.training.learn_adapt, 'fn', None):
# Use generator loss computed by a training adaptation algorithm.
(scalars, logits), g_state = self.learner_adapt_weights_fn(
params=g_params,
state=g_state,
old_params=old_g_params,
old_state=old_g_state,
rng=rng,
inputs=inputs,
global_step=global_step)
else:
(scalars, logits), g_state = self.learner_fn.apply(g_params, g_state, rng,
inputs)
d_scalars, _ = self.adversarial_fn.apply(d_params, d_state, rng, logits,
inputs)
# If there is an adversary:
if 'loss' in d_scalars.keys():
# Want to minimize the loss, so negate it.
adv_weight = self.config.training.adversarial_weight
scalars['loss'] = scalars['loss'] - d_scalars['loss'] * adv_weight
scalars.update({f'gen_adv_{k}': v for k, v in d_scalars.items()})
scaled_loss = scalars['loss'] / jax.device_count()
return scaled_loss, (scalars, logits, inputs, g_state)
  #  _             _
  # | |_ _ __ __ _(_)_ __
  # | __| '__/ _` | | '_ \
  # | |_| | | (_| | | | | |
  #  \__|_|  \__,_|_|_| |_|
#
def _prepare_train_batch(self, rng: chex.PRNGKey,
batch: data_utils.Batch) -> data_utils.Batch:
noise_threshold = self.config.training.label_noise
if noise_threshold > 0:
random_labels = jax.random.randint(
rng[0],
shape=batch[self._label].shape,
dtype=batch[self._label].dtype,
minval=0,
maxval=self.config.data.n_classes)
mask = jax.random.uniform(rng[0],
batch[self._label].shape) < noise_threshold
batch[self._label] = (random_labels * mask +
batch[self._label] * (1 - mask))
batch['one_hot_label'] = hk.one_hot(
batch[self._label], self.config.data.n_classes)
return batch
def _init_train(self, rng: chex.PRNGKey):
self._train_input = utils.py_prefetch(self._build_train_input)
if self._params is None:
logging.info('Initializing parameters randomly rather than restoring'
' from checkpoint.')
batch = next(self._train_input)
batch['one_hot_label'] = hk.one_hot(batch[self._label],
self.config.data.n_classes)
# Initialize generator.
self._params, self._state = self._init_params(rng, batch)
opt_init, _ = self.optimizer()
self._opt_state = jax.pmap(opt_init)(self._params)
# Initialize discriminator.
bcast_rng = utils.bcast_local_devices(rng)
(_, dummy_logits), _ = jax.pmap(self.learner_fn.apply)(self._params,
self._state,
bcast_rng, batch)
self._d_params, self._d_state = self._init_d_params(
rng, dummy_logits, batch)
opt_init, _ = self.optimizer()
if self._d_params:
self._d_opt_state = jax.pmap(opt_init)(self._d_params)
else:
# Is empty.
self._d_opt_state = None
def _init_params(
self, rng: chex.PRNGKey,
batch: data_utils.Batch) -> Tuple[optax.Params, optax.OptState]:
init_net = jax.pmap(self.learner_fn.init)
rng = utils.bcast_local_devices(rng)
params, state = init_net(rng, batch)
if not self.config.pretrained_checkpoint:
return params, state
ckpt_data = checkpointing.load_model(
self.config.pretrained_checkpoint)
ckpt_params, ckpt_state = ckpt_data['params'], ckpt_data['state']
ckpt_params = utils.bcast_local_devices(ckpt_params)
ckpt_state = utils.bcast_local_devices(ckpt_state)
def use_pretrained_if_shapes_match(params, ckpt_params):
if params.shape == ckpt_params.shape:
return ckpt_params
logging.warning('Shape mismatch! Initialized parameter: %s, '
'Pretrained parameter: %s.',
params.shape, ckpt_params.shape)
return params
params = jax.tree_multimap(
use_pretrained_if_shapes_match, params, ckpt_params)
return params, ckpt_state
def _init_d_params(
self, rng: chex.PRNGKey, logits: chex.Array,
batch: data_utils.Batch) -> Tuple[optax.Params, optax.OptState]:
init_net = jax.pmap(self.adversarial_fn.init)
rng = utils.bcast_local_devices(rng)
return init_net(rng, logits, batch)
def _write_images(self, writer, global_step: chex.Array,
images: Mapping[str, chex.Array]):
global_step = np.array(utils.get_first(global_step))
images_to_write = {
k: self._maybe_transpose_images(utils.get_first(v))
for k, v in images.items()}
writer.write_images(global_step, images_to_write)
def _load_data(self,
per_device_batch_size: int,
is_training: bool,
data_kwargs: ml_collections.ConfigDict
) -> Generator[data_utils.Batch, None, None]:
with contextlib.ExitStack() as stack:
if self.config.use_fake_data:
stack.enter_context(tfds.testing.mock_data(num_examples=128))
ds = data_utils.load_dataset(
is_training=is_training,
batch_dims=[jax.local_device_count(), per_device_batch_size],
transpose=self._should_transpose_images,
data_kwargs=data_kwargs)
return ds
def _build_train_input(self) -> Generator[data_utils.Batch, None, None]:
per_device_batch_size = get_per_device_batch_size(
self.config.training.batch_size)
return self._load_data(per_device_batch_size=per_device_batch_size,
is_training=True,
data_kwargs=self.config.data.train_kwargs)
def _update_func(
self,
params: optax.Params,
state: optax.OptState,
opt_state: optax.OptState,
global_step: chex.Array,
batch: data_utils.Batch,
rng: chex.PRNGKey,
old_g_params: Optional[optax.Params] = None,
old_g_state: Optional[optax.OptState] = None
) -> Tuple[Tuple[optax.Params, optax.Params], Tuple[
optax.OptState, optax.OptState], Tuple[optax.OptState, optax.OptState],
data_utils.ScalarDict, data_utils.Batch]:
"""Updates parameters ."""
# Obtain the parameters and discriminators.
(g_params, d_params) = params
(g_state, d_state) = state
(g_opt_state, d_opt_state) = opt_state
################
# Generator.
################
# Compute the loss for the generator.
inputs = self._run_postprocess_fn(rng, batch)
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (g_scalars, logits, preprocessed_inputs,
g_state) = grad_loss_fn(g_params, g_state, d_params, d_state,
inputs, rng, global_step,
old_g_params=old_g_params,
old_g_state=old_g_state)
# Update the generator.
grads = jax.lax.psum(scaled_grads, axis_name='i')
_, opt_apply = self.optimizer()
updates, g_opt_state = opt_apply(grads, g_opt_state, g_params)
g_params = optax.apply_updates(g_params, updates)
################
# Discriminator.
################
if not self._d_opt_state:
# No discriminator.
scalars = dict(global_step=global_step, **g_scalars)
return ((g_params, d_params), (g_state, d_state),
(g_opt_state, d_opt_state), scalars, preprocessed_inputs)
# Compute the loss for the discriminator.
grad_loss_fn = jax.grad(self._d_loss_fn, has_aux=True)
scaled_grads, (d_scalars, d_state) = grad_loss_fn(d_params, d_state, batch,
logits, rng)
# Update the discriminator.
grads = jax.lax.psum(scaled_grads, axis_name='i')
_, opt_apply = self.optimizer()
updates, d_opt_state = opt_apply(grads, d_opt_state, d_params)
d_params = optax.apply_updates(d_params, updates)
# For logging while training.
scalars = dict(
global_step=global_step,
**g_scalars,
**d_scalars)
return ((g_params, d_params), (g_state, d_state),
(g_opt_state, d_opt_state), scalars, preprocessed_inputs)
def step(self, global_step: chex.Array, rng: chex.PRNGKey, writer,
**unused_kwargs) -> chex.Array:
"""Perform one step of the model."""
batch = next(self._train_input)
batch = self._prepare_train_batch(rng, batch)
params, state, opt_state, scalars, preprocessed_batch = (
self._update_func(
params=(self._params, self._d_params),
state=(self._state, self._d_state),
opt_state=(self._opt_state, self._d_opt_state),
global_step=global_step,
batch=batch,
rng=rng,
old_g_params=self._adapt_params,
old_g_state=self._adapt_state))
(self._params, self._d_params) = params
(self._state, self._d_state) = state
(self._opt_state, self._d_opt_state) = opt_state
if self._train_adapter:
self._adapt_params, self._adapt_state = self._train_adapter.update(
self._params, self._state, utils.get_first(global_step))
images = batch['image']
preprocessed_images = preprocessed_batch['image']
if self.config.training.save_images:
self._write_images(writer, global_step,
{'images': images,
'preprocessed_images': preprocessed_images})
# Just return the tracking metrics on the first device for logging.
return utils.get_first(scalars)
  #                  _
  #   _____   ____ _| |
  #  / _ \ \ / / _` | |
  # |  __/\ V / (_| | |
  #  \___| \_/ \__,_|_|
#
def _load_eval_data(
self,
per_device_batch_size: int) -> Generator[data_utils.Batch, None, None]:
return self._load_data(per_device_batch_size=per_device_batch_size,
is_training=False,
data_kwargs=self.config.data.test_kwargs)
def _full_eval(self, rng: chex.PRNGKey, scalars: data_utils.ScalarDict,
checkpoint_path: Optional[str] = None
) -> data_utils.ScalarDict:
if checkpoint_path:
ckpt_data = checkpointing.load_model(checkpoint_path)
params, state = ckpt_data['params'], ckpt_data['state']
params = utils.bcast_local_devices(params)
state = utils.bcast_local_devices(state)
else:
params, state = self._params, self._state
# Iterate over all the test sets.
original_subset = self.config.data.test_kwargs.load_kwargs.subset
for test_subset in getattr(self.config.data, 'test_sets', ('test',)):
self.config.data.test_kwargs.load_kwargs.subset = test_subset
test_scalars = jax.device_get(
self._eval_top1_accuracy(params, state, rng, is_final=True))
scalars.update(
{f'{test_subset}_{k}': v for k, v in test_scalars.items()})
self.config.data.test_kwargs.load_kwargs.subset = original_subset
return scalars
def evaluate(self, global_step: chex.Array, rng: chex.PRNGKey, writer,
**unused_args) -> data_utils.ScalarDict:
"""See base class."""
# Need to set these so `on_new_best_model` can do a full eval.
self._writer = writer
self._rng = rng
global_step = np.array(utils.get_first(global_step))
scalars = jax.device_get(
self._eval_top1_accuracy(self._params, self._state, rng))
if FLAGS.config.eval_specific_checkpoint_dir:
scalars = self._full_eval(rng, scalars,
FLAGS.config.eval_specific_checkpoint_dir)
logging.info('[Step %d] Eval scalars: %s', global_step, scalars)
return scalars
def on_new_best_model(self, best_state: ml_collections.ConfigDict):
scalars = self._full_eval(self._rng, {})
if self._writer is not None:
self._writer.write_scalars(best_state.global_step, scalars)
ckpt_data = {}
for self_key, ckpt_key in self.CHECKPOINT_ATTRS.items():
ckpt_data[ckpt_key] = getattr(self, self_key)
checkpoint_path = checkpointing.get_checkpoint_dir(FLAGS.config)
checkpointing.save_model(os.path.join(checkpoint_path, 'best.pkl'),
ckpt_data)
def _eval_top1_accuracy(self, params: optax.Params, state: optax.OptState,
rng: chex.PRNGKey, is_final: bool = False
) -> data_utils.ScalarDict:
"""Evaluates an epoch."""
total_batch_size = self.config.evaluation.batch_size
per_device_batch_size = total_batch_size
eval_data = self._load_eval_data(per_device_batch_size)
# If using an adaptive method.
if getattr(self.config.adapter, 'fn', None):
self.adapt_fn(params, state, rng, is_final_eval=is_final)
self.adaptor.set_up_eval()
    # Collect labels, properties and predictions over the evaluation set.
labels = []
predicted_labels = []
features = []
for batch in eval_data:
if self.adaptor is not None:
logging.info('Running adaptation algorithm for evaluation.')
property_label = batch[self.config.data.label_property]
predicted_label, _ = self.adaptor.run(
self.eval_batch, property_label, inputs=batch, rng=rng)
else:
predicted_label, _ = self.eval_batch(params, state, batch, rng)
label = batch[self._label]
feature = batch[self.config.data.label_property]
# Concatenate along the pmapped direction.
labels.append(jnp.concatenate(label))
features.append(jnp.concatenate(feature))
predicted_labels.append(jnp.concatenate(predicted_label))
# And finally concatenate along the first dimension.
labels = jnp.concatenate(labels)
features = jnp.concatenate(features)
predicted_labels = jnp.concatenate(predicted_labels)
# Compute the metrics.
results = {}
for metric in self.config.evaluation.metrics:
logging.info('Evaluating metric %s.', str(metric))
metric_fn = getattr(metrics, metric, None)
results[metric] = metric_fn(labels, features, predicted_labels, None)
# Dump all the results by saving pickled results to disk.
out_dir = checkpointing.get_checkpoint_dir(FLAGS.config)
dataset = self.config.data.test_kwargs.load_kwargs.subset
results_path = os.path.join(out_dir, f'results_{dataset}')
if not tf.io.gfile.exists(results_path):
tf.io.gfile.makedirs(results_path)
# Save numpy arrays.
with tf.io.gfile.GFile(
os.path.join(results_path, 'results.pkl'), 'wb') as f:
# Using protocol 4 as it's the default from Python 3.8 on.
pickle.dump({'all_labels': labels, 'all_features': features,
'all_predictions': predicted_labels}, f, protocol=4)
return results
def _eval_batch(self, params: optax.Params, state: optax.OptState,
inputs: data_utils.Batch,
rng: chex.PRNGKey
                  ) -> Tuple[chex.Array, chex.Array]:
"""Evaluates a batch."""
logits, _ = self.forward.apply(
params, state, rng, inputs, is_training=False)
inputs['one_hot_label'] = hk.one_hot(
inputs[self._label], self.config.data.n_classes)
(_, logits), _ = self.learner_fn.apply(params, state, rng, inputs)
softmax_predictions = jax.nn.softmax(logits, axis=-1)
predicted_label = jnp.argmax(softmax_predictions, axis=-1)
return predicted_label, logits
| distribution_shift_framework-master | distribution_shift_framework/classification/experiment_lib.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for imagenet experiment."""
import functools
from typing import Any, Callable, List, Mapping, Optional, Tuple
import chex
from distribution_shift_framework.configs import disentanglement_config
from distribution_shift_framework.core import adapt
from distribution_shift_framework.core import adapt_train
from distribution_shift_framework.core import algorithms
from distribution_shift_framework.core import hyper
from distribution_shift_framework.core.datasets import data_utils
from distribution_shift_framework.core.model_zoo import resnet
from distribution_shift_framework.core.pix import postprocessing
import haiku as hk
from jaxline import base_config
import ml_collections
DATASETS = ('dsprites', 'small_norb', 'shapes3d')
LEARNERS = algorithms.__all__
ADAPTERS = ('BNAdapt',)
TRAIN_ADAPTERS = ('JTT',)
POSTPROCESSORS = ('mixup',)
ALGORITHMS = LEARNERS + ADAPTERS + TRAIN_ADAPTERS + POSTPROCESSORS
ConfigAndSweeps = Tuple[ml_collections.ConfigDict, List[hyper.Sweep]]
_EXP = 'config.experiment_kwargs.config'
def parse_options(
options: str,
defaults: Mapping[str, Any],
types: Optional[Mapping[str, Callable[[str], Any]]] = None
) -> Mapping[str, Any]:
"""Parse a "k1=v1,k2=v2" option string."""
if not options:
return defaults
if types is None:
types = {}
else:
types = dict(**types)
for k, v in defaults.items():
if k not in types:
types[k] = type(v)
kwargs = dict(t.split('=', 1) for t in options.split(','))
for k, v in kwargs.items():
if k in types: # Default type is `str`.
kwargs[k] = ((v in ('True', 'true', 'yes')) if types[k] == bool
else types[k](v))
# Only allow options where defaults are specified to avoid typos.
for k in kwargs:
if k not in defaults:
raise ValueError('Unknown option `%s`.' % k)
for k, v in defaults.items():
if k not in kwargs:
kwargs[k] = v
return kwargs
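# Illustrative sketch (added for this write-up) of how `parse_options` turns an
# option string into a typed mapping; the keys and defaults are hypothetical.
def _parse_options_example():
  defaults = dict(dataset_name='dsprites', batch_size=128, use_fake_data=False)
  parsed = parse_options('dataset_name=shapes3d,batch_size=64', defaults)
  # parsed == {'dataset_name': 'shapes3d', 'batch_size': 64,
  #            'use_fake_data': False}
  return parsed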
def get_config(options: str = '') -> ml_collections.ConfigDict:
"""Return config object for training.
Args:
    options: A comma-separated string of key=value pairs, e.g.
      key1=value1,key2=value2. The supported keys are the following:
dataset_name -- The name of the dataset.
model -- The model to evaluate.
test_case -- Which of ood or correlated setups to run.
label -- The label we're predicting.
property_label -- Which property is treated as in or out of
distribution (for the ood test_case), is correlated with the label
(for the correlated setup) and is treated as having a low data region
(for the low_data setup).
algorithm -- What algorithm to use for training.
number_of_seeds -- How many seeds to evaluate the models with.
batch_size -- Batch size used for training and evaluation.
training_steps -- How many steps to train for.
pretrained_checkpoint -- Path to a checkpoint for a pretrained model.
overwrite_image_size -- Height and width to resize the images to. 0 means
no resizing.
eval_specific_ckpt -- Path to a checkpoint for a one time evaluation.
wids -- Which wids of the checkpoint to look at.
sweep_index -- Which experiment from the sweep to run.
use_fake_data -- Whether to use fake data for testing.
Returns:
ConfigDict: A dictionary of parameters.
"""
options = parse_options(
options,
defaults={
'dataset_name': 'dsprites',
'model': 'resnet18',
'test_case': 'ood',
'label': 'label_shape',
'property_label': 'label_color',
'algorithm': 'ERM',
'number_of_seeds': 1,
'batch_size': 128,
'training_steps': 100_000,
'pretrained_checkpoint': '',
'overwrite_image_size': 0, # Zero means no resizing.
'eval_specific_ckpt': '',
'wids': '1-1',
'sweep_index': 0,
'use_fake_data': False,
})
assert options['dataset_name'] in DATASETS
assert options['algorithm'] in ALGORITHMS
if options['algorithm'] in LEARNERS:
learner = options['algorithm']
adapter = ''
train_adapter = ''
postprocessor = ''
  else:
    learner = 'ERM'
    # Initialise all three so the checks further below never hit an
    # undefined name.
    adapter = ''
    train_adapter = ''
    postprocessor = ''
    if options['algorithm'] in ADAPTERS:
      adapter = options['algorithm']
    elif options['algorithm'] in TRAIN_ADAPTERS:
      train_adapter = options['algorithm']
    elif options['algorithm'] in POSTPROCESSORS:
      postprocessor = options['algorithm']
config = base_config.get_base_config()
config.random_seed = 0
config.checkpoint_dir = '/tmp'
config.train_checkpoint_all_hosts = False
training_steps = options['training_steps']
config.experiment_kwargs = ml_collections.ConfigDict()
exp = config.experiment_kwargs.config = ml_collections.ConfigDict()
exp.use_fake_data = options['use_fake_data']
exp.enable_double_transpose = False
# Training.
exp.training = ml_collections.ConfigDict()
exp.training.use_gt_images = False
exp.training.save_images = False
exp.training.batch_size = options['batch_size']
exp.training.adversarial_weight = 1.
exp.training.label_noise = 0.0
# Evaluation.
exp.evaluation = ml_collections.ConfigDict()
exp.evaluation.batch_size = options['batch_size']
exp.evaluation.metrics = ['top1_accuracy']
# Optimizer.
exp.optimizer = ml_collections.ConfigDict()
exp.optimizer.name = 'adam'
exp.optimizer.kwargs = dict(learning_rate=0.001)
# Data.
exp.data = ml_collections.ConfigDict()
if data_utils.is_disentanglement_dataset(options['dataset_name']):
exp.data = disentanglement_config.get_renderers(
options['test_case'], dataset_name=options['dataset_name'],
label=options['label'],
property_label=options['property_label'])
data_sweep = disentanglement_config.get_renderer_sweep(
options['test_case'])
else:
dataset_name = options['dataset_name']
raise ValueError(f'Unsupported dataset {dataset_name}')
if exp.use_fake_data:
    # The data loaders skip validation and test samples, and the default sizes
    # are so large that we would have to generate too many fake datapoints.
batch_size = options['batch_size']
if options['dataset_name'] in ('dsprites', 'shapes3d'):
exp.data.train_kwargs.load_kwargs.dataset_kwargs.valid_size = batch_size
exp.data.train_kwargs.load_kwargs.dataset_kwargs.test_size = batch_size
exp.data.test_kwargs.load_kwargs.valid_size = batch_size
exp.data.test_kwargs.load_kwargs.test_size = batch_size
elif options['dataset_name'] == 'small_norb':
exp.data.train_kwargs.load_kwargs.dataset_kwargs.valid_size = batch_size
exp.data.test_kwargs.load_kwargs.valid_size = batch_size
# Model.
model = options['model']
exp.model, model_sweep = globals()[f'get_{model}_config'](
num_classes=exp.data.n_classes, resize_to=options['overwrite_image_size'])
exp.pretrained_checkpoint = options['pretrained_checkpoint']
# Learning algorithm.
exp.training.algorithm, learner_sweep = get_learner(
learner, model, exp.data.n_classes)
# Test time adaptation.
if adapter:
exp.adapter = get_adapter(adapter, exp.data.n_properties)
else:
exp.adapter = ml_collections.ConfigDict()
# Adapt training parameters and state.
if train_adapter:
exp.training.learn_adapt = get_train_adapter(
train_adapter, training_steps=training_steps)
else:
exp.training.learn_adapt = ml_collections.ConfigDict()
# Postprocessing.
if postprocessor:
exp.postprocess = get_postprocessing_step(postprocessor)
else:
exp.postprocess = ml_collections.ConfigDict()
if exp.data.train_kwargs.load_kwargs.get('shuffle_pre_sampling', False):
exp_train_kwargs = 'config.experiment_kwargs.config.data.train_kwargs.'
seeds = list(range(options['number_of_seeds']))
random_seedsweep = hyper.zipit([
hyper.sweep('config.random_seed', seeds),
hyper.sweep(f'{exp_train_kwargs}load_kwargs.shuffle_pre_sample_seed',
seeds)])
else:
random_seedsweep = hyper.sweep('config.random_seed',
list(range(options['number_of_seeds'])))
all_sweeps = hyper.product(
[random_seedsweep] + [data_sweep] + model_sweep + learner_sweep)
dataset_name = options['dataset_name']
config.autoxprof_warmup_steps = 5
config.autoxprof_measure_time_seconds = 50
  # Use step-based intervals so that runs are comparable across models with
  # different speeds.
config.interval_type = 'steps'
config.training_steps = training_steps
config.log_train_data_interval = 1_000
config.log_tensors_interval = 1_000
config.save_checkpoint_interval = 1_000
config.eval_specific_checkpoint_dir = options['eval_specific_ckpt']
if options['eval_specific_ckpt']:
min_wid, max_wid = [int(w) for w in options['wids'].split('-')]
config.eval_only = True
config.one_off_evaluate = True
all_sweeps = hyper.product([hyper.zipit([
hyper.sweep('config.eval_specific_checkpoint_dir',
[options['eval_specific_ckpt'].format(wid=w)
for w in range(min_wid, max_wid+1)]),
all_sweeps])])
else:
config.eval_only = False
config.best_model_eval_metric = 'top1_accuracy'
config.update_from_flattened_dict(all_sweeps[options['sweep_index']],
'config.')
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
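# Illustrative usage sketch (added for this write-up): building a config from
# an option string. The particular option values are placeholders.
def _get_config_example() -> ml_collections.ConfigDict:
  return get_config('dataset_name=dsprites,algorithm=ERM,model=resnet18,'
                    'batch_size=8,training_steps=100,use_fake_data=True,'
                    'sweep_index=0')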
def get_postprocessing_step(postprocessing_name: str
) -> ml_collections.ConfigDict:
"""Config for postprocessing steps."""
postprocess = ml_collections.ConfigDict()
postprocess.fn = getattr(postprocessing, postprocessing_name)
postprocess.kwargs = ml_collections.ConfigDict()
if postprocessing_name == 'mixup':
postprocess.kwargs.alpha = 0.2
postprocess.kwargs.beta = 0.2
return postprocess
def get_train_adapter(adapter_name: str, training_steps: int
) -> ml_collections.ConfigDict:
"""Config for adapting the training parameters."""
adapter = ml_collections.ConfigDict()
adapter.fn = getattr(adapt_train, adapter_name)
adapter.kwargs = ml_collections.ConfigDict()
if adapter_name == 'JTT':
adapter.kwargs.lmbda = 20
adapter.kwargs.num_steps_in_first_iter = training_steps // 2
return adapter
def get_adapter(adapt_name: str, num_properties: int
) -> ml_collections.ConfigDict:
"""Config for how to adapt the model at test time."""
adapter = ml_collections.ConfigDict()
adapter.fn = getattr(adapt, adapt_name)
adapter.kwargs = ml_collections.ConfigDict(dict(n_properties=num_properties))
adapter.num_adaptation_steps = 1_000
return adapter
def get_learner(learner_name: str,
model_name: str,
num_classes: int = 10) -> ConfigAndSweeps:
"""Config for which learning algorithm to use."""
learner = ml_collections.ConfigDict()
learner.fn = getattr(algorithms, learner_name)
learner.kwargs = ml_collections.ConfigDict()
learner_sweep = []
exp_algthm = f'{_EXP}.training.algorithm'
if learner_name == 'IRM':
learner.kwargs.lambda_penalty = 1.
learner_sweep = [
hyper.sweep(f'{exp_algthm}.kwargs.lambda_penalty',
[0.01, 0.1, 1, 10])
]
elif learner_name == 'DANN':
learner.kwargs.mlp_output_sizes = ()
exp = f'{_EXP}.training'
learner_sweep = [
hyper.sweep(f'{exp}.adversarial_weight',
[0.01, 0.1, 1, 10]),
hyper.sweep(f'{exp_algthm}.kwargs.mlp_output_sizes',
[(64, 64)])
]
elif learner_name == 'CORAL':
learner.kwargs.coral_weight = 1.
learner_sweep = [
hyper.sweep(f'{exp_algthm}.kwargs.coral_weight',
[0.01, 0.1, 1, 10])
]
elif learner_name == 'SagNet':
if model_name == 'truncatedresnet18':
learner.kwargs.content_net_kwargs = ml_collections.ConfigDict(dict(
output_sizes=(num_classes,)))
learner.kwargs.style_net_kwargs = ml_collections.ConfigDict(dict(
output_sizes=(num_classes,)))
else:
learner.kwargs.content_net_kwargs = ml_collections.ConfigDict(dict(
output_sizes=(64, 64, num_classes)))
learner.kwargs.style_net_kwargs = ml_collections.ConfigDict(dict(
output_sizes=(64, 64, num_classes)))
return learner, learner_sweep
def _get_resizer(size: Optional[int]) -> Callable[[chex.Array], chex.Array]:
if size is not None and size > 0:
return functools.partial(data_utils.resize, size=(size, size))
return lambda x: x
def get_mlp_config(n_layers: int = 4, n_hidden: int = 256,
num_classes: int = 10, resize_to: Optional[int] = None
) -> ConfigAndSweeps:
"""Returns an MLP config and sweeps."""
resize = _get_resizer(resize_to)
mlp = ml_collections.ConfigDict(dict(
constructor=hk.nets.MLP,
kwargs=dict(output_sizes=[n_hidden] * n_layers + [num_classes]),
preprocess=lambda x: resize(x).reshape((x.shape[0], -1))))
sweep = hyper.sweep(f'{_EXP}.optimizer.kwargs.learning_rate',
[0.01, 0.001, 1e-4])
return mlp, [sweep]
def get_resnet18_config(num_classes: int = 10,
resize_to: Optional[int] = None) -> ConfigAndSweeps:
cnn = ml_collections.ConfigDict(dict(
constructor=hk.nets.ResNet18,
kwargs=dict(num_classes=num_classes),
preprocess=_get_resizer(resize_to)))
sweep = hyper.sweep(f'{_EXP}.optimizer.kwargs.learning_rate',
[0.01, 0.001, 1e-4])
return cnn, [sweep]
def get_resnet50_config(num_classes: int = 10,
resize_to: Optional[int] = None) -> ConfigAndSweeps:
cnn = ml_collections.ConfigDict(dict(
constructor=hk.nets.ResNet50,
kwargs=dict(num_classes=num_classes),
preprocess=_get_resizer(resize_to)))
sweep = hyper.sweep(f'{_EXP}.optimizer.kwargs.learning_rate',
[0.01, 0.001, 1e-4])
return cnn, [sweep]
def get_resnet101_config(num_classes: int = 10,
resize_to: Optional[int] = None) -> ConfigAndSweeps:
cnn = ml_collections.ConfigDict(dict(
constructor=hk.nets.ResNet101,
kwargs=dict(num_classes=num_classes),
preprocess=_get_resizer(resize_to)))
sweep = hyper.sweep(f'{_EXP}.optimizer.kwargs.learning_rate',
[0.01, 0.001, 1e-4])
return cnn, [sweep]
def get_truncatedresnet18_config(
num_classes: int = 10, resize_to: Optional[int] = None) -> ConfigAndSweeps:
"""Config for a truncated ResNet."""
cnn = ml_collections.ConfigDict(dict(
constructor=resnet.ResNet18,
kwargs=dict(num_classes=num_classes),
preprocess=_get_resizer(resize_to)))
sweep = hyper.sweep(f'{_EXP}.optimizer.kwargs.learning_rate',
[0.01, 0.001, 1e-4])
return cnn, [sweep]
| distribution_shift_framework-master | distribution_shift_framework/classification/config.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/classification/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the formalisation pipeline."""
import functools
from absl import app
from absl import flags
from distribution_shift_framework.classification import experiment_lib
from jaxline import platform
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, experiment_lib.Experiment))
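# Example invocation (illustrative, added comment). Jaxline experiments are
# typically launched with the `--config` flag marked as required above, e.g.:
#   python -m distribution_shift_framework.classification.experiment \
#     --config=<path/to>/classification/config.py:"dataset_name=dsprites,algorithm=ERM"
# The module path and the option string are assumptions; only `--config`
# itself is mandated by the code above.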
| distribution_shift_framework-master | distribution_shift_framework/classification/experiment.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution_shift_framework.classification.experiment_lib."""
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
from distribution_shift_framework.classification import config
from distribution_shift_framework.classification import experiment_lib
import jax
from jaxline import platform
_PREV_JAX_CONFIG = None
def setUpModule():
global _PREV_JAX_CONFIG
_PREV_JAX_CONFIG = jax.config.values.copy()
# Disable jax optimizations to speed up test.
jax.config.update('jax_disable_most_optimizations', True)
def tearDownModule():
# Set config to previous values.
jax.config.values.update(**_PREV_JAX_CONFIG)
class ExperimentLibTest(parameterized.TestCase):
@parameterized.parameters([
# Different algorithms.
dict(algorithm='CORAL', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='DANN', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='IRM', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
dict(algorithm='SagNet', test_case='ood', model='resnet18',
dataset_name='dsprites', label='label_shape',
property_label='label_color', number_of_seeds=1),
# Different datasets.
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='small_norb', label='label_category',
property_label='label_azimuth', number_of_seeds=1),
dict(algorithm='ERM', test_case='ood', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
# Different test cases.
dict(algorithm='ERM', test_case='lowdata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='correlated.lowdata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='lowdata.noise', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
dict(algorithm='ERM', test_case='lowdata.fixeddata', model='resnet18',
dataset_name='shapes3d', label='label_shape',
property_label='label_object_hue', number_of_seeds=1),
])
def test_train(self, **kwargs):
kwargs['training_steps'] = 3
kwargs['use_fake_data'] = True
kwargs['batch_size'] = 8
options = ','.join([f'{k}={v}' for k, v in kwargs.items()])
cfg = config.get_config(options)
with flagsaver.flagsaver(config=cfg, jaxline_mode='train'):
platform.main(experiment_lib.Experiment, [])
if __name__ == '__main__':
absltest.main()
| distribution_shift_framework-master | distribution_shift_framework/classification/experiment_lib_test.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create and combine hyper parameter sweeps."""
import functools
import itertools
from typing import Any, Dict, Iterable, List, Sequence
# A sweep is a list of parameter mappings that defines a set of experiments.
Sweep = List[Dict[str, Any]]
def sweep(parameter_name: str, values: Iterable[Any]) -> Sweep:
"""Creates a sweep from a list of values for a parameter."""
return [{parameter_name: value} for value in values]
def product(sweeps: Sequence[Sweep]) -> Sweep:
"""Builds a sweep from the cartesian product of a list of sweeps."""
return [functools.reduce(_combine_parameter_dicts, param_dicts, {})
for param_dicts in itertools.product(*sweeps)]
def zipit(sweeps: Sequence[Sweep]) -> Sweep:
"""Builds a sweep from zipping a list of sweeps."""
return [functools.reduce(_combine_parameter_dicts, param_dicts, {})
for param_dicts in zip(*sweeps)]
def _combine_parameter_dicts(x: Dict[str, Any], y: Dict[str, Any]
) -> Dict[str, Any]:
if x.keys() & y.keys():
raise ValueError('Cannot combine sweeps that set the same parameters. '
                     f'Keys in x: {x.keys()}, keys in y: {y.keys()}, '
f'overlap: {x.keys() & y.keys()}')
return {**x, **y}
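# Illustrative sketch (added for this write-up) of how the helpers compose;
# the parameter names below are hypothetical.
def _example_sweep() -> Sweep:
  seeds = sweep('config.random_seed', [0, 1])
  lrs = sweep('config.lr', [1e-3, 1e-4])
  # `product` yields all 4 combinations; `zipit` pairs them element-wise into 2.
  return product([seeds, lrs]) + zipit([seeds, lrs])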
| distribution_shift_framework-master | distribution_shift_framework/core/hyper.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution_shift_framework.core.hyper."""
from absl.testing import absltest
from absl.testing import parameterized
from distribution_shift_framework.core import hyper
class HyperTest(parameterized.TestCase):
@parameterized.parameters([
dict(parameter_name='a', values=[1, 2, 3],
expected_sweep=[{'a': 1}, {'a': 2}, {'a': 3}]),
dict(parameter_name='b', values=[.1, .2, .3],
expected_sweep=[{'b': .1}, {'b': .2}, {'b': .3}]),
dict(parameter_name='c', values=[True, False],
expected_sweep=[{'c': True}, {'c': False}]),
dict(parameter_name='d', values=['one', 'two', 'three'],
expected_sweep=[{'d': 'one'}, {'d': 'two'}, {'d': 'three'}]),
dict(parameter_name='e', values=[1, 0.5, True, 'string'],
expected_sweep=[{'e': 1}, {'e': 0.5}, {'e': True}, {'e': 'string'}]),
dict(parameter_name='f', values=[],
expected_sweep=[]),
])
def test_sweep(self, parameter_name, values, expected_sweep):
self.assertEqual(expected_sweep, hyper.sweep(parameter_name, values))
@parameterized.parameters([
dict(sweeps=[],
expected_sweep=[{}]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3, 4, 5, 6])],
expected_sweep=[
{'param1': 1}, {'param1': 2}, {'param1': 3},
{'param1': 4}, {'param1': 5}, {'param1': 6},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3]),
hyper.sweep('param2', [4, 5, 6])],
expected_sweep=[
{'param1': 1, 'param2': 4},
{'param1': 1, 'param2': 5},
{'param1': 1, 'param2': 6},
{'param1': 2, 'param2': 4},
{'param1': 2, 'param2': 5},
{'param1': 2, 'param2': 6},
{'param1': 3, 'param2': 4},
{'param1': 3, 'param2': 5},
{'param1': 3, 'param2': 6},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2]),
hyper.sweep('param2', [3, 4]),
hyper.sweep('param3', [5, 6])],
expected_sweep=[
{'param1': 1, 'param2': 3, 'param3': 5},
{'param1': 1, 'param2': 3, 'param3': 6},
{'param1': 1, 'param2': 4, 'param3': 5},
{'param1': 1, 'param2': 4, 'param3': 6},
{'param1': 2, 'param2': 3, 'param3': 5},
{'param1': 2, 'param2': 3, 'param3': 6},
{'param1': 2, 'param2': 4, 'param3': 5},
{'param1': 2, 'param2': 4, 'param3': 6},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2., 'Three']),
hyper.sweep('param2', [True, 'Two', 3.0])],
expected_sweep=[
{'param1': 1, 'param2': True},
{'param1': 1, 'param2': 'Two'},
{'param1': 1, 'param2': 3.0},
{'param1': 2., 'param2': True},
{'param1': 2., 'param2': 'Two'},
{'param1': 2., 'param2': 3.0},
{'param1': 'Three', 'param2': True},
{'param1': 'Three', 'param2': 'Two'},
{'param1': 'Three', 'param2': 3.0},
]),
])
def test_product(self, sweeps, expected_sweep):
self.assertEqual(expected_sweep, hyper.product(sweeps))
def test_product_raises_valueerror_for_same_name(self):
sweep1 = hyper.sweep('param1', [1, 2, 3])
sweep2 = hyper.sweep('param2', [4, 5, 6])
sweep3 = hyper.sweep('param1', [7, 8, 9])
with self.assertRaises(ValueError):
hyper.product([sweep1, sweep2, sweep3])
@parameterized.parameters([
dict(sweeps=[],
expected_sweep=[]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3, 4, 5, 6])],
expected_sweep=[
{'param1': 1}, {'param1': 2}, {'param1': 3},
{'param1': 4}, {'param1': 5}, {'param1': 6},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3]),
hyper.sweep('param2', [4, 5, 6])],
expected_sweep=[
{'param1': 1, 'param2': 4},
{'param1': 2, 'param2': 5},
{'param1': 3, 'param2': 6},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3]),
hyper.sweep('param2', [4, 5, 6]),
hyper.sweep('param3', [7, 8, 9])],
expected_sweep=[
{'param1': 1, 'param2': 4, 'param3': 7},
{'param1': 2, 'param2': 5, 'param3': 8},
{'param1': 3, 'param2': 6, 'param3': 9},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2., 'Three']),
hyper.sweep('param2', [True, 'Two', 3.0])],
expected_sweep=[
{'param1': 1, 'param2': True},
{'param1': 2., 'param2': 'Two'},
{'param1': 'Three', 'param2': 3.0},
]),
dict(sweeps=[hyper.sweep('param1', [1, 2, 3]),
hyper.sweep('param2', [4, 5, 6, 7])],
expected_sweep=[
{'param1': 1, 'param2': 4},
{'param1': 2, 'param2': 5},
{'param1': 3, 'param2': 6},
]),
])
def test_zipit(self, sweeps, expected_sweep):
self.assertEqual(expected_sweep, hyper.zipit(sweeps))
def test_zipit_raises_valueerror_for_same_name(self):
sweep1 = hyper.sweep('param1', [1, 2, 3])
sweep2 = hyper.sweep('param2', [4, 5, 6])
sweep3 = hyper.sweep('param1', [7, 8, 9])
with self.assertRaises(ValueError):
hyper.zipit([sweep1, sweep2, sweep3])
if __name__ == '__main__':
absltest.main()
| distribution_shift_framework-master | distribution_shift_framework/core/hyper_test.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/core/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for distribution_shift_framework.core.checkpointing."""
import os
from absl.testing import absltest
from absl.testing import parameterized
from distribution_shift_framework.core import checkpointing
import jax
import ml_collections
import numpy as np
import numpy.testing as npt
import tensorflow as tf
class CheckpointingTest(parameterized.TestCase):
@parameterized.parameters([
dict(data={}),
dict(data={'params': []}),
dict(data={'state': None}),
dict(data={'params': 3, 'stuff': 5.3, 'something': 'anything'}),
dict(data={'params': {'stuff': 5.3, 'something': 'anything'}}),
dict(data={'params': {'stuff': {'something': 'anything'}}}),
dict(data={'params': {'stuff': {'something': np.random.rand(4, 3, 2)}}}),
])
def test_load_and_save_model(self, data):
ckpt_file = os.path.join(self.create_tempdir(), 'ckpt.pkl')
checkpointing.save_model(ckpt_file, data)
loaded_data = checkpointing.load_model(ckpt_file)
loaded_leaves, loaded_treedef = jax.tree_flatten(loaded_data)
leaves, treedef = jax.tree_flatten(data)
for leaf, loaded_leaf in zip(leaves, loaded_leaves):
npt.assert_array_equal(leaf, loaded_leaf)
self.assertEqual(treedef, loaded_treedef)
def test_empty_checkpoint_dir(self):
config = ml_collections.ConfigDict()
config.checkpoint_dir = None
self.assertIsNone(checkpointing.get_checkpoint_dir(config))
def test_get_checkpoint_dir(self):
config = ml_collections.ConfigDict()
temp_dir = self.create_tempdir()
config.checkpoint_dir = os.path.join(temp_dir, 'my_exp')
self.assertFalse(tf.io.gfile.exists(config.checkpoint_dir))
config.host_subdir = 'prefix_{host_id}_postfix'
path = checkpointing.get_checkpoint_dir(config)
self.assertEqual(os.path.join(temp_dir, 'my_exp', 'prefix_0_postfix'), path)
self.assertTrue(tf.io.gfile.exists(path))
if __name__ == '__main__':
absltest.main()
| distribution_shift_framework-master | distribution_shift_framework/core/checkpointing_test.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adaptation algorithms for modifying model parameters."""
import abc
from typing import Callable, Sequence
from absl import logging
import chex
from distribution_shift_framework.core.datasets import data_utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
def _broadcast(tensor1, tensor2):
num_ones = len(tensor1.shape) - len(tensor2.shape)
return tensor2.reshape(tensor2.shape + (1,) * num_ones)
def _bt_mult(tensor1, tensor2):
tensor2 = _broadcast(tensor1, tensor2)
return tensor1 * tensor2
def _get_mean(tensor):
if len(tensor.shape) == 1:
return jnp.mean(tensor, keepdims=True)
else:
return jnp.mean(tensor, axis=(0, 1), keepdims=True)
def _split_and_reshape(tree1, tree2):
"""Resize tree1 look like tree2 and return the resized tree and the modulo."""
tree1_reshaped = jax.tree_map(
lambda a, b: a[:np.prod(b.shape[0:2])].reshape(b.shape), tree1, tree2)
tree1_modulo = jax.tree_map(lambda a, b: a[np.prod(b.shape[0:2]):], tree1,
tree2)
return tree1_reshaped, tree1_modulo
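# Worked example (illustrative, added comment): if tree2 holds arrays of shape
# (2, 8, ...) (devices, per-device batch) and tree1 holds 20 cached rows, the
# first 16 rows are reshaped to (2, 8, ...) and the remaining 4 come back as
# the leftover tree.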
class Adapt(abc.ABC):
"""Class to encapsulate an adaptation framework."""
@abc.abstractmethod
def __init__(self, init_params: optax.Params, init_state: optax.OptState,
forward: Callable[..., chex.Array]):
"""Initializes the adaptation algorithm.
This operates as follows. Given a number of examples, the model can update
the parameters as it sees fit. Then, the updated parameters are run on an
unseen test set.
Args:
init_params: The original parameters of the model.
init_state: The original state of the model.
forward: The forward call to the model.
"""
@abc.abstractmethod
def update(self, inputs: data_utils.Batch, property_label: chex.Array,
rng: chex.PRNGKey, **kwargs):
"""Updates the parameters of the adaptation algorithm.
Args:
inputs: The batch to be input to the model.
property_label: The properties of the image.
rng: The random key.
**kwargs: Keyword arguments specific to the forward function.
"""
@abc.abstractmethod
def run(self, fn: Callable[..., chex.Array], property_label: chex.Array,
**fn_kwargs):
"""Runs the adaptation algorithm on a given function.
Args:
fn: The function we wish to apply the adapted parameters to.
property_label: The property labels of the input values.
**fn_kwargs: Additional kwargs to be input to the function fn.
Returns:
The result of fn using the adapted parameters according to the
property_label value.
"""
class BNAdapt(Adapt):
"""Implements batch norm adaptation for a set of properties.
Given a set of properties, and initial parameters/state, the batch
normalization statistics are updated for each property value.
"""
def __init__(self,
init_params: optax.Params,
init_state: optax.OptState,
forward: Callable[..., chex.Array],
n_properties: int,
n: int = 10,
N: int = 100):
"""See parent."""
super().__init__(
init_params=init_params, init_state=init_state, forward=forward)
self.init_params = init_params
self.init_state = init_state
# Set the init state to 0. This will mean we always take the local stats.
self.empty_state = self._reset_state(self.init_state)
self.n_properties = n_properties
self.forward_fn = forward
self.adapted_state = {n: None for n in range(n_properties)}
self.interpolated_states = None
# Set up parameters that control the amount of adaptation.
self.w_new = n
self.w_old = N
# Set up the cached dataset values.
self._cached_dataset = [None] * self.n_properties
def _reset_state(self, old_state, keys=('average', 'hidden', 'counter')):
"""Set the average of the BN parameters to 0."""
state = hk.data_structures.to_mutable_dict(old_state)
for k in state.keys():
if 'batchnorm' in k and 'ema' in k:
logging.info('Resetting %s in BNAdapt.', k)
for state_key in keys:
state[k][state_key] = jnp.zeros_like(state[k][state_key])
state = hk.data_structures.to_haiku_dict(state)
return state
def _update_state(self, old_state, new_state, sz):
"""Update the state using the old and new running state."""
if old_state is None:
old_state = self._reset_state(self.init_state)
new_state = hk.data_structures.to_mutable_dict(new_state)
for k in new_state.keys():
if 'batchnorm' in k and 'ema' in k:
new_state_k = new_state[k]['average']
old_counter = _broadcast(old_state[k]['average'],
old_state[k]['counter'])
new_state_k = new_state_k * sz
old_state_k = old_state[k]['average'] * old_counter
counter = jnp.maximum(old_counter + sz, 1)
new_state[k]['average'] = (new_state_k + old_state_k) / counter
new_state[k]['counter'] = counter.squeeze()
new_state = hk.data_structures.to_haiku_dict(new_state)
return new_state
def _interpolate_state(self, old_state, new_state):
"""Update the state using the old and new running state."""
if new_state is None:
return old_state
new_state = hk.data_structures.to_mutable_dict(new_state)
new_ratio = self.w_new / (self.w_new + self.w_old)
old_ratio = self.w_old / (self.w_new + self.w_old)
for k in new_state.keys():
if 'batchnorm' in k and 'ema' in k:
new_state[k]['average'] = (
new_state[k]['average'] * new_ratio +
old_state[k]['average'] * old_ratio)
new_state = hk.data_structures.to_haiku_dict(new_state)
return new_state
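  # Worked example (illustrative, added comment): with the defaults n=10 and
  # N=100, new_ratio = 10/110 ~= 0.09 and old_ratio = 100/110 ~= 0.91, so an
  # adapted batch-norm mean of 0.5 blended with an initial mean of 0.1 gives
  # roughly 0.5 * 0.09 + 0.1 * 0.91 ~= 0.14.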
def update(self, inputs: data_utils.Batch, property_label: chex.Array,
rng: chex.PRNGKey, **kwargs):
"""See parent."""
# First, update cached data.
for n in range(0, self.n_properties):
mask = property_label == n
masked_batch = jax.tree_map(lambda a: a[mask], inputs) # pylint: disable=cell-var-from-loop
if self._cached_dataset[n] is None:
self._cached_dataset[n] = masked_batch
else:
self._cached_dataset[n] = jax.tree_map(lambda *a: jnp.concatenate(a),
self._cached_dataset[n],
masked_batch)
# Then, if there are enough samples of a property, update the BN stats.
for n in range(0, self.n_properties):
# Update the adapted states with the output of the property labels.
if (self._cached_dataset[n]['image'].shape[0] < np.prod(
inputs['image'].shape[0:2])):
continue
# There are enough samples to do a forward pass.
batch, mod_batch = _split_and_reshape(self._cached_dataset[n], inputs)
_, state = self.forward_fn(self.init_params, self.empty_state, rng, batch,
**kwargs)
# Take the average over the cross replicas.
state = jax.tree_map(_get_mean, state)
      self.adapted_state[n] = self._update_state(
          self.adapted_state[n], state, sz=np.prod(batch['image'].shape[:2]))
self._cached_dataset[n] = mod_batch
def set_up_eval(self):
self.interpolated_states = [
self._interpolate_state(
new_state=self.adapted_state[n], old_state=self.init_state)
for n in range(self.n_properties)
]
def run(self, fn: Callable[..., Sequence[chex.Array]],
property_label: chex.Array, **fn_kwargs):
"""See parent."""
# Get the results for the initial parameters and state.
result = fn(self.init_params, self.init_state, **fn_kwargs)
# Compute the results for each set of properties.
for n in range(0, self.n_properties):
mask = property_label == n
if mask.sum() == 0:
continue
# And update the result.
result_prop = fn(self.init_params, self.interpolated_states[n],
**fn_kwargs)
result = [
_bt_mult(r, (1 - mask)) + _bt_mult(r_prop, mask)
for r, r_prop in zip(result, result_prop)
]
return result
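if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the convex
  # combination that `BNAdapt._interpolate_state` applies to batch-norm EMA
  # averages, restated on plain arrays. The weights mirror the constructor
  # defaults n=10 (new/local statistics) and N=100 (old/training statistics).
  n_new, n_old = 10, 100
  w_new = n_new / (n_new + n_old)
  w_old = n_old / (n_new + n_old)
  adapted_average = jnp.ones((4,))    # Stand-in for an adapted BN average.
  training_average = jnp.zeros((4,))  # Stand-in for the pre-trained average.
  interpolated = adapted_average * w_new + training_average * w_old
  print(interpolated)  # ~0.09 everywhere: mostly the pre-trained statistics.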
| distribution_shift_framework-master | distribution_shift_framework/core/adapt.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Just train twice algorithm."""
import abc
from typing import Callable, Tuple
import chex
from distribution_shift_framework.core.datasets import data_utils
import jax.numpy as jnp
import optax
Learner = Tuple[Tuple[data_utils.ScalarDict, chex.Array], optax.OptState]
LearnerFN = Callable[..., Learner]
class Adapt(abc.ABC):
"""Encasuplates adapting parameters and state with auxiliary information.
Given some initial set of parameters and the loss to be optimized, this
set of classes is free to update the underlying parameters via adaptation
based on difficulty of samples (e.g. JTT) or via EWA.
"""
@abc.abstractmethod
def update(self, params: optax.Params, state: optax.OptState,
global_step: chex.Array):
"""Updates and returns the new parameters and state.
Args:
params: The parameters returned at this step.
state: The state returned at this step.
global_step: The training step.
Returns:
The updated params and state.
"""
@abc.abstractmethod
def __call__(self, fn: LearnerFN, params: optax.Params, state: optax.OptState,
global_step: chex.Array, inputs: data_utils.Batch,
rng: chex.PRNGKey) -> Tuple[data_utils.ScalarDict, chex.Array]:
"""Adapts the stored parameters according to the given information.
Args:
fn: The loss function.
params: The parameters of the model at this step.
state: The state of the model at this step.
global_step: The step in the training pipeline.
inputs: The inputs to the loss function.
rng: The random key
Returns:
The scalars and logits which have been appropriately adapted.
"""
class JTT(Adapt):
"""Implementation of JTT algorithm."""
def __init__(self, lmbda: float, num_steps_in_first_iter: int):
"""Implementation of JTT.
This algorithm first trains for some number of steps on the full training
set. After this first stage, the parameters at the end of this stage are
used to select the most difficult samples (those that are misclassified)
and penalize the loss more heavily for these examples.
Args:
lmbda: How much to upsample the misclassified examples.
num_steps_in_first_iter: How long to train on full dataset before
computing the error set and reweighting misclassified samples.
"""
super().__init__()
self.lmbda = lmbda
self.num_steps_in_first_iter = num_steps_in_first_iter
self.init_params = None
self.init_state = None
def update(self, params: optax.Params, state: optax.OptState,
global_step: chex.Array):
"""See parent."""
if global_step < self.num_steps_in_first_iter:
self.init_params = params
self.init_state = state
return params, state
return self.init_params, self.init_state
def set(self, params: optax.Params, state: optax.OptState):
self.init_params = params
self.init_state = state
def __call__(
self, fn: LearnerFN, params: optax.Params, state: optax.OptState,
old_params: optax.Params, old_state: optax.OptState,
global_step: chex.Array, inputs: data_utils.Batch,
rng: chex.PRNGKey) -> Learner:
"""See parent."""
# Get the correct predictions with the params from the 1st training stage.
(scalars, logits), g_state = fn(old_params, old_state, rng, inputs)
predicted_label = jnp.argmax(logits, axis=-1)
correct = jnp.equal(predicted_label, inputs['label']).astype(jnp.float32)
# And now use this to reweight the current loss.
(scalars, logits), g_state = fn(params, state, rng, inputs)
new_loss = ((1 - correct) * scalars['loss'] * self.lmbda +
correct * scalars['loss'])
# And return the correct loss for the stage of training.
in_first_stage = global_step < self.num_steps_in_first_iter
scalars['1stiter_loss'] = scalars['loss'].mean()
scalars['loss'] = (scalars['loss'] * in_first_stage + new_loss *
(1 - in_first_stage)).mean()
return (scalars, logits), g_state
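if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the per-example
  # reweighting used in `JTT.__call__` after the first training stage. The
  # toy losses and correctness mask are assumptions.
  per_example_loss = jnp.array([0.2, 1.5, 0.3, 2.0])
  correct = jnp.array([1.0, 0.0, 1.0, 0.0])  # 1 = first-stage model was right.
  lmbda = 5.0
  reweighted = ((1 - correct) * per_example_loss * lmbda +
                correct * per_example_loss)
  print(reweighted.mean())  # Misclassified examples dominate the mean loss.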
| distribution_shift_framework-master | distribution_shift_framework/core/adapt_train.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Checkpointing code.
"""
import os
import pickle
from typing import Mapping, Optional
import jax
import ml_collections
import optax
import tensorflow as tf
def load_model(checkpoint_path: str) -> Mapping[str, optax.Params]:
with tf.io.gfile.GFile(checkpoint_path, 'rb') as f:
return pickle.load(f)
def save_model(checkpoint_path: str,
ckpt_dict: Mapping[str, optax.Params]):
with tf.io.gfile.GFile(checkpoint_path, 'wb') as f:
# Using protocol 4 as it's the default from Python 3.8 on.
pickle.dump(ckpt_dict, f, protocol=4)
def get_checkpoint_dir(config: ml_collections.ConfigDict) -> Optional[str]:
"""Constructs the checkpoint directory from the config."""
if config.checkpoint_dir is None:
return None
path = os.path.join(config.checkpoint_dir,
config.host_subdir.format(host_id=jax.process_index()))
if not tf.io.gfile.exists(path):
tf.io.gfile.makedirs(path)
return path
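if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: a save/load round
  # trip with a toy checkpoint dict. The temporary local path is an
  # assumption; any path accepted by tf.io.gfile works.
  import tempfile
  import numpy as np
  with tempfile.TemporaryDirectory() as tmp_dir:
    ckpt_path = os.path.join(tmp_dir, 'checkpoint.pkl')
    save_model(ckpt_path, {'params': {'linear': {'w': np.zeros((2, 2))}}})
    restored = load_model(ckpt_path)
    assert restored['params']['linear']['w'].shape == (2, 2)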
| distribution_shift_framework-master | distribution_shift_framework/core/checkpointing.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List of standard evaluation metrics."""
from typing import Dict, Sequence
import chex
import jax
import jax.numpy as jnp
import numpy as np
import scipy.stats as stats
import sklearn
import sklearn.metrics
def pearson(predictions: chex.Array, labels: chex.Array) -> chex.Scalar:
"""Computes the Pearson correlation coefficient.
Assumes all inputs are numpy arrays.
Args:
predictions: The predicted class labels.
labels: The true class labels.
Returns:
cc: The predicted Pearson correlation coefficient.
"""
cc = stats.pearsonr(predictions, labels)[0]
return cc
def f1_score(average: chex.Array,
predictions: chex.Array,
labels: chex.Array) -> chex.Scalar:
"""Computes the F1 score.
Assumes all inputs are numpy arrays.
Args:
average: How to accumulate the f1 score (macro or weighted).
predictions: The predicted class labels.
labels: The true class labels.
Returns:
f1: The predicted f1 score.
"""
f1 = sklearn.metrics.f1_score(
predictions, labels, average=average, labels=np.unique(labels))
return f1
def recall_score(average: chex.Array,
predictions: chex.Array,
labels: chex.Array) -> chex.Scalar:
"""Computes the recall score.
Assumes all inputs are numpy arrays.
Args:
average: How to accumulate the recall score (macro or weighted).
predictions: The predicted class labels.
labels: The true class labels.
Returns:
recall: The predicted recall.
"""
recall = sklearn.metrics.recall_score(
predictions, labels, average=average, labels=np.unique(labels))
return recall
def top_k_accuracy(logits: chex.Array,
labels: chex.Array,
k: int) -> chex.Scalar:
"""Compute top_k_accuracy.
Args:
logits: The network predictions.
labels: The true class labels.
k: Accuracy at what k.
Returns:
top_k_accuracy: The top k accuracy.
"""
chex.assert_equal_shape_prefix([logits, labels], 1)
chex.assert_rank(logits, 2) # [bs, k]
chex.assert_rank(labels, 1) # [bs]
_, top_ks = jax.vmap(lambda x: jax.lax.top_k(x, k=k))(logits)
return jnp.mean(jnp.sum(top_ks == labels[:, None], axis=-1))
def compute_all_metrics(predictions: chex.Array, labels: chex.Array,
metrics: Sequence[str]) -> Dict[str, chex.Scalar]:
"""Computes a set of metrics given the predictions and labels.
Args:
predictions: A tensor of shape (N, *): the predicted values.
labels: A tensor of shape (N, *): the ground truth values.
metrics: A sequence of strings describing the metrics to be evaluated.
This can be one of 'pearson' (to compute the pearson correlation
coefficient), 'f1_{average}', 'recall_{average}'. For f1 and
      recall the value {average} is defined in the scikit-learn API:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
Returns:
scalars: A dict containing (metric name, score) items with the metric name
and associated score as a float value.
"""
scalars = {}
for metric in metrics:
if metric == 'pearson':
scalars['pearson'] = pearson(predictions, labels)
elif 'f1' in metric:
scalars[metric] = f1_score(metric.split('_')[1], predictions, labels)
elif 'recall' in metric:
scalars[metric] = recall_score(metric.split('_')[1], predictions, labels)
return scalars
def top1_accuracy(labels, features, predictions, latents):
del features
del latents
return np.equal(predictions, labels).mean()
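if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: computing a few of
  # the metrics above on toy class predictions. The arrays are assumptions.
  toy_predictions = np.array([0, 1, 2, 2])
  toy_labels = np.array([0, 1, 1, 2])
  scores = compute_all_metrics(
      toy_predictions, toy_labels, metrics=('pearson', 'f1_macro'))
  scores['top1'] = top1_accuracy(
      toy_labels, features=None, predictions=toy_predictions, latents=None)
  print(scores)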
| distribution_shift_framework-master | distribution_shift_framework/core/metrics/metrics.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/core/metrics/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/core/datasets/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loader and preprocessing functions for the datasets."""
from typing import Optional, Sequence
import chex
from distribution_shift_framework.core.datasets import data_utils
import jax
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
def shapes3d_normalize(image: chex.Array) -> chex.Array:
return (image - .5) * 2
def shapes3d_preprocess(
mode: str = 'train'
) -> data_utils.TFPreprocessFn:
del mode
def _preprocess_fn(example):
example['image'] = tf.image.convert_image_dtype(
example['image'], dtype=tf.float32)
example['label'] = example['label_shape']
return example
return _preprocess_fn
def unbatched_load_shapes3d(subset: str = 'train',
valid_size: int = 10000,
test_size: int = 10000) -> data_utils.Dataset:
"""Loads the 3D Shapes dataset without batching."""
if subset == 'train':
ds = tfds.load(name='shapes3d', split=tfds.Split.TRAIN
).skip(valid_size + test_size)
elif subset == 'valid':
ds = tfds.load(name='shapes3d', split=tfds.Split.TRAIN
).skip(test_size).take(valid_size)
elif subset == 'train_and_valid':
ds = tfds.load(name='shapes3d', split=tfds.Split.TRAIN).skip(test_size)
elif subset == 'test':
ds = tfds.load(name='shapes3d', split=tfds.Split.TRAIN).take(test_size)
else:
raise ValueError('Unknown subset: "{}"'.format(subset))
return ds
def load_shapes3d(batch_sizes: Sequence[int],
subset: str = 'train',
is_training: bool = True,
num_samples: Optional[int] = None,
preprocess_fn: Optional[data_utils.PreprocessFnGen] = None,
transpose: bool = False,
valid_size: int = 10000,
test_size: int = 10000,
drop_remainder: bool = True,
local_cache: bool = True) -> data_utils.Dataset:
"""Loads the 3D Shapes dataset.
The 3D shapes dataset is available at https://github.com/deepmind/3d-shapes.
It consists of 4 different shapes which vary along 5 different axes:
- Floor hue: 10 colors with varying red, orange, yellow, green, blue
- Wall hue: 10 colors with varying red, orange, yellow, green, blue
- Object hue: 10 colors with varying red, orange, yellow, green, blue
- Scale: How large the object is.
- Shape: 4 values -- (cube, sphere, cylinder, and oblong).
- Orientation: Rotates the object around the vertical axis.
Args:
batch_sizes: Specifies how to batch examples. I.e., if batch_sizes = [8, 4]
then output images will have shapes (8, 4, height, width, 3).
    subset: Specifies which subset (train, valid, test or train_and_valid) to
      use.
is_training: Whether to infinitely repeat and shuffle examples (`True`) or
not (`False`).
num_samples: The number of samples to crop each individual dataset variant
from the start, or `None` to use the full dataset.
preprocess_fn: Function mapped onto each example for pre-processing.
transpose: Whether to permute image dimensions NHWC -> HWCN to speed up
performance on TPUs.
valid_size: Size of the validation set to take from the training set.
    test_size: Size of the test set to take from the training set.
drop_remainder: Whether to drop the last batch(es) if they would not match
the shapes specified by `batch_sizes`.
local_cache: Whether to locally cache the dataset.
Returns:
ds: Fully configured dataset ready for training/evaluation.
"""
if preprocess_fn is None:
preprocess_fn = shapes3d_preprocess
ds = unbatched_load_shapes3d(subset=subset, valid_size=valid_size,
test_size=test_size)
total_batch_size = np.prod(batch_sizes)
if subset == 'valid' and valid_size < total_batch_size:
ds = ds.repeat().take(total_batch_size)
ds = batch_and_shuffle(ds, batch_sizes,
is_training=is_training,
transpose=transpose,
num_samples=num_samples,
preprocess_fn=preprocess_fn,
drop_remainder=drop_remainder,
local_cache=local_cache)
return ds
def small_norb_normalize(image: chex.Array) -> chex.Array:
return (image - .5) * 2
def small_norb_preprocess(
mode: str = 'train'
) -> data_utils.TFPreprocessFn:
del mode
def _preprocess_fn(example):
example['image'] = tf.image.convert_image_dtype(
example['image'], dtype=tf.float32)
example['label'] = example['label_category']
return example
return _preprocess_fn
def unbatched_load_small_norb(subset: str = 'train',
valid_size: int = 10000) -> data_utils.Dataset:
"""Load the small norb dataset."""
if subset == 'train':
ds = tfds.load(name='smallnorb', split=tfds.Split.TRAIN).skip(valid_size)
elif subset == 'valid':
ds = tfds.load(name='smallnorb', split=tfds.Split.TRAIN).take(valid_size)
elif subset == 'train_and_valid':
ds = tfds.load(name='smallnorb', split=tfds.Split.TRAIN)
elif subset == 'test':
ds = tfds.load(name='smallnorb', split=tfds.Split.TEST)
else:
raise ValueError('Unknown subset: "{}"'.format(subset))
return ds
def load_small_norb(batch_sizes: Sequence[int],
subset: str = 'train',
is_training: bool = True,
num_samples: Optional[int] = None,
preprocess_fn: Optional[data_utils.PreprocessFnGen] = None,
transpose: bool = False,
valid_size: int = 1000,
drop_remainder: bool = True,
local_cache: bool = True) -> data_utils.Dataset:
"""Loads the small norb dataset.
The norb dataset is available at:
https://cs.nyu.edu/~ylclab/data/norb-v1.0-small/.
It consists of 5 categories (Animals, People, Airplanes, Trucks, and Cars).
These categories have 5 instances (different animals, airplanes, or types of
cars).
  They vary by the following factors (which are consistent across categories
  and instances):
1. Elevation
2. Azimuth
3. Lighting
Args:
batch_sizes: Specifies how to batch examples. I.e., if batch_sizes = [8, 4]
then output images will have shapes (8, 4, height, width, 3).
subset: Specifies the subset (train, valid, test or train_and_valid) to use.
is_training: Whether to infinitely repeat and shuffle examples (`True`) or
not (`False`).
num_samples: The number of samples to crop each individual dataset variant
from the start, or `None` to use the full dataset.
preprocess_fn: Function mapped onto each example for pre-processing.
transpose: Whether to permute image dimensions NHWC -> HWCN to speed up
performance on TPUs.
valid_size: The size of the validation set.
drop_remainder: Whether to drop the last batch(es) if they would not match
the shapes specified by `batch_sizes`.
local_cache: Whether to locally cache the dataset.
Returns:
ds: Fully configured dataset ready for training/evaluation.
"""
if preprocess_fn is None:
preprocess_fn = small_norb_preprocess
ds = unbatched_load_small_norb(subset=subset, valid_size=valid_size)
total_batch_size = np.prod(batch_sizes)
if subset == 'valid' and valid_size < total_batch_size:
ds = ds.repeat().take(total_batch_size)
ds = batch_and_shuffle(ds, batch_sizes,
is_training=is_training,
transpose=transpose,
num_samples=num_samples,
preprocess_fn=preprocess_fn,
drop_remainder=drop_remainder,
local_cache=local_cache)
return ds
def dsprites_normalize(image: chex.Array) -> chex.Array:
return (image - .5) * 2
def dsprites_preprocess(
mode: str = 'train'
) -> data_utils.TFPreprocessFn:
del mode
def _preprocess_fn(example):
example['image'] = tf.image.convert_image_dtype(
example['image'], dtype=tf.float32) * 255.
example['label'] = example['label_shape']
return example
return _preprocess_fn
def unbatched_load_dsprites(subset: str = 'train',
valid_size: int = 10000,
test_size: int = 10000) -> data_utils.Dataset:
"""Loads the dsprites dataset without batching and prefetching."""
if subset == 'train':
ds = tfds.load(name='dsprites',
split=tfds.Split.TRAIN).skip(valid_size + test_size)
elif subset == 'valid':
ds = tfds.load(name='dsprites',
split=tfds.Split.TRAIN).skip(test_size).take(valid_size)
elif subset == 'train_and_valid':
ds = tfds.load(name='dsprites', split=tfds.Split.TRAIN).skip(test_size)
elif subset == 'test':
ds = tfds.load(name='dsprites', split=tfds.Split.TRAIN).take(test_size)
else:
raise ValueError('Unknown subset: "{}"'.format(subset))
return ds
def load_dsprites(batch_sizes: Sequence[int],
subset: str = 'train',
is_training: bool = True,
num_samples: Optional[int] = None,
preprocess_fn: Optional[data_utils.PreprocessFnGen] = None,
transpose: bool = False,
valid_size: int = 10000,
test_size: int = 10000,
drop_remainder: bool = True,
local_cache: bool = True) -> data_utils.Dataset:
"""Loads the dsprites dataset.
The dsprites dataset is available at:
https://github.com/deepmind/dsprites-dataset.
It consists of 3 shapes (heart, ellipse and square).
  They vary by the following factors (which are consistent across categories
  and instances):
1. Scale (6 values)
2. Orientation: 40 values (rotates around the center of the object)
3. Position (X): 32 values
4. Position (Y): 32 values
Args:
batch_sizes: Specifies how to batch examples. I.e., if batch_sizes = [8, 4]
then output images will have shapes (8, 4, height, width, 3).
subset: Specifies the subset (train, valid, test or train_and_valid) to use.
is_training: Whether to infinitely repeat and shuffle examples (`True`) or
not (`False`).
num_samples: The number of samples to crop each individual dataset variant
from the start, or `None` to use the full dataset.
preprocess_fn: Function mapped onto each example for pre-processing.
transpose: Whether to permute image dimensions NHWC -> HWCN to speed up
performance on TPUs.
valid_size: The size of the validation set.
test_size: The size of the test set.
drop_remainder: Whether to drop the last batch(es) if they would not match
the shapes specified by `batch_sizes`.
local_cache: Whether to locally cache the dataset.
Returns:
ds: Fully configured dataset ready for training/evaluation.
"""
if preprocess_fn is None:
preprocess_fn = dsprites_preprocess
ds = unbatched_load_dsprites(subset=subset, valid_size=valid_size,
test_size=test_size)
total_batch_size = np.prod(batch_sizes)
if subset == 'valid' and valid_size < total_batch_size:
ds = ds.repeat().take(total_batch_size)
ds = batch_and_shuffle(ds, batch_sizes,
is_training=is_training,
transpose=transpose,
num_samples=num_samples,
preprocess_fn=preprocess_fn,
drop_remainder=drop_remainder,
local_cache=local_cache)
return ds
def batch_and_shuffle(
ds: data_utils.Dataset,
batch_sizes: Sequence[int],
preprocess_fn: Optional[data_utils.PreprocessFnGen] = None,
is_training: bool = True,
num_samples: Optional[int] = None,
transpose: bool = False,
drop_remainder: bool = True,
local_cache: bool = False) -> data_utils.Dataset:
"""Performs post-processing on datasets (i.e., batching, transposing).
Args:
ds: The dataset.
batch_sizes: Specifies how to batch examples. I.e., if batch_sizes = [8, 4]
then output images will have shapes (8, 4, height, width, 3).
preprocess_fn: Function mapped onto each example for pre-processing.
is_training: Whether to infinitely repeat and shuffle examples (`True`) or
not (`False`).
num_samples: The number of samples to crop each individual dataset variant
from the start, or `None` to use the full dataset.
transpose: Whether to permute image dimensions NHWC -> HWCN to speed up
performance on TPUs.
drop_remainder: Whether to drop the last batch(es) if they would not match
the shapes specified by `batch_sizes`.
local_cache: Whether to locally cache the dataset.
Returns:
ds: Dataset with all the post-processing applied.
"""
if num_samples:
ds = ds.take(num_samples)
if local_cache:
ds = ds.cache()
if is_training:
ds = ds.repeat()
total_batch_size = np.prod(batch_sizes)
shuffle_buffer = 10 * total_batch_size
ds = ds.shuffle(buffer_size=shuffle_buffer, seed=jax.process_index())
if preprocess_fn is not None:
ds = ds.map(preprocess_fn('train' if is_training else 'test'),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
for i, batch_size in enumerate(reversed(batch_sizes)):
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
if i == 0 and transpose:
ds = ds.map(data_utils.transpose_fn) # NHWC -> HWCN.
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
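if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: `batch_and_shuffle`
  # applied to a tiny synthetic dataset instead of a real tfds loader. The
  # image shape and batch sizes are assumptions.
  fake_ds = tf.data.Dataset.from_tensor_slices({
      'image': tf.zeros((16, 8, 8, 3), tf.float32),
      'label': tf.zeros((16,), tf.int32),
  })
  batched = batch_and_shuffle(fake_ds, batch_sizes=[2, 4], is_training=False)
  first = next(iter(tfds.as_numpy(batched)))
  assert first['image'].shape == (2, 4, 8, 8, 3)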
| distribution_shift_framework-master | distribution_shift_framework/core/datasets/data_loaders.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data utility functions."""
import enum
from typing import Any, Callable, Dict, Generator, Iterable, Mapping, Optional, Sequence, Tuple, Union
import chex
import jax
import ml_collections
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
# Type Aliases
Batch = Dict[str, np.ndarray]
ScalarDict = Dict[str, chex.Array]
# Objects that can be treated like tensors in TF2.
TFTensorLike = Union[np.ndarray, tf.Tensor, tf.Variable]
# pytype: disable=not-supported-yet
TFTensorNest = Union[TFTensorLike, Iterable['TFTensorNest'],
Mapping[str, 'TFTensorNest']]
# pytype: enable=not-supported-yet
PreprocessFnGen = Callable[[str], Callable[[chex.ArrayTree], chex.ArrayTree]]
TFPreprocessFn = Callable[[TFTensorNest], TFTensorNest]
Dataset = tf.data.Dataset
# Disentanglement datasets.
SHAPES3D_PROPERTIES = {
'label_scale': tuple(range(8)),
'label_orientation': tuple(range(15)),
'label_floor_hue': tuple(range(10)),
'label_object_hue': tuple(range(10)),
'label_wall_hue': tuple(range(10)),
'label_shape': tuple(range(4)),
'label_color':
tuple(range(3)) # Only added through preprocessing.
}
SMALL_NORB_PROPERTIES = {
'label_azimuth': tuple(range(18)),
'label_elevation': tuple(range(9)),
'label_lighting': tuple(range(6)),
'label_category': tuple(range(5)),
}
DSPRITES_PROPERTIES = {
'label_scale': tuple(range(6)),
'label_orientation': tuple(range(40)),
'label_x_position': tuple(range(32)),
'label_y_position': tuple(range(32)),
'label_shape': tuple(range(3)),
}
class DatasetNames(enum.Enum):
"""Names of the datasets."""
SHAPES3D = 'shapes3d'
SMALL_NORB = 'small_norb'
DSPRITES = 'dsprites'
class NumChannels(enum.Enum):
"""Number of channels of the images."""
SHAPES3D = 3
SMALL_NORB = 1
DSPRITES = 1
class Variance(enum.Enum):
"""Variance of the pixels in the images."""
SHAPES3D = 0.155252
SMALL_NORB = 0.031452
DSPRITES = 0.04068864749147259
class ImageSize(enum.Enum):
"""Size of the images."""
SHAPES3D = 64
SMALL_NORB = 96
DSPRITES = 64
def is_disentanglement_dataset(dataset_name: str) -> bool:
return dataset_name in (DatasetNames.SHAPES3D.value,
DatasetNames.SMALL_NORB.value,
DatasetNames.DSPRITES.value)
def get_dataset_constants(dataset_name: str,
label: str = 'label',
variant: Optional[str] = None) -> Mapping[str, Any]:
"""Returns a dictionary with several constants for the dataset."""
if variant:
properties_name = f'{dataset_name.upper()}_{variant.upper()}_PROPERTIES'
else:
properties_name = f'{dataset_name.upper()}_PROPERTIES'
properties = globals()[properties_name]
num_channels = NumChannels[dataset_name.upper()].value
if dataset_name == DatasetNames.DSPRITES.value and label == 'label_color':
num_classes = 3
else:
num_classes = len(properties[label])
return {
'properties': properties,
'num_channels': num_channels,
'num_classes': num_classes,
'variance': Variance[dataset_name.upper()].value,
'image_size': ImageSize[dataset_name.upper()].value
}
def transpose_fn(batch: Batch) -> Batch:
# Transpose for performance on TPU.
batch = dict(**batch)
batch['image'] = tf.transpose(batch['image'], (1, 2, 3, 0))
return batch
def load_dataset(is_training: bool,
batch_dims: Sequence[int],
transpose: bool,
data_kwargs: Optional[ml_collections.ConfigDict] = None
) -> Generator[Batch, None, None]:
"""Wrapper to load a dataset."""
data_loader = data_kwargs['loader']
batch_kwd = getattr(data_kwargs, 'batch_kwd', 'batch_sizes')
batch_kwargs = {batch_kwd: batch_dims}
dataset = data_loader(
is_training=is_training,
transpose=transpose,
**batch_kwargs,
**data_kwargs['load_kwargs'])
is_numpy = getattr(data_kwargs, 'is_numpy', False)
if not is_numpy:
dataset = iter(tfds.as_numpy(dataset))
return dataset
def resize(image: chex.Array, size: Tuple[int, int]) -> chex.Array:
"""Resizes a batch of images using bilinear interpolation."""
return jax.image.resize(image,
(image.shape[0], size[0], size[1], image.shape[3]),
method='bilinear', antialias=False)
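if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: looking up dataset
  # constants and resizing a toy batch. The shapes are assumptions.
  constants = get_dataset_constants(DatasetNames.DSPRITES.value,
                                    label='label_shape')
  assert constants['num_classes'] == 3
  assert constants['num_channels'] == 1
  toy_images = np.zeros((2, 32, 32, 1), dtype=np.float32)
  assert resize(toy_images, (64, 64)).shape == (2, 64, 64, 1)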
| distribution_shift_framework-master | distribution_shift_framework/core/datasets/data_utils.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the low data versions for the disentanglement datasets."""
from typing import Callable, Optional, Sequence
from distribution_shift_framework.core.datasets import data_utils
import jax
import ml_collections
import tensorflow.compat.v2 as tf
def _create_filter_fn(filter_string: str) -> Callable[..., bool]:
"""Creates a filter function based on the string.
Given a string of
"key11:val11:comp11^key12:val12:comp1b|...^keyNk:valNk:compNk"
the string is parsed as the OR of several AND statements. The ORs are at the
top level (denoted by |), and divide into a set of AND statements.
The AND values are denoted by ^ and operate at the bottom level.
Fof each "keyij:valij:compij" pairing, keyij is the key in the dataset,
valij is the value the key is compared against and compij is the tensorflow
comparison function: e.g. less, less_equal, equal, greater_equal, greater.
Note that parentheses and infinite depth are *not* supported yet.
Example 1: for dSprites: "label_scale:3:equal".
This will select all samples from dSprites where the label_scale parameter is
equal to 3.
Example 2: for Shapes3D:
"wall_hue_value:0.3:less_equal^floor_hue_value:0.3:less_equal".
This will select all samples from Shapes3D where the wall hue and floor hue
are less than or equal to 0.3.
Example 3: for smallNORB:
('label_azimuth:7:less^label_category:0:equal|'
'label_azimuth:7:greater_equal^label_category:0:not_equal').
This will select all samples from smallNORB which either have azimuth of less
than 7 and category 0 or azimuth of greater or equal 7 and a category other
than 0.
Args:
filter_string: The filter string that is used to make the filter function.
Returns:
filter_fn: A function that takes a batch and returns True or False if it
matches the filter string.
"""
all_comparisons = filter_string.split('|')
def filter_fn(x):
or_filter = False
# Iterate over all the OR comparisons.
for or_comparison in all_comparisons:
and_comparisons = or_comparison.split('^')
and_filter = True
# Iterate over all the AND comparisons.
for and_comparison in and_comparisons:
key, value, comp = and_comparison.split(':')
if value in x.keys():
value = x[value]
else:
value = tf.cast(float(value), x[key].dtype)
bool_fn = getattr(tf, comp)
# Accumulate the and comparisons.
and_filter = tf.logical_and(and_filter, bool_fn(x[key], value))
# Accumulate the or comparisons.
or_filter = tf.logical_or(or_filter, and_filter)
return or_filter
return filter_fn
def load_data(batch_sizes: Sequence[int],
dataset_loader: Callable[..., data_utils.Dataset],
num_samples: str,
filter_fns: str,
dataset_kwargs: ml_collections.ConfigDict,
shuffle_pre_sampling: bool = False,
shuffle_pre_sample_seed: int = 0,
local_cache: bool = True,
is_training: bool = True,
transpose: bool = True,
drop_remainder: bool = True,
prefilter: Optional[Callable[..., bool]] = None,
preprocess_fn: Optional[data_utils.PreprocessFnGen] = None,
shuffle_buffer: Optional[int] = 100_000,
weights: Optional[Sequence[float]] = None) -> data_utils.Dataset:
"""A low data wrapper around a tfds dataset.
This wrapper creates a set of datasets according to the parameters. For each
filtering function and number of samples, the dataset defined by the
dataset_loader and **dataset_kwargs is filtered and the first N samples are
taken. All datasets are concatenated together and a sample is drawn with
equal probability from each dataset.
Args:
batch_sizes: Specifies how to batch examples. I.e., if batch_sizes = [8, 4]
then output images will have shapes (8, 4, height, width, 3).
dataset_loader: The tfds dataset loader.
    num_samples: A string of the number of samples each returned dataset will
contain. I.e., if num_samples = '1,2,3' then the first filtering
operation will create a dataset with 1 sample, the second a dataset of 2
samples, and so on.
filter_fns: An iterable of the filtering functions for each part of the
dataset.
dataset_kwargs: A dict of the kwargs to pass to dataset_loader.
    shuffle_pre_sampling: Whether to shuffle before taking the first N samples,
      thereby selecting a different subset of samples.
shuffle_pre_sample_seed: What seed to use for presampling.
local_cache: Whether to cache the concatenated dataset. Good to do if the
dataset fits in memory.
is_training: Whether this is train or test.
transpose: Whether to permute image dimensions NHWC -> HWCN to speed up
performance on TPUs.
drop_remainder: Whether to drop the last batch(es) if they would not match
the shapes specified by `batch_sizes`.
prefilter: Filter to apply to the dataset.
preprocess_fn: Function mapped onto each example for pre-processing.
shuffle_buffer: How big the buffer for shuffling the images is.
weights: The probabilities to select samples from each dataset.
Returns:
A tf.Dataset instance.
"""
ds = dataset_loader(**dataset_kwargs)
if preprocess_fn:
ds = ds.map(
preprocess_fn('train' if is_training else 'test'),
num_parallel_calls=tf.data.AUTOTUNE)
if prefilter:
    ds = ds.filter(prefilter)
filter_fns = filter_fns.split(',')
num_samples = [int(n) for n in num_samples.split(',')]
assert len(filter_fns) == len(num_samples)
all_ds = []
for filter_fn, n_sample in zip(filter_fns, num_samples):
if filter_fn != 'True':
filter_fn = _create_filter_fn(filter_fn)
filtered_ds = ds.filter(filter_fn)
else:
filtered_ds = ds
if shuffle_pre_sampling:
filtered_ds = filtered_ds.shuffle(
buffer_size=shuffle_buffer, seed=shuffle_pre_sample_seed)
if n_sample:
filtered_ds = filtered_ds.take(n_sample)
if local_cache or n_sample:
filtered_ds = filtered_ds.cache()
if is_training:
filtered_ds = filtered_ds.repeat()
shuffle_buffer = (
min(n_sample, shuffle_buffer) if n_sample > 0 else shuffle_buffer)
filtered_ds = filtered_ds.shuffle(
buffer_size=shuffle_buffer, seed=jax.process_index())
all_ds.append(filtered_ds)
ds = tf.data.Dataset.sample_from_datasets(
all_ds, weights=weights, seed=None)
for i, batch_size in enumerate(reversed(batch_sizes)):
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
if i == 0 and transpose:
ds = ds.map(data_utils.transpose_fn) # NHWC -> HWCN.
return ds
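if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the filter grammar
  # parsed by `_create_filter_fn`, evaluated on a toy example dict. The keys
  # and values are assumptions styled after the smallNORB labels.
  keep_fn = _create_filter_fn('label_azimuth:7:less^label_category:0:equal')
  example = {'label_azimuth': tf.constant(3), 'label_category': tf.constant(0)}
  print(keep_fn(example))  # True: azimuth < 7 AND category == 0.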
| distribution_shift_framework-master | distribution_shift_framework/core/datasets/lowdata_wrapper.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invariant risk minimization for minimizing loss."""
from typing import Tuple
import chex
from distribution_shift_framework.core.algorithms import base
from distribution_shift_framework.core.algorithms import losses
from distribution_shift_framework.core.datasets import data_utils
import haiku as hk
import jax.numpy as jnp
class IRM(base.LearningAlgorithm):
"""Computes the invariant risk.
  This learning algorithm is based on that of Arjovsky et al. Invariant Risk
Minimization. https://arxiv.org/abs/1907.02893.
It enforces that the optimal classifiers for representations with different
properties are the same.
"""
def __init__(self,
lambda_penalty: float = 1.,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
name: str = 'invariant_risk'):
super().__init__(loss_fn=loss_fn, name=name)
self.penalty_weight = lambda_penalty
def _apply_loss(self, weights, logits, targets):
return self.loss_fn(logits * weights, targets, reduction='mean')
def __call__(self,
logits: chex.Array,
targets: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean'
) -> Tuple[data_utils.ScalarDict, chex.Array]:
assert len(targets.shape) == 2
erm = 0
penalty = 0
# For each property, estimate the weights of an optimal classifier.
for property_v in range(property_vs.shape[-1]):
if len(property_vs.shape) == 2:
# One hot encoding.
mask = jnp.argmax(property_vs, axis=-1)[..., None] == property_v
masked_logits = mask * logits
masked_targets = mask * targets
else:
raise ValueError(
f'Properties have an unexpected shape: {property_vs.shape}.')
weights = jnp.ones((1,))
# Compute empirical risk.
erm += self._apply_loss(weights, masked_logits, masked_targets)
# Compute penalty.
grad_fn = hk.grad(self._apply_loss, argnums=0)
grad_1 = grad_fn(weights, masked_logits[::2], masked_targets[::2])
grad_2 = grad_fn(weights, masked_logits[1::2], masked_targets[1::2])
penalty += (grad_1 * grad_2).sum()
# How well are we estimating the labels?
top1_acc = (jnp.argmax(logits, axis=-1) == jnp.argmax(targets,
axis=-1)).mean()
return {
'loss': erm + self.penalty_weight * penalty,
'erm': erm,
'penalty': penalty,
'top1_acc': top1_acc
}, logits
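if __name__ == '__main__':
  import jax
  # Illustrative sketch, not part of the original module: the IRMv1-style
  # penalty for a single environment, written with `jax.grad` on a dummy
  # classifier weight instead of the split-batch estimator in `IRM.__call__`.
  # The toy logits and targets are assumptions.
  toy_logits = jnp.array([[2.0, -1.0], [0.5, 1.5]])
  toy_targets = jnp.array([[1.0, 0.0], [0.0, 1.0]])
  def _risk(dummy_weight):
    return losses.softmax_cross_entropy(
        toy_logits * dummy_weight, toy_targets, reduction='mean')
  penalty = jax.grad(_risk)(jnp.ones(())) ** 2
  print(penalty)  # Squared gradient of the risk w.r.t. the dummy weight.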
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/irm.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Empirical risk minimization for minimizing loss."""
import abc
from typing import Tuple
import chex
from distribution_shift_framework.core.algorithms import base
from distribution_shift_framework.core.algorithms import losses
from distribution_shift_framework.core.datasets import data_utils
import jax
import jax.numpy as jnp
class ERM(base.LearningAlgorithm):
"""Computes the empirical risk."""
def __init__(self,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
name: str = 'empirical_risk'):
super().__init__(loss_fn=loss_fn, name=name)
def __call__(self,
logits: chex.Array,
targets: chex.Array,
reduction: str = 'mean',
**unused_kwargs) -> Tuple[data_utils.ScalarDict, chex.Array]:
loss = self.loss_fn(logits, targets, reduction=reduction)
return {'loss': loss}, logits
class AbstractMMD(base.LearningAlgorithm):
"""Base class for the CORAL and MMD algorithms."""
def __init__(self,
mmd_weight: float = 1.,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
name: str = 'coral'):
super().__init__(loss_fn=loss_fn, name=name)
self.mmd_weight = mmd_weight
@abc.abstractmethod
def _mmd(self, x: chex.Array, x_mask: chex.Array, y: chex.Array,
y_mask: chex.Array) -> chex.Array:
"""Computes the MMD between two sets of masked features.
Args:
x: The first set of features.
x_mask: Which of the x features should be considered.
y: The second set of features.
y_mask: Which of the y features should be considered.
Returns:
      The MMD between the two sets of masked features.
"""
pass
def __call__(self,
logits: chex.Array,
targets: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean'
) -> Tuple[data_utils.ScalarDict, chex.Array]:
"""Compute the MMD loss where the domains are given by the properties."""
pnum = property_vs.shape[-1]
if len(property_vs.shape) != 2:
raise ValueError(
f'Properties have an unexpected shape: {property_vs.shape}.')
# For each label, compute the difference in domain shift against all the
# others.
mmd_loss = {'loss': 0}
property_pairs = []
for i, property_v1 in enumerate(range(pnum)):
for property_v2 in range(i + 1, pnum):
property_pairs += [(property_v1, property_v2)]
def compute_pair_loss(mmd_loss, pair_vs):
property_v1, property_v2 = pair_vs
# One hot encoding.
mask1 = jnp.argmax(property_vs, axis=-1)[..., None] == property_v1
mask2 = jnp.argmax(targets, axis=-1)[..., None] == property_v2
loss = jax.lax.cond(
jnp.minimum(mask1.sum(), mask2.sum()) > 1,
lambda a: self._mmd(*a),
lambda _: jnp.zeros(()),
operand=(logits, mask1, logits, mask2))
t_mmd_loss = {'loss': loss}
mmd_loss = jax.tree_map(jnp.add, mmd_loss, t_mmd_loss)
return (mmd_loss, 0)
mmd_loss, _ = jax.lax.scan(compute_pair_loss, mmd_loss,
jnp.array(property_pairs))
erm = self.loss_fn(logits, targets, reduction=reduction)
# How well are we estimating the labels?
top1_acc = (jnp.argmax(logits, axis=-1) == jnp.argmax(targets,
axis=-1)).mean()
loss = mmd_loss['loss'] / (pnum * (pnum - 1)) * self.mmd_weight + erm
mmd_loss['loss'] = loss
mmd_loss['erm'] = erm
mmd_loss['top1_acc'] = top1_acc
return mmd_loss, logits
class CORAL(AbstractMMD):
"""The CORAL algorithm.
Computes the empirical risk and enforces that feature distributions match
across distributions (by minimizing the maximum mean discrepancy).
"""
def __init__(self,
coral_weight: float = 1.,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
name: str = 'coral'):
super().__init__(loss_fn=loss_fn, name=name, mmd_weight=coral_weight)
def _mmd(self, x: chex.Array, x_mask: chex.Array, y: chex.Array,
y_mask: chex.Array) -> chex.Array:
"""Computes the MMD between two sets of masked features.
Args:
x: The first set of features.
x_mask: Which of the x features should be considered.
y: The second set of features.
y_mask: Which of the y features should be considered.
Returns:
      The MMD between the two sets of masked features.
"""
mean_x = (x * x_mask).sum(0, keepdims=True) / x_mask.sum()
mean_y = (y * y_mask).sum(0, keepdims=True) / y_mask.sum()
cent_x = (x - mean_x) * x_mask
cent_y = (y - mean_y) * y_mask
# Compute the covariances of the inputs.
cova_x = cent_x.T.dot(cent_x) / (x_mask.sum() - 1)
cova_y = cent_y.T.dot(cent_y) / (y_mask.sum() - 1)
d_x = x_mask.sum()
d_y = y_mask.sum()
mean_mse = ((mean_x - mean_y)**2).mean()
cov_mse = ((cova_x - cova_y)**2 / (4 * d_x * d_y)).mean()
return mean_mse + cov_mse
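if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the CORAL distance
  # computed by `CORAL._mmd`, restated for two unmasked toy feature batches.
  x = jnp.stack([jnp.arange(4.0) + i for i in range(8)])  # (8, 4) features.
  y = jnp.stack([jnp.arange(4.0) * i for i in range(6)])  # (6, 4) features.
  mean_x, mean_y = x.mean(0, keepdims=True), y.mean(0, keepdims=True)
  cent_x, cent_y = x - mean_x, y - mean_y
  cova_x = cent_x.T.dot(cent_x) / (x.shape[0] - 1)
  cova_y = cent_y.T.dot(cent_y) / (y.shape[0] - 1)
  coral = (((mean_x - mean_y)**2).mean() +
           ((cova_x - cova_y)**2 / (4 * x.shape[0] * y.shape[0])).mean())
  print(coral)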
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/erm.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adversarial training of latent values."""
from typing import Optional, Sequence, Tuple
import chex
from distribution_shift_framework.core.algorithms import base
from distribution_shift_framework.core.algorithms import losses
from distribution_shift_framework.core.datasets import data_utils
import haiku as hk
import jax.numpy as jnp
class DANN(base.LearningAlgorithm):
"""Uses adversarial training to train a property agnostic representation.
Based on the work of Ganin et al. Domain-Adversarial Training of Neural
Networks. https://jmlr.org/papers/volume17/15-239/15-239.pdf.
  This learning setup takes a set of logits, property values, and targets. It
then enforces that the logits contain *no* information about the set of
properties.
"""
def __init__(self,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
property_loss_fn: base.LossFn = losses.softmax_cross_entropy,
mlp_output_sizes: Sequence[int] = (),
name: str = 'DANN'):
super().__init__(loss_fn=loss_fn, name=name)
# Implicit assumptions in the code require classification.
assert loss_fn == losses.softmax_cross_entropy
assert property_loss_fn == losses.softmax_cross_entropy
self.mlp_output_sizes = mlp_output_sizes
self.property_loss_fn = property_loss_fn
def __call__(self,
logits: chex.Array,
targets: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean'
) -> Tuple[data_utils.ScalarDict, chex.Array]:
###################
# Standard loss.
###################
# Compute the regular loss function.
erm = self.loss_fn(logits, targets, reduction=reduction)
return {'loss': erm}, logits
def adversary(self,
logits: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean',
targets: Optional[chex.Array] = None) -> data_utils.ScalarDict:
###################
# Adversarial loss.
###################
adv_net = hk.nets.MLP(
tuple(self.mlp_output_sizes) + (property_vs.shape[-1],))
# Get logits for estimating the property.
adv_logits = adv_net(logits)
# Enforce that the representation encodes nothing about the property values.
adv_loss = self.property_loss_fn(
adv_logits, property_vs, reduction=reduction)
# How well are we estimating the property value?
prop_top1_acc = (jnp.argmax(adv_logits,
axis=-1) == jnp.argmax(property_vs,
axis=-1)).mean()
return {'loss': adv_loss, 'prop_top1_acc': prop_top1_acc}
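if __name__ == '__main__':
  import jax
  # Illustrative smoke test, not part of the original module: DANN is an
  # hk.Module, so it must run inside hk.transform for the adversary's MLP
  # parameters to be created. The shapes and property count are assumptions.
  def _forward(logits, targets, property_vs):
    dann = DANN(mlp_output_sizes=(32,))
    scalars, _ = dann(logits, targets, property_vs)
    adv_scalars = dann.adversary(logits, property_vs)
    return scalars, adv_scalars
  forward = hk.transform(_forward)
  rng = jax.random.PRNGKey(0)
  toy_logits = jnp.zeros((4, 10))
  toy_targets = jax.nn.one_hot(jnp.arange(4) % 10, 10)
  toy_props = jax.nn.one_hot(jnp.arange(4) % 3, 3)
  params = forward.init(rng, toy_logits, toy_targets, toy_props)
  scalars, adv_scalars = forward.apply(
      params, rng, toy_logits, toy_targets, toy_props)
  print(scalars['loss'], adv_scalars['loss'])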
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/adversarial.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Learning algorithms."""
from distribution_shift_framework.core.algorithms.adversarial import DANN
from distribution_shift_framework.core.algorithms.erm import CORAL
from distribution_shift_framework.core.algorithms.erm import ERM
from distribution_shift_framework.core.algorithms.irm import IRM
from distribution_shift_framework.core.algorithms.sagnet import SagNet
# Learning algorithms.
__all__ = (
'CORAL',
'DANN',
'ERM',
'IRM',
'SagNet',
)
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training representations to be style agnostic.."""
from typing import Any, Mapping, Optional, Tuple
import chex
from distribution_shift_framework.core.algorithms import base
from distribution_shift_framework.core.algorithms import losses
from distribution_shift_framework.core.datasets import data_utils
import haiku as hk
import jax
import jax.numpy as jnp
import ml_collections
class SagNet(base.LearningAlgorithm):
"""Implemenets a SagNet https://arxiv.org/pdf/1910.11645.pdf.
This is a method for training networks to be invariant to style for
improved domain generalization.
"""
def __init__(self,
loss_fn: base.LossFn = losses.softmax_cross_entropy,
content_net_fn=hk.nets.MLP,
content_net_kwargs: Mapping[str,
Any] = (ml_collections.ConfigDict(
dict(output_sizes=(64, 64,
64)))),
style_net_fn=hk.nets.MLP,
style_net_kwargs: Mapping[str, Any] = ml_collections.ConfigDict(
                   dict(output_sizes=(64, 64, 64))),
name: str = 'SagNet',
**kwargs):
super().__init__(loss_fn=loss_fn, name=name)
self._content_net_fn = content_net_fn
self._content_net_kwargs = content_net_kwargs
self._style_net_fn = style_net_fn
self._style_net_kwargs = style_net_kwargs
def _randomize(self, features, interpolate=False, eps=1e-5):
"""Apply the ADAIN style operator (https://arxiv.org/abs/1703.06868)."""
b = features.shape[0]
alpha = jax.random.uniform(hk.next_rng_key(),
(b,) + (1,) * len(features.shape[1:]))
is_image_shape = len(features.shape) == 4
if is_image_shape:
      # Features is an image with shape BHWC.
b, h, w, c = features.shape
      features = jnp.transpose(features, axes=(0, 3, 1, 2)).reshape(b, c, -1)
mean = jnp.mean(features, axis=(-1,), keepdims=True)
variance = jnp.var(features, axis=(-1,), keepdims=True)
features = (features - mean) / jnp.sqrt(variance + eps)
idx_swap = jax.random.permutation(hk.next_rng_key(), jnp.arange(b))
if interpolate:
mean = alpha * mean + (1 - alpha) * mean[idx_swap, ...]
variance = alpha * variance + (1 - alpha) * variance[idx_swap, ...]
else:
features = jax.lax.stop_gradient(features[idx_swap, ...])
features = features * jnp.sqrt(variance + eps) + mean
if is_image_shape:
      features = jnp.transpose(features, axes=(0, 2, 1)).reshape(b, h, w, c)
return features
def _content_pred(self, features):
features = self._randomize(features, True)
return self._content_net_fn(**self._content_net_kwargs)(features)
def _style_pred(self, features):
features = self._randomize(features, False)
return self._style_net_fn(**self._style_net_kwargs)(features)
def __call__(self,
logits: chex.Array,
targets: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean'
) -> Tuple[data_utils.ScalarDict, chex.Array]:
"""Train the content network."""
if len(logits.shape) == 4:
logits = jnp.mean(logits, axis=(1, 2))
preds = self._content_pred(logits)
loss_content = self.loss_fn(preds, targets)
# How well are we estimating the content?
top1_acc = (jnp.argmax(preds, axis=-1) == jnp.argmax(targets,
axis=-1)).mean()
return {'loss': loss_content, 'top1_acc': top1_acc}, preds
def adversary(self,
logits: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean',
targets: Optional[chex.Array] = None) -> data_utils.ScalarDict:
"""Train the adversary which aims to predict style."""
if len(logits.shape) == 4:
logits = jnp.mean(logits, axis=(1, 2))
preds = self._style_pred(logits)
loss_style = self.loss_fn(preds, targets)
# How well are we estimating the style?
top1_acc = (jnp.argmax(preds, axis=-1) == jnp.argmax(targets,
axis=-1)).mean()
return {'loss': loss_style, 'style_top1acc': top1_acc}
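if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the ADAIN-style
  # statistic swap in `SagNet._randomize` with interpolate=False, restated on
  # a toy 2D feature batch. Each row keeps its own mean/std but receives the
  # normalized content of a randomly permuted row.
  rng = jax.random.PRNGKey(0)
  feats = jax.random.normal(rng, (4, 8))
  mean = jnp.mean(feats, axis=-1, keepdims=True)
  std = jnp.sqrt(jnp.var(feats, axis=-1, keepdims=True) + 1e-5)
  normalized = (feats - mean) / std
  idx_swap = jax.random.permutation(rng, jnp.arange(feats.shape[0]))
  restyled = jax.lax.stop_gradient(normalized[idx_swap]) * std + mean
  print(restyled.shape)  # (4, 8)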
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/sagnet.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Losses to be used to train a model."""
import chex
import jax.numpy as jnp
import optax
def softmax_cross_entropy(logits: chex.Array,
labels: chex.Array,
reduction: str = 'sum') -> chex.Array:
"""Computes softmax cross entropy given logits and one-hot class labels.
Args:
logits: Logit output values.
labels: Ground truth one-hot-encoded labels.
reduction: Type of reduction to apply to loss.
Returns:
    Loss value. If `reduction` is `none`, this has one entry per example (the
    shape of `labels` without the final class dimension); otherwise, it is a
    scalar.
Raises:
ValueError: If the type of `reduction` is unsupported.
"""
x = optax.softmax_cross_entropy(logits, labels)
if reduction == 'none' or reduction is None:
return jnp.asarray(x)
elif reduction == 'sum':
return jnp.asarray(x).sum()
elif reduction == 'mean':
return jnp.mean(jnp.asarray(x))
else:
raise ValueError('Unsupported reduction option.')
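if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: the loss on a toy
  # batch of two examples with one-hot labels, under each reduction.
  toy_logits = jnp.array([[2.0, 0.5, -1.0], [0.0, 0.0, 0.0]])
  toy_labels = jnp.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
  per_example = softmax_cross_entropy(toy_logits, toy_labels, reduction='none')
  print(per_example.shape)  # (2,): one value per example.
  print(softmax_cross_entropy(toy_logits, toy_labels, reduction='sum'))
  print(softmax_cross_entropy(toy_logits, toy_labels, reduction='mean'))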
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/losses.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for learning algorithms."""
import abc
from typing import Callable, Optional, Tuple
import chex
from distribution_shift_framework.core.datasets import data_utils
import haiku as hk
LossFn = Callable[..., chex.Array]
class LearningAlgorithm(hk.Module):
"""Class to encapsulate a learning algorithm."""
def __init__(self, loss_fn: LossFn, name: str = 'DANN', **kwargs):
"""Initializes the algorithm with the given loss function."""
super().__init__(name=name)
self.loss_fn = loss_fn
@abc.abstractmethod
def __call__(
self,
logits: chex.Array,
targets: chex.Array,
reduction: str = 'mean',
property_vs: Optional[chex.Array] = None
) -> Tuple[data_utils.ScalarDict, chex.Array]:
"""The loss function of the learning algorithm.
Args:
logits: The predicted logits input to the training algorithm.
targets: The ground truth value to estimate.
reduction: How to combine the loss for different samples.
property_vs: An optional set of properties of the input data.
Returns:
scalars: A dictionary of key and scalar estimates. The key `loss`
is the loss that should be minimized.
preds: The raw softmax predictions.
"""
pass
def adversary(self,
logits: chex.Array,
property_vs: chex.Array,
reduction: str = 'mean',
targets: Optional[chex.Array] = None) -> data_utils.ScalarDict:
"""The adversarial loss function.
If la = LearningAlgorithm(), this function is applied in a min-max game
with la(). The model is trained to minimize the loss arising from la(),
while maximizing the loss from the adversary (la.adversary()). The
adversarial part of the model tries to minimize this loss.
Args:
logits: The predicted value input to the training algorithm.
property_vs: An set of properties of the input data.
reduction: How to combine the loss for different samples.
targets: The ground truth value to estimate (optional).
Returns:
scalars: A dictionary of key and scalar estimates. The key `adv_loss` is
the value that should be minimized (for the adversary) and maximized (
for the model). If empty, this learning algorithm has no adversary.
"""
# Do nothing.
return {}
| distribution_shift_framework-master | distribution_shift_framework/core/algorithms/base.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/core/model_zoo/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A reimplementation of resnet that exposes intermediate values."""
from typing import Mapping, Optional, Sequence, Union
import chex
import haiku as hk
import jax
import jax.numpy as jnp
class BlockV1(hk.Module):
"""ResNet V1 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, float],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
bn_config.setdefault('decay_rate', 0.999)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
self.proj_batchnorm = hk.BatchNorm(name='shortcut_batchnorm', **bn_config)
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
bn_0 = hk.BatchNorm(name='batchnorm_0', **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
bn_1 = hk.BatchNorm(name='batchnorm_1', **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')
bn_2 = hk.BatchNorm(name='batchnorm_2', scale_init=jnp.zeros, **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool) -> chex.Array:
out = shortcut = inputs
if self.use_projection:
shortcut = self.proj_conv(shortcut)
shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)
for i, (conv_i, bn_i) in enumerate(self.layers):
out = conv_i(out)
out = bn_i(out, is_training, test_local_stats)
if i < len(self.layers) - 1: # Don't apply relu on last layer
out = jax.nn.relu(out)
return jax.nn.relu(out + shortcut)
class BlockV2(hk.Module):
"""ResNet V2 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bn_config: Mapping[str, float],
bottleneck: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
bn_config = dict(bn_config)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
if self.use_projection:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
with_bias=False,
padding='SAME',
name='shortcut_conv')
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1,
with_bias=False,
padding='SAME',
name='conv_0')
bn_0 = hk.BatchNorm(name='batchnorm_0', **bn_config)
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride,
with_bias=False,
padding='SAME',
name='conv_1')
bn_1 = hk.BatchNorm(name='batchnorm_1', **bn_config)
layers = ((conv_0, bn_0), (conv_1, bn_1))
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
with_bias=False,
padding='SAME',
name='conv_2')
# NOTE: Some implementations of ResNet50 v2 suggest initializing
# gamma/scale here to zeros.
bn_2 = hk.BatchNorm(name='batchnorm_2', **bn_config)
layers = layers + ((conv_2, bn_2),)
self.layers = layers
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool) -> chex.Array:
x = shortcut = inputs
for i, (conv_i, bn_i) in enumerate(self.layers):
x = bn_i(x, is_training, test_local_stats)
x = jax.nn.relu(x)
if i == 0 and self.use_projection:
shortcut = self.proj_conv(x)
x = conv_i(x)
return x + shortcut
class BlockGroup(hk.Module):
"""Higher level block for ResNet implementation."""
def __init__(
self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
bn_config: Mapping[str, float],
resnet_v2: bool,
bottleneck: bool,
use_projection: bool,
name: Optional[str] = None,
):
super().__init__(name=name)
block_cls = BlockV2 if resnet_v2 else BlockV1
self.blocks = []
for i in range(num_blocks):
self.blocks.append(
block_cls(
channels=channels,
stride=(1 if i else stride),
use_projection=(i == 0 and use_projection),
bottleneck=bottleneck,
bn_config=bn_config,
name=f'block_{i}'))
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool) -> chex.Array:
out = inputs
for block in self.blocks:
out = block(out, is_training, test_local_stats)
return out
def check_length(length: int, value: Sequence[int], name: str):
if len(value) != length:
    raise ValueError(f'`{name}` must be of length {length}, not {len(value)}')
class ResNet(hk.Module):
"""ResNet model."""
BlockGroup = BlockGroup # pylint: disable=invalid-name
BlockV1 = BlockV1 # pylint: disable=invalid-name
BlockV2 = BlockV2 # pylint: disable=invalid-name
def __init__(
self,
blocks_per_group: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
bottleneck: bool = True,
      channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
use_projection: Sequence[bool] = (True, True, True, True),
name: Optional[str] = None,
):
"""Constructs a ResNet model.
Args:
blocks_per_group: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers. By default the
``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
bottleneck: Whether the block should bottleneck or not. Defaults to
``True``.
channels_per_group: A sequence of length 4 that indicates the number of
channels used for each block in each group.
use_projection: A sequence of length 4 that indicates whether each
residual block should use projection.
name: Name of the module.
"""
super().__init__(name=name)
self.resnet_v2 = resnet_v2
bn_config = dict(bn_config or {})
bn_config.setdefault('decay_rate', 0.9)
bn_config.setdefault('eps', 1e-5)
bn_config.setdefault('create_scale', True)
bn_config.setdefault('create_offset', True)
logits_config = dict({})
logits_config.setdefault('w_init', jnp.zeros)
logits_config.setdefault('name', 'logits')
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, 'blocks_per_group')
check_length(4, channels_per_group, 'channels_per_group')
self.initial_conv = hk.Conv2D(
output_channels=64,
kernel_shape=7,
stride=2,
with_bias=False,
padding='SAME',
name='initial_conv')
if not self.resnet_v2:
self.initial_batchnorm = hk.BatchNorm(
name='initial_batchnorm', **bn_config)
self.block_groups = []
strides = (1, 2, 2, 1)
for i in range(4):
self.block_groups.append(
BlockGroup(
channels=channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_projection=use_projection[i],
name=f'block_group_{i}'))
def __call__(self,
inputs: chex.Array,
is_training: bool,
test_local_stats: bool = False) -> chex.Array:
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = jax.nn.relu(out)
out = hk.max_pool(
out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME')
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
return out
class ResNet18(ResNet):
"""ResNet18."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(2, 2, 2, 2),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 2048),
use_projection=(False, True, True, True),
name=name)
class ResNet34(ResNet):
"""ResNet34."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=False,
channels_per_group=(64, 128, 256, 512),
use_projection=(False, True, True, True),
name=name)
class ResNet50(ResNet):
"""ResNet50."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 6, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class ResNet101(ResNet):
"""ResNet101."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 4, 23, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class ResNet152(ResNet):
"""ResNet152."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 8, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
class ResNet200(ResNet):
"""ResNet200."""
def __init__(self,
num_classes: int,
bn_config: Optional[Mapping[str, float]] = None,
resnet_v2: bool = False,
name: Optional[str] = None):
"""Constructs a ResNet model.
Args:
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, ``decay_rate`` and ``eps`` to be
passed on to the :class:`~haiku.BatchNorm` layers.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
``False``.
name: Name of the module.
"""
super().__init__(
blocks_per_group=(3, 24, 36, 3),
num_classes=num_classes,
bn_config=bn_config,
resnet_v2=resnet_v2,
bottleneck=True,
name=name)
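# NOTE: The function below is an illustrative usage sketch and not part of the
# original module. The input shape and number of classes are arbitrary. The
# network returns the final feature map rather than logits, since this
# reimplementation exposes intermediate values.
def _example_resnet18_forward() -> chex.Array:
  """Runs a ResNet18 forward pass on a dummy batch of images."""
  def forward(images: chex.Array, is_training: bool) -> chex.Array:
    return ResNet18(num_classes=10)(images, is_training=is_training)
  forward_fn = hk.transform_with_state(forward)
  rng = jax.random.PRNGKey(0)
  images = jnp.zeros((2, 64, 64, 3))
  params, state = forward_fn.init(rng, images, is_training=True)
  features, _ = forward_fn.apply(params, state, rng, images, is_training=True)
  return features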
| distribution_shift_framework-master | distribution_shift_framework/core/model_zoo/resnet.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/core/pix/__init__.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Color conversion utilities.
These used to be in the dm_pix library but have been removed. I've added them
back here for the time being.
"""
from typing import Tuple
import chex
import jax.numpy as jnp
def split_channels(
image: chex.Array,
channel_axis: int,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
chex.assert_axis_dimension(image, axis=channel_axis, expected=3)
split_axes = jnp.split(image, 3, axis=channel_axis)
return tuple(map(lambda x: jnp.squeeze(x, axis=channel_axis), split_axes))
def rgb_to_hsv(
image_rgb: chex.Array,
*,
channel_axis: int = -1,
) -> chex.Array:
"""Converts an image from RGB to HSV.
Args:
image_rgb: an RGB image, with float values in range [0, 1]. Behavior outside
of these bounds is not guaranteed.
channel_axis: the channel axis. image_rgb should have 3 layers along this
axis.
Returns:
An HSV image, with float values in range [0, 1], stacked along channel_axis.
"""
red, green, blue = split_channels(image_rgb, channel_axis)
return jnp.stack(
rgb_planes_to_hsv_planes(red, green, blue), axis=channel_axis)
def rgb_planes_to_hsv_planes(
red: chex.Array,
green: chex.Array,
blue: chex.Array,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
"""Converts red, green, blue color planes to hue, saturation, value planes.
All planes should have the same shape, with float values in range [0, 1].
Behavior outside of these bounds is not guaranteed.
Reference implementation:
https://github.com/tensorflow/tensorflow/blob/262f4ad303c78a99e0974c4b17892db2255738a0/tensorflow/compiler/tf2xla/kernels/image_ops.cc#L36-L68
Args:
red: the red color plane.
    green: the green color plane.
    blue: the blue color plane.
Returns:
A tuple of (hue, saturation, value) planes, as float values in range [0, 1].
"""
value = jnp.maximum(jnp.maximum(red, green), blue)
minimum = jnp.minimum(jnp.minimum(red, green), blue)
range_ = value - minimum
saturation = jnp.where(value > 0, range_ / value, 0.)
norm = 1. / (6. * range_)
hue = jnp.where(value == green,
norm * (blue - red) + 2. / 6.,
norm * (red - green) + 4. / 6.)
hue = jnp.where(value == red, norm * (green - blue), hue)
hue = jnp.where(range_ > 0, hue, 0.) + (hue < 0.)
return hue, saturation, value
def hsv_planes_to_rgb_planes(
hue: chex.Array,
saturation: chex.Array,
value: chex.Array,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
"""Converts hue, saturation, value planes to red, green, blue color planes.
All planes should have the same shape, with float values in range [0, 1].
Behavior outside of these bounds is not guaranteed.
Reference implementation:
https://github.com/tensorflow/tensorflow/blob/262f4ad303c78a99e0974c4b17892db2255738a0/tensorflow/compiler/tf2xla/kernels/image_ops.cc#L71-L94
Args:
hue: the hue plane (wrapping).
saturation: the saturation plane.
value: the value plane.
Returns:
A tuple of (red, green, blue) planes, as float values in range [0, 1].
"""
dh = (hue % 1.0) * 6. # Wrap when hue >= 360°.
dr = jnp.clip(jnp.abs(dh - 3.) - 1., 0., 1.)
dg = jnp.clip(2. - jnp.abs(dh - 2.), 0., 1.)
db = jnp.clip(2. - jnp.abs(dh - 4.), 0., 1.)
one_minus_s = 1. - saturation
red = value * (one_minus_s + saturation * dr)
green = value * (one_minus_s + saturation * dg)
blue = value * (one_minus_s + saturation * db)
return red, green, blue
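# NOTE: The function below is an illustrative round-trip sketch and not part of
# the original module; the constant test image is arbitrary.
def _example_rgb_hsv_round_trip() -> chex.Array:
  """Converts a small RGB image to HSV planes and back again."""
  image_rgb = jnp.stack([jnp.full((4, 4), 0.2),
                         jnp.full((4, 4), 0.5),
                         jnp.full((4, 4), 0.8)], axis=-1)
  hue, saturation, value = split_channels(rgb_to_hsv(image_rgb), -1)
  red, green, blue = hsv_planes_to_rgb_planes(hue, saturation, value)
  # The reconstruction matches `image_rgb` up to numerical precision.
  return jnp.stack([red, green, blue], axis=-1)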
| distribution_shift_framework-master | distribution_shift_framework/core/pix/color_conversion.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides image augmentation functions.
All functions expect float-encoded images, with values between 0 and 1, but
do not clip their outputs.
"""
import chex
from distribution_shift_framework.core.pix import color_conversion
import jax
import jax.numpy as jnp
def _auto_contrast(image: chex.Array, cutoff: int = 0) -> chex.Array:
"""The auto contrast transform: remove top/bottom % and rescale histogram.
Args:
image: an RGB image given as a float tensor in [0, 1].
cutoff: what % of higher/lower pixels to remove
Returns:
The new image with auto contrast applied.
"""
im_rgbs = []
indices = jnp.arange(0, 256, 1)
for rgb in range(0, image.shape[2]):
im_rgb = image[:, :, rgb:rgb + 1]
hist = jnp.histogram(im_rgb, bins=256, range=(0, 1))[0]
hist_cumsum = hist.cumsum()
# Determine % samples
cut_lower = hist_cumsum[-1] * cutoff // 100
cut_higher = hist_cumsum[-1] * (100 - cutoff) // 100
# The lower offset
offset_lo = (hist_cumsum < cut_lower) * indices
offset_lo = offset_lo.max() / 256.
# The higher offset
offset_hi = (hist_cumsum <= cut_higher) * indices
offset_hi = offset_hi.max() / 256.
# Remove cutoff% samples from low/hi end
im_rgb = (im_rgb - offset_lo).clip(0, 1) + offset_lo
im_rgb = (im_rgb + 1 - offset_hi).clip(0, 1) - (1 - offset_hi)
# And renormalize
offset = (offset_hi - offset_lo) < 1 / 256.
im_rgb = (im_rgb - offset_lo) / (offset_hi - offset_lo + offset)
# And return
im_rgbs.append(im_rgb)
return jnp.concatenate(im_rgbs, axis=2)
def auto_contrast(image: chex.Array, cutoff: chex.Array) -> chex.Array:
if len(image.shape) < 4:
return _auto_contrast(image, cutoff)
else:
return jax.vmap(_auto_contrast)(image, cutoff.astype(jnp.int32))
def _equalize(image: chex.Array) -> chex.Array:
"""The equalize transform: make histogram cover full scale.
Args:
image: an RGB image given as a float tensor in [0, 1].
Returns:
The equalized image.
"""
im_rgbs = []
im = (image * 255).astype(jnp.int32).clip(0, 255)
for rgb in range(0, im.shape[2]):
im_rgb = im[:, :, rgb:rgb + 1]
hist = jnp.histogram(im_rgb, bins=256, range=(0, 256))[0]
last_nonzero_value = hist.sum() - hist.cumsum()
last_nonzero_value = last_nonzero_value + last_nonzero_value.max() * (
last_nonzero_value == 0)
step = (hist.sum() - last_nonzero_value.min()) // 255
n = step // 2
im_rgb_new = jnp.zeros((im_rgb.shape), dtype=im_rgb.dtype)
def for_loop(i, values):
(im, n, hist, step, im_rgb) = values
im = im + (n // step) * (im_rgb == i)
return (im, n + hist[i], hist, step, im_rgb)
result, _, _, _, _ = jax.lax.fori_loop(0, 256, for_loop,
(im_rgb_new, n, hist, step, im_rgb))
im_rgbs.append(result.astype(jnp.float32) / 255.)
return jnp.concatenate(im_rgbs, 2)
def equalize(image: chex.Array, unused_cutoff: chex.Array) -> chex.Array:
if len(image.shape) < 4:
return _equalize(image)
else:
return jax.vmap(_equalize)(image)
def _posterize(image: chex.Array, bits: chex.Array) -> chex.Array:
"""The posterize transform: remove least significant bits.
Args:
image: an RGB image given as a float tensor in [0, 1].
bits: how many bits to ignore.
Returns:
The posterized image.
"""
mask = ~(2**(8 - bits) - 1)
image = (image * 255).astype(jnp.int32).clip(0, 255)
image = jnp.bitwise_and(image, mask)
return image.astype(jnp.float32) / 255.
def posterize(image: chex.Array, bits: chex.Array) -> chex.Array:
if len(image.shape) < 4:
return _posterize(image, bits)
else:
return jax.vmap(_posterize)(image, bits.astype(jnp.uint8))
def _solarize(image: chex.Array, threshold: chex.Array) -> chex.Array:
"""The solarization transformation: pixels > threshold are inverted.
Args:
image: an RGB image given as a float tensor in [0, 1].
threshold: the threshold in [0, 1] above which to invert the image.
Returns:
The solarized image.
"""
image = (1 - image) * (image >= threshold) + image * (image < threshold)
return image
def solarize(image: chex.Array, threshold: chex.Array) -> chex.Array:
if len(image.shape) < 4:
return _solarize(image, threshold)
else:
return jax.vmap(_solarize)(image, threshold)
def adjust_color(image: chex.Array,
factor: chex.Numeric,
channel: int = 0,
channel_axis: int = -1) -> chex.Array:
"""Shifts the color of an RGB by a given multiplicative amount.
Args:
image: an RGB image, given as a float tensor in [0, 1].
factor: the (additive) amount to shift the RGB by.
channel: the RGB channel to manipulate
channel_axis: the index of the channel axis.
Returns:
The color adjusted image.
"""
red, green, blue = color_conversion.split_channels(image, channel_axis)
if channel == 0:
red = jnp.clip(red + factor, 0., 1.)
elif channel == 1:
green = jnp.clip(green + factor, 0., 1.)
else:
blue = jnp.clip(blue + factor, 0., 1.)
return jnp.stack((red, green, blue), axis=channel_axis)
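# NOTE: The function below is an illustrative usage sketch and not part of the
# original module. It shows how the batched branches above are driven with one
# parameter per image; the shapes and thresholds are arbitrary.
def _example_batched_solarize() -> chex.Array:
  """Solarizes a batch of images with a different threshold per image."""
  images = jnp.linspace(0., 1., num=8 * 32 * 32 * 3).reshape((8, 32, 32, 3))
  thresholds = jnp.full((8,), 0.5)
  # With a 4D input, `solarize` vmaps `_solarize` over the leading batch axis.
  return solarize(images, thresholds)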
| distribution_shift_framework-master | distribution_shift_framework/core/pix/augment.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of post(-augmentation) processing steps."""
from typing import Tuple
import chex
import jax
def mixup(images: chex.Array,
labels: chex.Array,
alpha: float = 1.,
beta: float = 1.,
rng: chex.PRNGKey = None) -> Tuple[chex.Array, chex.Array]:
"""Interpolating two images to create a new image.
Source: https://arxiv.org/abs/1710.09412
Args:
images: Minibatch of images.
labels: One-hot encoded labels for minibatch.
alpha: Alpha parameter for the beta law which samples the interpolation
weight.
beta: Beta parameter for the beta law which samples the interpolation
weight.
rng: Random number generator state.
Returns:
Images resulting from the interpolation of pairs of images
and their corresponding weighted labels.
"""
  assert len(labels.shape) == 2, 'Labels need to represent one-hot encodings.'
batch_size = images.shape[0]
lmbda_rng, rng = jax.random.split(rng)
lmbda = jax.random.beta(lmbda_rng, a=alpha, b=beta, shape=())
idx = jax.random.permutation(rng, batch_size)
images_a = images
images_b = images[idx, :, :, :]
  images = lmbda * images_a + (1. - lmbda) * images_b
labels = lmbda * labels + (1. - lmbda) * labels[idx, :]
return images, labels
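# NOTE: The function below is an illustrative usage sketch and not part of the
# original module; the batch size, image shape and number of classes are
# arbitrary.
def _example_mixup() -> Tuple[chex.Array, chex.Array]:
  """Applies mixup to a dummy minibatch of images and one-hot labels."""
  rng = jax.random.PRNGKey(0)
  images = jax.numpy.ones((16, 32, 32, 3)) * 0.5
  labels = jax.nn.one_hot(jax.numpy.arange(16) % 10, num_classes=10)
  return mixup(images, labels, alpha=1., beta=1., rng=rng)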
| distribution_shift_framework-master | distribution_shift_framework/core/pix/postprocessing.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the ImageNet-C corruptions for sanity checks and eval.
All severity values are taken from ImageNet-C at
https://github.com/hendrycks/robustness/blob/master/ImageNet-C/create_c/make_imagenet_c.py
"""
import chex
from distribution_shift_framework.core.pix import color_conversion
import dm_pix
import jax
import jax.numpy as jnp
import numpy as np
def scale_image(image: chex.Array, z_factor: chex.Numeric) -> chex.Array:
"""Resizes an image."""
# And then resize
b, h, w, c = image.shape
resize_x = jax.image.scale_and_translate(
image,
shape=(b, int(h * z_factor), int(w * z_factor), c),
method='bilinear',
antialias=False,
scale=jnp.ones((2,)) * z_factor,
translation=jnp.zeros((2,)),
spatial_dims=(1, 2))
return resize_x
def zoom_blur(image: chex.Array, severity: int = 1, rng: chex.PRNGKey = None
) -> chex.Array:
"""The zoom blur corruption from ImageNet-C."""
del rng
c = [
np.arange(1, 1.11, 0.01),
np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.02),
np.arange(1, 1.26, 0.02),
np.arange(1, 1.31, 0.03)
][severity - 1]
_, h, w, _ = image.shape
image_zoomed = jnp.zeros_like(image)
for zoom_factor in c:
t_image_zoomed = scale_image(image, zoom_factor)
b = int(h * zoom_factor - h) // 2
t_image_zoomed = t_image_zoomed[:, b:b + h, b:b + w, :]
image_zoomed += t_image_zoomed
image_zoomed = (image_zoomed + image) / (c.shape[0] + 1)
return image_zoomed
def gaussian_blur(image: chex.Array,
severity: int = 1,
rng: chex.PRNGKey = None) -> chex.Array:
"""Gaussian blur corruption for ImageNet-C."""
del rng
c = [1, 2, 3, 4, 6][severity - 1]
return dm_pix.gaussian_blur(image, sigma=c, kernel_size=image.shape[1])
def speckle_noise(image: chex.Array,
severity: int = 1,
rng: chex.PRNGKey = None) -> chex.Array:
"""Speckle noise corruption in ImageNet-C."""
c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]
image = image + image * jax.random.normal(rng, shape=image.shape) * c
return jnp.clip(image, a_min=0, a_max=1)
def impulse_noise(image: chex.Array,
severity: int = 1,
rng: chex.PRNGKey = None) -> chex.Array:
"""Impulse noise corruption in ImageNet-C."""
c = [.03, .06, .09, 0.17, 0.27][severity - 1]
x = jnp.clip(image, 0, 1)
p = c
q = 0.5
out = x
flipped = jax.random.choice(
rng, 2, shape=x.shape, p=jax.numpy.array([1 - p, p]))
salted = jax.random.choice(
rng, 2, shape=x.shape, p=jax.numpy.array([1 - q, q]))
peppered = 1 - salted
mask = flipped * salted
out = out * (1 - mask) + mask
mask = flipped * peppered
out = out * (1 - mask)
return jnp.clip(out, a_min=0, a_max=1)
def shot_noise(image: chex.Array, severity: int = 1, rng: chex.PRNGKey = None
) -> chex.Array:
"""Shot noise in ImageNet-C corruptions."""
c = [60, 25, 12, 5, 3][severity - 1]
x = jnp.clip(image, 0, 1)
x = jax.random.poisson(rng, lam=x * c, shape=x.shape) / c
return jnp.clip(x, a_min=0, a_max=1)
def gaussian_noise(image: chex.Array,
severity: int = 1,
rng: chex.PRNGKey = None) -> chex.Array:
"""Gaussian noise in ImageNet-C corruptions."""
c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]
x = image + jax.random.normal(rng, shape=image.shape) * c
return jnp.clip(x, a_min=0, a_max=1)
def brightness(image: chex.Array, severity: int = 1, rng: chex.PRNGKey = None
) -> chex.Array:
"""The brightness corruption from ImageNet-C."""
del rng
c = [.1, .2, .3, .4, .5][severity - 1]
x = jnp.clip(image, 0, 1)
hsv = color_conversion.rgb_to_hsv(x)
h, s, v = color_conversion.split_channels(hsv, -1)
v = jnp.clip(v + c, 0, 1)
rgb_adjusted = color_conversion.hsv_planes_to_rgb_planes(h, s, v)
rgb = jnp.stack(rgb_adjusted, axis=-1)
return rgb
def saturate(image: chex.Array, severity: int = 1, rng: chex.PRNGKey = None
) -> chex.Array:
"""The saturation corruption from ImageNet-C."""
del rng
c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]
x = jnp.clip(image, 0, 1)
hsv = color_conversion.rgb_to_hsv(x)
h, s, v = color_conversion.split_channels(hsv, -1)
s = jnp.clip(s * c[0] + c[1], 0, 1)
rgb_adjusted = color_conversion.hsv_planes_to_rgb_planes(h, s, v)
rgb = jnp.stack(rgb_adjusted, axis=-1)
return rgb
def contrast(image: chex.Array, severity: int = 1, rng: chex.PRNGKey = None
) -> chex.Array:
"""The contrast corruption from ImageNet-C."""
del rng
c = [0.4, .3, .2, .1, .05][severity - 1]
return dm_pix.adjust_contrast(image, factor=c)
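# NOTE: The function below is an illustrative usage sketch and not part of the
# original module; the image batch, severities and PRNG seed are arbitrary.
def _example_apply_noise_corruptions():
  """Applies two of the noise corruptions to a dummy image batch."""
  rng = jax.random.PRNGKey(0)
  images = jnp.ones((2, 32, 32, 3)) * 0.5
  noisy = gaussian_noise(images, severity=3, rng=rng)
  speckled = speckle_noise(images, severity=2, rng=rng)
  return noisy, speckled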
| distribution_shift_framework-master | distribution_shift_framework/core/pix/corruptions.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configs for disentanglement datasets."""
import itertools
from typing import Any, Callable, Optional, Sequence
from distribution_shift_framework.core import hyper
from distribution_shift_framework.core.datasets import data_loaders
from distribution_shift_framework.core.datasets import data_utils
from distribution_shift_framework.core.datasets import lowdata_wrapper
import ml_collections
import tensorflow.compat.v2 as tf
_VALID_COLORS = ((1, 0, 0), (0, 1, 0), (0, 0, 1))
_EXP = 'config.experiment_kwargs.config'
_TRAIN_SPLIT = 'train'
_TEST_SPLIT = 'valid'
_ExampleFn = Callable[[tf.train.Example], tf.train.Example]
def _color_preprocess(mode: str,
preprocess: Optional[Callable[[str], _ExampleFn]] = None,
label: str = 'label') -> _ExampleFn:
"""Preprocessing function to add colour to white pixels in a binary image."""
def _color_fn(example: tf.train.Example) -> tf.train.Example:
if preprocess is not None:
example = preprocess(mode)(example)
example['image'] = tf.repeat(example['image'], 3, axis=2)
example['image'] = tf.cast(example['image'], tf.float32)
# Choose a random color.
color_id = tf.random.uniform(
shape=(), minval=0, maxval=len(_VALID_COLORS), dtype=tf.int64)
example['label_color'] = color_id
colors = tf.constant(_VALID_COLORS, dtype=tf.float32)[color_id]
example['image'] = example['image'] * colors
example['label'] = example[label]
example['fairness_features'] = {
k: v for k, v in example.items() if k.startswith('label_')
}
return example
return _color_fn
def _get_base_config(dataset_name: str, label: str, property_label: str
) -> ml_collections.ConfigDict:
"""Get base config."""
data = ml_collections.ConfigDict()
data.name = dataset_name
data.test = dataset_name
dataset_constants = data_utils.get_dataset_constants(dataset_name, label)
data.label_property = property_label
data.label = label
data.n_classes = dataset_constants['num_classes']
data.num_channels = dataset_constants['num_channels']
data.image_size = dataset_constants['image_size']
data.variance = dataset_constants['variance']
if dataset_name != data_utils.DatasetNames.DSPRITES.value or (
property_label != 'label_color'):
data.prop_values = dataset_constants['properties'][property_label]
data.n_properties = len(data.prop_values)
if dataset_name == data_utils.DatasetNames.DSPRITES.value and (
label == 'label_color' or property_label == 'label_color'):
data.num_channels = 3
if label == 'label_color':
data.n_classes = 3
if property_label == 'label_color':
data.prop_values = (0, 1, 2)
data.n_properties = 3
return data
def _get_filter_fns(values: Sequence[Any],
perc_property: float,
property_name: str) -> str:
cutoff = max(int((len(values) - 1) * perc_property), 0)
cutoff = values[cutoff]
filter_fns = (f'{property_name}:{cutoff}:less_equal,'
f'{property_name}:{cutoff}:greater')
return filter_fns
def get_data_config(dataset_name: str, label: str, property_label: str
) -> ml_collections.ConfigDict:
"""Get config for a given setup."""
data = _get_base_config(dataset_name, label, property_label)
dataset_loader = getattr(data_loaders, f'unbatched_load_{dataset_name}', '')
preprocess_fn = getattr(data_loaders, f'{dataset_name}_preprocess', '')
full_dataset_loader = getattr(data_loaders, f'load_{dataset_name}', '')
data.train_kwargs = ml_collections.ConfigDict()
data.train_kwargs.loader = lowdata_wrapper.load_data
data.train_kwargs.load_kwargs = dict()
data.train_kwargs.load_kwargs.dataset_loader = dataset_loader
data.train_kwargs.load_kwargs.weights = [1.]
data.train_kwargs.load_kwargs.dataset_kwargs = dict(subset=_TRAIN_SPLIT)
data.train_kwargs.load_kwargs.preprocess_fn = preprocess_fn
# Set up filters and number of samples.
data.train_kwargs.load_kwargs.num_samples = '0'
# A string to define how the dataset is filtered (not a boolean value).
data.train_kwargs.load_kwargs.filter_fns = 'True'
data.test_kwargs = ml_collections.ConfigDict()
data.test_kwargs.loader = full_dataset_loader
data.test_kwargs.load_kwargs = dict(subset=_TEST_SPLIT)
if dataset_name == data_utils.DatasetNames.DSPRITES.value and (
label == 'label_color' or property_label == 'label_color'):
    # Make the images different colours, as opposed to black and white.
preprocess = data.train_kwargs.load_kwargs.preprocess_fn
data.train_kwargs.load_kwargs.preprocess_fn = (
lambda m: _color_preprocess(m, preprocess, label))
data.test_kwargs.load_kwargs.preprocess_fn = (
lambda m: _color_preprocess(m, None, label))
return data
def get_alldata_config(dataset_name: str, label: str, property_label: str
) -> ml_collections.ConfigDict:
"""Config when using the full dataset."""
loader = getattr(data_loaders, f'load_{dataset_name}', '')
data = _get_base_config(dataset_name, label, property_label)
data.train_kwargs = ml_collections.ConfigDict()
data.train_kwargs.loader = loader
data.train_kwargs.load_kwargs = dict(subset=_TRAIN_SPLIT)
data.test_kwargs = ml_collections.ConfigDict()
data.test_kwargs.loader = loader
data.test_kwargs.load_kwargs = dict(subset=_TEST_SPLIT)
return data
def get_renderers(datatype: str,
dataset_name: str,
label: str,
property_label: str) -> ml_collections.ConfigDict:
if len(datatype.split('.')) > 1:
renderer, _ = datatype.split('.')
else:
renderer = datatype
return globals()[f'get_{renderer}_renderers'](
dataset_name, label=label, property_label=property_label)
def get_renderer_sweep(datatype: str) -> hyper.Sweep:
if len(datatype.split('.')) > 1:
_, sweep = datatype.split('.')
else:
sweep = datatype
return globals()[f'get_{sweep}_sweep']()
def get_resample_sweep() -> hyper.Sweep:
"""Sweep over the resampling operation of the different datasets."""
ratios = [1e-3]
n_samples = [1_000_000]
ratio_samples = list(itertools.product(ratios, n_samples))
ratio_samples_sweep = hyper.sweep(
f'{_EXP}.data.train_kwargs.load_kwargs.num_samples',
[f'{n_s},{int(max(1, n_s * r))}' for r, n_s in ratio_samples])
resample_weights = hyper.sweep(
f'{_EXP}.data.train_kwargs.load_kwargs.weights',
[[1 - i, i] for i in [1e-4, 1e-3, 1e-2, 1e-1, 0.5]])
return hyper.product([ratio_samples_sweep, resample_weights])
def get_fixeddata_sweep() -> hyper.Sweep:
"""Sweep over the amount of data and noise present."""
ratios = [1e-3]
n_samples = [1000, 10_000, 100_000, 1_000_000]
ratio_samples = list(itertools.product(ratios, n_samples))
ratio_samples_sweep = hyper.sweep(
f'{_EXP}.data.train_kwargs.load_kwargs.num_samples',
[f'{n_s},{int(max(1, n_s * r))}' for r, n_s in ratio_samples])
return ratio_samples_sweep
def get_noise_sweep() -> hyper.Sweep:
return hyper.sweep(f'{_EXP}.training.label_noise',
[i / float(10.) for i in list(range(7, 11))])
def get_lowdata_sweep() -> hyper.Sweep:
return hyper.sweep(
f'{_EXP}.data.train_kwargs.load_kwargs.num_samples',
[f'0,{n_s}' for n_s in [1, 5, 10, 50, 100, 500, 1000, 5000, 10_000]])
def get_ood_sweep() -> hyper.Sweep:
return hyper.sweep(f'{_EXP}.data.train_kwargs.load_kwargs.weights',
[[1., 0.]])
def get_base_renderers(dataset_name: str,
label: str = 'color',
property_label: str = 'shape'
) -> ml_collections.ConfigDict:
"""Get base config for the given dataset, label and property value."""
data = get_data_config(dataset_name, label, property_label)
data.train_kwargs.load_kwargs.filter_fns = 'True'
data.train_kwargs.load_kwargs.num_samples = '0'
data.train_kwargs.load_kwargs.weights = [1.]
return data
def get_ood_renderers(dataset_name: str,
label: str = 'color',
property_label: str = 'shape'
) -> ml_collections.ConfigDict:
"""Get OOD config for the given dataset, label and property value."""
data = get_data_config(dataset_name, label, property_label)
  perc_props_in_train = 0.7 if dataset_name in ('dsprites',) else 0.2
data.train_kwargs.load_kwargs.filter_fns = _get_filter_fns(
data.prop_values, perc_props_in_train, property_label)
data.train_kwargs.load_kwargs.weights = [1., 0.]
data.train_kwargs.load_kwargs.num_samples = '0,1000'
return data
def get_correlated_renderers(dataset_name: str,
label: str = 'color',
property_label: str = 'shape'
) -> ml_collections.ConfigDict:
"""Get correlated config for the given dataset, label and property value."""
data = get_data_config(dataset_name, label, property_label)
data.train_kwargs.load_kwargs.filter_fns = (
f'{label}:{property_label}:equal,True')
data.train_kwargs.load_kwargs.weights = [0.5, 0.5]
num_samples = '0,500' if dataset_name == 'dsprites' else '0,50'
data.train_kwargs.load_kwargs.num_samples = num_samples
data.train_kwargs.load_kwargs.shuffle_pre_sampling = True
data.train_kwargs.load_kwargs.shuffle_pre_sample_seed = 0
return data
def get_lowdata_renderers(dataset_name: str,
label: str = 'color',
property_label: str = 'shape'
) -> ml_collections.ConfigDict:
"""Get lowdata config for the given dataset, label and property value."""
data = get_ood_renderers(dataset_name, label, property_label)
data.train_kwargs.load_kwargs.weights = [0.5, 0.5]
data.train_kwargs.load_kwargs.num_samples = '0,10'
data.train_kwargs.load_kwargs.shuffle_pre_sampling = True
data.train_kwargs.load_kwargs.shuffle_pre_sample_seed = 0
return data
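# NOTE: The function below is an illustrative usage sketch and not part of the
# original module. The dataset, label and property names are assumptions about
# what `data_utils.get_dataset_constants` accepts and may need adjusting.
def _example_ood_data_config() -> ml_collections.ConfigDict:
  """Builds an OOD data config for a hypothetical dSprites setup."""
  return get_renderers(
      'ood',
      dataset_name='dsprites',
      label='label_shape',
      property_label='label_color')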
| distribution_shift_framework-master | distribution_shift_framework/configs/disentanglement_config.py |
#!/usr/bin/python
#
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| distribution_shift_framework-master | distribution_shift_framework/configs/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""SolutionData classes used to log solution process."""
import abc
import math
from typing import Any, Callable, Dict, List, Optional
from absl import logging
class BaseSolutionData(abc.ABC):
"""Base class for SolutionData.
This class encapsulates information that were logged during the solving
process. This includes primal bound improvements, the current best feasible
solution, and the elapsed time at each improvement.
"""
@abc.abstractproperty
def objective_type(self) -> Callable[[Any, Any], Any]:
raise NotImplementedError('objective_type property has to be implemented')
@abc.abstractproperty
def primal_bounds(self) -> List[float]:
raise NotImplementedError('primal_bounds property has to be implemented')
@abc.abstractproperty
def calibrated_time(self) -> List[Optional[float]]:
raise NotImplementedError('calibrated_time property has to be implemented')
@abc.abstractproperty
def time_in_seconds(self) -> List[float]:
raise NotImplementedError('time_in_seconds property has to be implemented')
@abc.abstractproperty
def original_solutions(self) -> List[Any]:
raise NotImplementedError(
'original_solutions property has to be implemented')
@abc.abstractproperty
def best_original_solution(
self) -> Optional[Any]:
raise NotImplementedError(
'best_original_solution property has to be implemented')
@abc.abstractproperty
def elapsed_real_time(self) -> float:
raise NotImplementedError(
'elapsed_real_time property has to be implemented')
@abc.abstractproperty
def elapsed_calibrated_time(self) -> Optional[float]:
raise NotImplementedError(
'elapsed_calibrated_time property has to be implemented')
@abc.abstractmethod
def _write(self, log_entry: Dict[str, Any], force_save_sol: bool):
raise NotImplementedError('write method has to be implemented')
def write(self, log_entry: Dict[str, Any], force_save_sol: bool = False):
if ((log_entry['best_primal_point'] is not None or
log_entry['primal_bound'] is not None) and
abs(log_entry['primal_bound']) < 1e19):
self._write(log_entry, force_save_sol)
class SolutionData(BaseSolutionData):
"""This is a basic implementation of BaseSolutionData."""
def __init__(self,
objective_type: Callable[[Any, Any], Any],
write_intermediate_sols: bool = False):
"""The key solution process logging class.
Args:
objective_type: decides if the objective should be decreasing or
increasing.
write_intermediate_sols: controls if we're recording intermediate
solutions of the solve (this is necessary for joint evals with
DeepBrancher)
"""
self._objective_type = objective_type
self._write_intermediate_sols = write_intermediate_sols
self._primal_bounds = []
self._calibrated_time = []
self._time_in_seconds = []
self._original_solutions = []
def _ensure_valid_primal_bound_update(self):
"""Ensures that primal bounds are monotonic, repairs them and logs a warning if not."""
# Given the logging logic, solutions should be monotonically improving.
if len(self._primal_bounds) > 1:
better_bound = self._objective_type(
self._primal_bounds[-1], self._primal_bounds[-2])
if not math.isclose(self._primal_bounds[-1], better_bound,
rel_tol=1e-5, abs_tol=1e-5):
        logging.warning('Primal bounds were not monotonic: %f and %f',
                        self._primal_bounds[-1], self._primal_bounds[-2])
self._primal_bounds[-1] = better_bound
@property
def objective_type(self) -> Callable[[Any, Any], Any]:
return self._objective_type
@property
def primal_bounds(self) -> List[float]:
return self._primal_bounds
@property
def calibrated_time(self) -> List[Optional[float]]:
return self._calibrated_time
@property
def time_in_seconds(self) -> List[float]:
return self._time_in_seconds
@property
def original_solutions(self) -> List[Any]:
return [sol for sol in self._original_solutions if sol is not None]
@property
def best_original_solution(
self) -> Optional[Any]:
best_orig_sol = None
if self._original_solutions and self._original_solutions[-1] is not None:
best_orig_sol = self._original_solutions[-1]
return best_orig_sol
@property
def elapsed_real_time(self) -> float:
elapsed_time = 0.0
if self._time_in_seconds:
elapsed_time = self._time_in_seconds[-1]
return elapsed_time
@property
def elapsed_calibrated_time(self) -> Optional[float]:
elapsed_time = 0.0
if self._calibrated_time:
elapsed_time = self._calibrated_time[-1]
return elapsed_time
def _write(self, log_entry: Dict[str, Any], force_save_sol: bool):
"""Log a new solution (better primal bound) for this particular instance.
Args:
log_entry: the dictionary with logging information.
force_save_sol: to be used for final solutions to be recorded even if
write_intermediate_sols is off. Otherwise we would record no actual
solutions in the SolutionData.
"""
sol = log_entry['best_primal_point']
if sol:
assert math.isclose(sol.objective_value, log_entry['primal_bound'])
self._time_in_seconds.append(log_entry['solving_time'])
self._calibrated_time.append(log_entry['solving_time_calibrated'])
self._primal_bounds.append(log_entry['primal_bound'])
if self._write_intermediate_sols or force_save_sol:
if sol is None:
raise ValueError('Trying to write full solution on None')
self._original_solutions.append(sol)
else:
self._original_solutions.append(None)
self._ensure_valid_primal_bound_update()
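# NOTE: The function below is an illustrative usage sketch and not part of the
# original module. The log-entry keys mirror the ones consumed by
# `SolutionData._write`; the values are arbitrary and no solution object is
# attached.
def _example_log_primal_improvement() -> SolutionData:
  """Logs a single primal bound improvement for a minimization problem."""
  sol_data = SolutionData(objective_type=min, write_intermediate_sols=False)
  sol_data.write({
      'best_primal_point': None,
      'primal_bound': 42.0,
      'solving_time': 1.5,
      'solving_time_calibrated': None,
  })
  return sol_data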
| neural_lns-main | solution_data.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A light GNN model for training NeuralLNS."""
from typing import List
from graph_nets import graphs
import sonnet as snt
import tensorflow.compat.v2 as tf
from neural_lns import layer_norm
GT_SPEC = graphs.GraphsTuple(
nodes=tf.TensorSpec(shape=(None, 34), dtype=tf.float32, name='nodes'),
edges=tf.TensorSpec(shape=(None, 1), dtype=tf.float32, name='edges'),
receivers=tf.TensorSpec(shape=(None,), dtype=tf.int64, name='receivers'),
senders=tf.TensorSpec(shape=(None,), dtype=tf.int64, name='senders'),
globals=tf.TensorSpec(shape=(), dtype=tf.float32, name='globals'),
n_node=tf.TensorSpec(shape=(None,), dtype=tf.int32, name='n_node'),
n_edge=tf.TensorSpec(shape=(None,), dtype=tf.int32, name='n_edge'))
def get_adjacency_matrix(graph: graphs.GraphsTuple) -> tf.SparseTensor:
upper = tf.stack([graph.senders, graph.receivers], axis=1)
lower = tf.stack([graph.receivers, graph.senders], axis=1)
indices = tf.concat([upper, lower], axis=0)
values = tf.squeeze(tf.concat([graph.edges, graph.edges], axis=0))
dense_shape = tf.cast(
tf.stack([graph.n_node[0], graph.n_node[0]], axis=0),
dtype=tf.int64)
adj = tf.sparse.SparseTensor(indices, values, dense_shape)
return tf.sparse.reorder(adj)
class LightGNNLayer(snt.Module):
"""A single layer of a GCN."""
def __init__(self,
node_model_hidden_sizes: List[int],
name=None):
super(LightGNNLayer, self).__init__(name=name)
self._node_model_hidden_sizes = node_model_hidden_sizes
@snt.once
def _initialize(self):
self._mlp = snt.nets.MLP(self._node_model_hidden_sizes,
activate_final=False)
def __call__(self,
input_nodes: tf.Tensor,
adj_mat: tf.SparseTensor,
is_training: bool) -> tf.Tensor:
self._initialize()
updated_nodes = self._mlp(input_nodes)
combined_nodes = tf.sparse.sparse_dense_matmul(adj_mat, updated_nodes)
return combined_nodes
class LightGNN(snt.Module):
"""A stack of LightGNNLayers."""
def __init__(self,
n_layers: int,
node_model_hidden_sizes: List[int],
output_model_hidden_sizes: List[int],
dropout: float = 0.0,
name=None,
**unused_args):
super(LightGNN, self).__init__(name=name)
self._n_layers = n_layers
self._node_model_hidden_sizes = node_model_hidden_sizes
self._output_model_hidden_sizes = output_model_hidden_sizes
self._dropout = dropout
@snt.once
def _initialize(self):
self._layers = []
for i in range(self._n_layers):
layer = LightGNNLayer(
self._node_model_hidden_sizes,
name='layer_%d' % i)
# Wrapper to apply layer normalisation and dropout
layer = layer_norm.ResidualDropoutWrapper(
layer, dropout_rate=self._dropout)
self._layers.append(layer)
self._input_embedding_model = snt.Linear(
self._node_model_hidden_sizes[-1], name='input_embedding')
self.output_model = snt.nets.MLP(self._output_model_hidden_sizes,
name='output_model')
def encode_graph(self,
graph: graphs.GraphsTuple,
is_training: bool) -> tf.Tensor:
self._initialize()
adj = get_adjacency_matrix(graph)
nodes = self._input_embedding_model(graph.nodes)
for layer in self._layers:
nodes = layer(nodes, adj, is_training=is_training)
return nodes
def __call__(self,
graph: graphs.GraphsTuple,
is_training: bool,
node_indices: tf.Tensor,
labels: tf.Tensor,
**unused_args) -> tf.Tensor:
n = tf.shape(labels)[0]
b = tf.shape(labels)[1]
nodes = self.encode_graph(graph, is_training)
all_logits = self.output_model(nodes)
logits = tf.expand_dims(tf.gather(all_logits, node_indices), axis=-1)
logits = tf.broadcast_to(logits, [n, b, 1])
return logits
@tf.function(input_signature=[
GT_SPEC,
tf.TensorSpec(shape=(None,), dtype=tf.int32, name='node_indices')
])
def greedy_sample(self, graph, node_indices):
nodes = self.encode_graph(graph, False)
logits = self.output_model(nodes)
probas = tf.math.sigmoid(tf.gather(logits, node_indices))
sample = tf.round(probas)
return sample, probas
@tf.function(input_signature=[
GT_SPEC,
tf.TensorSpec(shape=(None,), dtype=tf.int32),
tf.TensorSpec(shape=(None, None), dtype=tf.float32)
])
def predict_logits(self,
graph: graphs.GraphsTuple,
node_indices: tf.Tensor,
labels: tf.Tensor) -> tf.Tensor:
return self(graph, False, node_indices, labels)
def save_model(self, output_dir: str):
"""Saves a model to output directory."""
tf.saved_model.save(
self, output_dir, signatures={'greedy_sample': self.greedy_sample})
class NeuralLnsLightGNN(LightGNN):
"""A stack of LightGNNLayers."""
def __init__(self,
n_layers: int,
node_model_hidden_sizes: List[int],
output_model_hidden_sizes: List[int],
dropout: float = 0.0,
name=None,
**unused_args):
super().__init__(n_layers, node_model_hidden_sizes,
output_model_hidden_sizes,
dropout)
@tf.function(input_signature=[
GT_SPEC,
tf.TensorSpec(shape=(None,), dtype=tf.int32, name='node_indices')
])
def greedy_sample(self, graph, node_indices):
nodes = self.encode_graph(graph, False)
logits = self.output_model(nodes)
probas = tf.math.sigmoid(tf.gather(logits, node_indices))
sample = tf.round(probas)
return sample, probas
@tf.function(input_signature=[
GT_SPEC,
tf.TensorSpec(shape=(None,), dtype=tf.int32),
tf.TensorSpec(shape=(None, None), dtype=tf.float32)
])
def predict_logits(self,
graph: graphs.GraphsTuple,
node_indices: tf.Tensor,
labels: tf.Tensor) -> tf.Tensor:
return self(graph, False, node_indices, labels)
def get_model(**params):
return NeuralLnsLightGNN(**params)
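# NOTE: The function below is an illustrative usage sketch and not part of the
# original module. The toy graph matches GT_SPEC (34 node features, scalar edge
# weights) and the model hyperparameters are arbitrary.
def _example_greedy_sample():
  """Samples rounded variable assignments for two nodes of a toy graph."""
  model = get_model(
      n_layers=2,
      node_model_hidden_sizes=[32, 32],
      output_model_hidden_sizes=[32, 1])
  graph = graphs.GraphsTuple(
      nodes=tf.zeros((3, 34), dtype=tf.float32),
      edges=tf.ones((2, 1), dtype=tf.float32),
      senders=tf.constant([0, 1], dtype=tf.int64),
      receivers=tf.constant([2, 2], dtype=tf.int64),
      globals=tf.constant(0., dtype=tf.float32),
      n_node=tf.constant([3], dtype=tf.int32),
      n_edge=tf.constant([2], dtype=tf.int32))
  node_indices = tf.constant([0, 1], dtype=tf.int32)
  sample, probas = model.greedy_sample(graph, node_indices)
  return sample, probas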
| neural_lns-main | light_gnn.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MIP utility functions."""
import copy
import dataclasses
import enum
import math
from typing import Any, List
from absl import logging
import numpy as np
from neural_lns import sampling
class MPSolverResponseStatus(enum.Enum):
"""Enum of solver statuses."""
OPTIMAL = 0
FEASIBLE = 1
NOT_SOLVED = 2
INFEASIBLE = 3
UNBOUNDED = 4
INFEASIBLE_OR_UNBOUNDED = 5
STOPPED = 6
UNKNOWN = 7
FAILED = 8
BESTSOLLIMIT = 9
@dataclasses.dataclass
class MPVariable:
"""MPVariable contains all the information related to a single variable."""
# Lower and upper bounds; lower_bound must be <= upper_bound.
lower_bound: float = -math.inf
upper_bound: float = math.inf
# The coefficient of the variable in the objective. Must be finite.
objective_coefficient: float = 0.0
# True if the variable is constrained to be integer.
is_integer: bool = True
# The name of the variable.
name: str = ""
@dataclasses.dataclass
class MPConstraint:
"""MPConstraint contains all the information related to a single constraint."""
# var_index[i] is the variable index (w.r.t. to "variable" field of
# MPModel) of the i-th linear term involved in this constraint, and
# coefficient[i] is its coefficient. Only the terms with non-zero
# coefficients need to appear. var_index may not contain duplicates.
var_index: List[int] = dataclasses.field(default_factory=list)
coefficient: List[float] = dataclasses.field(default_factory=list)
# lower_bound must be <= upper_bound.
lower_bound: float = -math.inf
upper_bound: float = math.inf
# The name of the constraint.
name: str = ""
@dataclasses.dataclass
class MPModel:
"""MPModel fully encodes a Mixed-Integer Linear Programming model."""
# All the variables appearing in the model.
variable: List[MPVariable] = dataclasses.field(default_factory=list)
# All the constraints appearing in the model.
constraint: List[MPConstraint] = dataclasses.field(default_factory=list)
# True if the problem is a maximization problem. Minimize by default.
maximize: bool = False
# Offset for the objective function. Must be finite.
objective_offset: float = 0.0
# Name of the model.
name: str = ""
@dataclasses.dataclass
class MPSolutionResponse:
"""Class for solution response from the solver."""
# Objective value corresponding to the "variable_value" below, taking into
# account the source "objective_offset" and "objective_coefficient".
objective_value: float
# Variable values in the same order as the MPModel.variable field.
# This is a dense representation. These are set iff 'status' is OPTIMAL or
# FEASIBLE.
variable_value: List[float]
# Human-readable status string.
status_str: str
# Result of the optimization.
status: MPSolverResponseStatus = MPSolverResponseStatus.UNKNOWN
def tighten_variable_bounds(mip: Any,
names: List[str],
lbs: List[float],
ubs: List[float]):
"""Tightens variables of the given MIP in-place.
Args:
mip: Input MIP.
names: List of variable names to tighten.
lbs: List of lower bounds, in same order as names.
ubs: List of lower bounds, in same order as names.
"""
if len(names) != len(lbs) or len(lbs) != len(ubs):
raise ValueError(
"Names, lower and upper bounds should have the same length")
name_to_bounds = {}
for name, lb, ub in zip(names, lbs, ubs):
name = name.decode() if isinstance(name, bytes) else name
name_to_bounds[name] = (lb, ub)
c = 0
for v in mip.variable:
name = v.name.decode() if isinstance(v.name, bytes) else v.name
if name in name_to_bounds:
lb, ub = name_to_bounds[name]
v.lower_bound = max(lb, v.lower_bound)
v.upper_bound = min(ub, v.upper_bound)
c += 1
logging.info("Tightened %s vars", c)
def is_var_binary(variable: Any) -> bool:
"""Checks whether a given variable is binary."""
lb_is_zero = np.isclose(variable.lower_bound, 0)
ub_is_one = np.isclose(variable.upper_bound, 1)
return variable.is_integer and lb_is_zero and ub_is_one
def add_binary_invalid_cut(mip: Any,
names: List[str],
values: List[int],
weights: List[float],
depth: float):
"""Adds a weighted binary invalid cut to the given MIP in-place.
Given a binary assignment for all or some of the binary variables, adds
a constraint in the form:
sum_{i in zeros} w_i * x_i + sum_{j in ones} w_j * (1-x_j) <= d
The first summation is over variables predicted to be zeros, the second
summation is over variables predicted to be ones. d is the maximum distance
  allowed for a solution to be away from the predicted assignment.
Args:
mip: Input MIP.
names: Binary variable names.
values: Predicted values of binary variables.
    weights: Weights associated with the cost incurred by reversing the
      prediction.
depth: The amount of cost allowed to be incurred by flipping
assignments.
"""
assert len(names) == len(values) == len(weights)
name_to_idx = {}
for i, v in enumerate(mip.variable):
name = v.name.decode() if isinstance(v.name, bytes) else v.name
name_to_idx[name] = i
ub = depth
var_index = []
coeffs = []
for name, val, w in zip(names, values, weights):
name = name.decode() if isinstance(name, bytes) else name
assert is_var_binary(mip.variable[name_to_idx[name]])
var_index.append(name_to_idx[name])
if val == 1:
ub -= w
coeffs.append(-w)
else:
coeffs.append(w)
  constraint = MPConstraint()
  mip.constraint.append(constraint)
constraint.var_index.extend(var_index)
constraint.coefficient.extend(coeffs)
constraint.upper_bound = ub
constraint.name = "weighted_invalid_cut"
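# Illustrative sketch, not part of the original module: for the prediction
# (x0 = 0, x1 = 1) with unit weights and depth 1, the cut described in the
# docstring above is 1 * x0 + 1 * (1 - x1) <= 1, which is stored below as
# x0 - x1 <= 0. The toy model and variable names are made up for the example.
def _example_binary_invalid_cut() -> MPModel:
  model = MPModel(name="toy_cut")
  for var_name in ("x0", "x1"):
    model.variable.append(
        MPVariable(lower_bound=0.0, upper_bound=1.0, is_integer=True,
                   name=var_name))
  add_binary_invalid_cut(
      model, names=["x0", "x1"], values=[0, 1], weights=[1.0, 1.0], depth=1.0)
  cut = model.constraint[-1]
  assert cut.var_index == [0, 1]
  assert cut.coefficient == [1.0, -1.0]
  assert cut.upper_bound == 0.0  # depth minus the weight of the "one" variable
  return model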
def make_sub_mip(mip: Any, assignment: sampling.Assignment):
"""Creates a sub-MIP by tightening variables and applying cut."""
sub_mip = copy.deepcopy(mip)
tighten_variable_bounds(sub_mip, assignment.names,
assignment.lower_bounds, assignment.upper_bounds)
return sub_mip
| neural_lns-main | mip_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Solvers used to solve MIPs."""
import abc
import collections as py_collections
from typing import Any, Dict, Optional, Tuple
from absl import logging
import ml_collections
import numpy as np
from neural_lns import calibration
from neural_lns import data_utils
from neural_lns import local_branching_data_generation as lns_data_gen
from neural_lns import mip_utils
from neural_lns import preprocessor
from neural_lns import sampling
from neural_lns import solution_data
from neural_lns import solving_utils
class BaseSolver(abc.ABC):
"""Base class for solvers.
This class encapsulates an overall MIP solver / primal heuristic. We provide
three implementations, one wrapping around any classical solver (e.g. SCIP),
one implementing Neural Diving, and one implementing Neural Neighbourhood
Selection.
The API simply exposes a solve method which solves a given MIP and logs all
solution information inside a BaseSolutionData object.
"""
def __init__(self,
solver_config: ml_collections.ConfigDict,
sampler: Optional[sampling.BaseSampler] = None):
self._solver_config = solver_config
self._sampler = sampler
@abc.abstractmethod
def solve(
self, mip: Any, sol_data: solution_data.BaseSolutionData,
timer: calibration.Timer
) -> Tuple[solution_data.BaseSolutionData, Dict[str, Any]]:
raise NotImplementedError('solve method should be implemented')
class SCIPSolver(BaseSolver):
"""Agent that solves MIP with SCIP."""
def solve(
self, mip: Any, sol_data: solution_data.BaseSolutionData,
timer: calibration.Timer
) -> Tuple[solution_data.BaseSolutionData, Dict[str, Any]]:
status, sol_data, _ = scip_solve(
mip=mip,
scip_solve_config=self._solver_config,
sol_data=sol_data,
timer=timer)
stats = {}
stats['solution_status'] = str(status)
return sol_data, stats
class NeuralDivingSolver(BaseSolver):
"""Solver that implements Neural Diving."""
def solve(
self, mip: Any, sol_data: solution_data.BaseSolutionData,
timer: calibration.Timer
) -> Tuple[solution_data.BaseSolutionData, Dict[str, Any]]:
sub_mip, stats = predict_and_create_sub_mip(
mip, self._sampler, self._solver_config.predict_config)
status, sol_data, sub_mip_sol = scip_solve(
sub_mip, self._solver_config.scip_solver_config, sol_data, timer)
if self._solver_config.enable_restart:
status, sol_data, _ = scip_solve(
mip, self._solver_config.restart_scip_solver_config, sol_data, timer,
sub_mip_sol)
stats['solution_status'] = str(status)
return sol_data, stats
class NeuralNSSolver(BaseSolver):
"""Solver that implements Neural Neighbourhood Selection."""
def solve(
self, mip: Any, sol_data: solution_data.BaseSolutionData,
timer: calibration.Timer
) -> Tuple[solution_data.BaseSolutionData, Dict[str, Any]]:
# First run Neural Diving to get an initial incumbent solution:
diving_config = self._solver_config.diving_config
sampler_name = diving_config.predict_config.sampler_config.name
if sampler_name not in sampling.SAMPLER_DICT:
# Just run pure SCIP on original MIP in this case:
status, sol_data, incumbent_sol = scip_solve(
mip, diving_config.scip_solver_config, sol_data, timer)
else:
diving_sampler = sampling.SAMPLER_DICT[
diving_config.predict_config.sampler_config.name](
self._solver_config.diving_model)
sub_mip, _ = predict_and_create_sub_mip(mip, diving_sampler,
diving_config.predict_config)
_, sol_data, incumbent_sol = scip_solve(sub_mip,
diving_config.scip_solver_config,
sol_data, timer)
if incumbent_sol is None:
logging.warn('Did not find incumbent solution for MIP: %s, skipping',
mip.name)
return sol_data, {}
past_incumbents = py_collections.deque([incumbent_sol])
# Extract the root features here, as we need to expand by adding the values
# of the incumbent solution, which the model also saw during training.
root_features = data_utils.get_features(mip)
if root_features is None:
logging.warn('Could not extract features from MIP: %s, skipping',
mip.name)
return sol_data, {}
dummy_columns = np.zeros((root_features['variable_features'].shape[0],
2 * lns_data_gen.NUM_PAST_INCUMBENTS + 1),
dtype=root_features['variable_features'].dtype)
root_features['variable_features'] = np.concatenate(
[root_features['variable_features'], dummy_columns], axis=1)
# Enhance the features with the incumbent solution:
features = lns_data_gen.enhance_root_features(root_features,
past_incumbents, mip)
# The last column of enhanced features masks out continuous variables
num_integer_variables = np.sum(features['variable_features'][:, -1])
current_neighbourhood_size = int(self._solver_config.perc_unassigned_vars *
num_integer_variables)
sampler_params = self._solver_config.predict_config.sampler_config.params
update_dict = {'num_unassigned_vars': int(current_neighbourhood_size)}
sampler_params.update(update_dict)
# Keep and return stats from first step
sub_mip, stats = predict_and_create_lns_sub_mip(
mip, self._sampler, features.copy(), self._solver_config.predict_config)
for s in range(self._solver_config.num_solve_steps):
incumbent_sol = past_incumbents[0]
status, sol_data, improved_sol = scip_solve(
sub_mip, self._solver_config.scip_solver_config, sol_data, timer,
incumbent_sol)
logging.info('NLNS step: %s, solution status: %s', s, status)
if status in (mip_utils.MPSolverResponseStatus.OPTIMAL,
mip_utils.MPSolverResponseStatus.INFEASIBLE,
mip_utils.MPSolverResponseStatus.BESTSOLLIMIT):
current_neighbourhood_size = min(
current_neighbourhood_size * self._solver_config.temperature,
0.6 * num_integer_variables)
else:
current_neighbourhood_size = max(
current_neighbourhood_size // self._solver_config.temperature, 20)
logging.info('Updated neighbourhood size to: %s',
int(current_neighbourhood_size))
sampler_params = self._solver_config.predict_config.sampler_config.params
update_dict = {'num_unassigned_vars': int(current_neighbourhood_size)}
sampler_params.update(update_dict)
logging.info('%s', self._solver_config.predict_config)
if improved_sol is None:
break
# Add improved solution to buffer.
past_incumbents.appendleft(improved_sol)
if len(past_incumbents) > lns_data_gen.NUM_PAST_INCUMBENTS:
past_incumbents.pop()
# Recompute the last two columns of the features with new incumbent:
features = lns_data_gen.enhance_root_features(root_features,
past_incumbents, mip)
# Compute the next sub-MIP based on new incumbent:
sub_mip, _ = predict_and_create_lns_sub_mip(
mip, self._sampler, features.copy(),
self._solver_config.predict_config)
stats['solution_status'] = str(status)
return sol_data, stats
def scip_solve(
mip: Any,
scip_solve_config: ml_collections.ConfigDict,
sol_data: solution_data.BaseSolutionData,
timer: calibration.Timer,
best_known_sol: Optional[Any] = None
) -> Tuple[mip_utils.MPSolverResponseStatus, solution_data.BaseSolutionData,
Optional[Any]]:
"""Uses SCIP to solve the MIP and writes solutions to SolutionData.
Args:
mip: MIP to be solved
scip_solve_config: config for SCIPWrapper
sol_data: SolutionData to write solving data to
timer: timer to use to record real elapsed time and (possibly) calibrated
elapsed time
best_known_sol: previously known solution for the MIP to start solving
process with
Returns:
Status of the solving process.
SolutionData with filled solution data. All solutions are converted to the
original space according to SolutionDataWrapper transform functions
Best solution to the MIP passed to SCIP, not retransformed by SolutionData
to the original space (this is convenient for restarts)
"""
# Initialize SCIP solver and load the MIP
mip_solver = solving_utils.Solver()
mip_solver.load_model(mip)
# Try to load the best known solution to SCIP
if best_known_sol is not None:
added = mip_solver.add_solution(best_known_sol)
if added:
logging.info('Added solution to SCIP with obj: %f',
best_known_sol.objective_value)
else:
logging.warn('Could not add solution to SCIP')
# Solve the MIP with given config
try:
status = mip_solver.solve(scip_solve_config.params)
finally:
best_solution = mip_solver.get_best_solution()
# Add final solution to the solution data
if best_solution is not None:
log_entry = {}
log_entry['best_primal_point'] = best_solution
log_entry['primal_bound'] = best_solution.objective_value
log_entry['solving_time'] = timer.elapsed_real_time
log_entry['solving_time_calibrated'] = timer.elapsed_calibrated_time
sol_data.write(log_entry, force_save_sol=True)
return status, sol_data, best_solution
def predict_and_create_sub_mip(
mip: Any, sampler: sampling.BaseSampler,
config: ml_collections.ConfigDict) -> Tuple[Any, Dict[str, Any]]:
"""Takes in a MIP and a config and outputs a sub-MIP.
If the MIP is found infeasible or trivially optimal during feature extraction,
then no SuperMIP reductions are applied and the original MIP is passed back.
Args:
mip: MIP that is used to produce a sub-MIP
sampler: Sampler used to produce predictions
    config: config used for feature extraction and model sampling
Returns:
(sub-)MIP
Dict with assignment stats:
    num_variables_tightened: how many variables were tightened in an assignment
num_variables_cut: how many variables were used in an invalid cut, usually
0 (if cut was enabled) or all of them (if cut was disabled).
"""
# Step 1: Extract MIP features
features = data_utils.get_features(
mip, solver_params=config.extract_features_scip_config)
if features is None:
logging.warn('Could not extract features from MIP: %s, skipping', mip.name)
return mip, {}
# Step 2: Perform sampling
node_indices = features['binary_variable_indices']
var_names = features['variable_names']
variable_lbs = features['variable_lbs']
variable_ubs = features['variable_ubs']
graphs_tuple = data_utils.get_graphs_tuple(features)
assignment = sampler.sample(graphs_tuple, var_names, variable_lbs,
variable_ubs, node_indices,
**config.sampler_config.params)
sub_mip = mip_utils.make_sub_mip(mip, assignment)
  return sub_mip, {}
def predict_and_create_lns_sub_mip(
mip: Any, sampler: sampling.BaseSampler, features: Any,
config: ml_collections.ConfigDict) -> Tuple[Any, Dict[str, Any]]:
"""Produces a sub-MIP for LNS derived from model predictions.
This function uses the provided sampler to predict which binary variables of
the MIP should be unassigned. From this prediction we derive a sub-MIP where
  the remaining variables are fixed to their values in the incumbent solution.
Args:
mip: MIP that is used to produce a sub-MIP
sampler: SuperMIP sampler used to produce predictions
features: Model features used for sampling.
    config: config used for feature extraction and model sampling
Returns:
(sub-)MIP
Dict with assignment stats:
    num_variables_tightened: how many variables were tightened in an assignment
num_variables_cut: how many variables were used in an invalid cut, usually
0 (if cut was enabled) or all of them (if cut was disabled).
"""
node_indices = features['binary_variable_indices']
var_names = features['variable_names']
var_values = np.asarray([var_name.decode() for var_name in var_names])
graphs_tuple = data_utils.get_graphs_tuple(features)
assignment = sampler.sample(graphs_tuple, var_names, var_values, node_indices,
**config.sampler_config.params)
sub_mip = mip_utils.make_sub_mip(mip, assignment)
  return sub_mip, {}
SOLVING_AGENT_DICT = {
'scip': SCIPSolver,
'neural_diving': NeuralDivingSolver,
'neural_ns': NeuralNSSolver,
}
def run_solver(
mip: Any, solver_running_config: ml_collections.ConfigDict,
solver: BaseSolver
) -> Tuple[solution_data.BaseSolutionData, Dict[str, Any]]:
"""End-to-end MIP solving with a Solver.
Args:
mip: MIP that is used to produce a sub-MIP
solver_running_config: config to run the provided solver
solver: initialized solver to be used
Returns:
SolutionData
Dict with additional stats:
solution_status: the returned status by the solver.
elapsed_time_seconds: end-to-end time for instance in real time seconds.
elapsed_time_calibrated: end-to-end time for instance in calibrated time.
And for NeuralDivingSolver, additionally:
num_variables_tightened: how many variables were tightened in an
        assignment
num_variables_cut: how many variables were used in an invalid cut,
usually 0 (if cut was enabled) or all of them (if cut was disabled).
"""
# Stage 1: set up a timer
timer = calibration.Timer()
timer.start_and_wait()
# Stage 2: presolve the original MIP instance
presolver = None
presolved_mip = mip
if solver_running_config.preprocessor_configs is not None:
presolver = preprocessor.Preprocessor(
solver_running_config.preprocessor_configs)
_, presolved_mip = presolver.presolve(mip)
# Stage 3: setup solution data
objective_type = max if mip.maximize else min
sol_data = solution_data.SolutionData(
objective_type=objective_type,
write_intermediate_sols=solver_running_config.write_intermediate_sols)
if presolver is not None:
sol_data = solution_data.SolutionDataWrapper(
sol_data, sol_transform_fn=presolver.get_original_solution)
# Stage 4: Solve MIP
sol_data, solve_stats = solver.solve(presolved_mip, sol_data, timer)
timer.terminate_and_wait()
solve_stats['elapsed_time_seconds'] = timer.elapsed_real_time
solve_stats['elapsed_time_calibrated'] = timer.elapsed_calibrated_time
return sol_data, solve_stats
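# Illustrative wiring sketch, not part of the original module. The config keys
# below mirror the fields read by `SCIPSolver`/`scip_solve` and `run_solver`
# above; the SCIP parameter value is an arbitrary example. Note that
# `calibration.Timer` and `solving_utils.Solver` are interface stubs in this
# release, so actually executing `run_solver` also requires concrete
# implementations of those wrappers.
def _example_build_scip_solver():
  scip_config = ml_collections.ConfigDict({
      # Forwarded to `solving_utils.Solver.solve` inside `scip_solve`.
      'params': {'time_limit_seconds': 60},
  })
  running_config = ml_collections.ConfigDict({
      'preprocessor_configs': None,      # Skip the presolve stage.
      'write_intermediate_sols': False,  # Forwarded to SolutionData.
  })
  solver = SOLVING_AGENT_DICT['scip'](scip_config)
  # With a concrete MIP `mip` one would then call:
  #   sol_data, stats = run_solver(mip, running_config, solver)
  return solver, running_config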
| neural_lns-main | solvers.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Wrapper APIs for MIP preprocessing."""
import abc
from typing import Optional, Tuple
from neural_lns import mip_utils
class Preprocessor(abc.ABC):
"""Class describing the API used to access a MIP presolver.
  This class should be used as a wrapper around any general presolving method
  for MIPs, e.g. the presolver used in SCIP. The API only needs to expose a
  presolve method that turns an MPModel into a presolved MPModel, as well as a
  get_original_solution method that turns a solution to the presolved model
  into a solution to the original one.
"""
def __init__(self, *args, **kwargs):
"""Initializes the preprocessor."""
def presolve(
self, mip: mip_utils.MPModel
) -> Tuple[mip_utils.MPSolverResponseStatus, Optional[mip_utils.MPModel]]:
"""Presolve the given MIP as MPModel.
Args:
mip: MPModel for MIP instance to presolve.
Returns:
status: A Status returned by the presolver.
result: The MPModel of the presolved problem.
"""
raise NotImplementedError('presolve method has to be implemented')
def get_original_solution(
self,
solution: mip_utils.MPSolutionResponse) -> mip_utils.MPSolutionResponse:
raise NotImplementedError(
'get_original_solution method has to be implemented')
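# Minimal illustrative subclass, not part of the original module: a no-op
# preprocessor that satisfies the API above by returning the MIP and solutions
# unchanged. A real implementation would wrap an actual presolver and undo its
# transformations in get_original_solution.
class IdentityPreprocessor(Preprocessor):
  """Preprocessor that applies no reductions."""
  def presolve(
      self, mip: mip_utils.MPModel
  ) -> Tuple[mip_utils.MPSolverResponseStatus, Optional[mip_utils.MPModel]]:
    # NOT_SOLVED indicates that the instance still has to be solved.
    return mip_utils.MPSolverResponseStatus.NOT_SOLVED, mip
  def get_original_solution(
      self,
      solution: mip_utils.MPSolutionResponse) -> mip_utils.MPSolutionResponse:
    return solution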
| neural_lns-main | preprocessor.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for feature extraction."""
import functools
from typing import Any, Dict, NamedTuple, Optional
from graph_nets import graphs
import ml_collections
import tensorflow.compat.v2 as tf
from neural_lns import mip_utils
from neural_lns import preprocessor
from neural_lns import solving_utils
BIAS_FEATURE_INDEX = 1
SOLUTION_FEATURE_INDEX = 14
BINARY_FEATURE_INDEX = 15
# Number of variable features without incumbent features.
NUM_ROOT_VARIABLE_FEATURES = 19
# Number of past incumbents to include in features.
NUM_PAST_INCUMBENTS = 3
# Total number of variable features.
NUM_VARIABLE_FEATURES = NUM_ROOT_VARIABLE_FEATURES + 2 * NUM_PAST_INCUMBENTS + 1
_INDICATOR_DIM = 1
_CON_FEATURE_DIM = 5
ORDER_TO_FEATURE_INDEX = {
'coefficient': 6,
'fractionality': 11,
}
# SCIP feature extraction parameters
SCIP_FEATURE_EXTRACTION_PARAMS = ml_collections.ConfigDict({
'seed': 42,
'time_limit_seconds': 60 * 10,
'separating_maxroundsroot': 0, # No cuts
'conflict_enable': False, # No additional cuts
'heuristics_emphasis': 'off', # No heuristics
})
class DatasetTuple(NamedTuple):
state: Dict[str, tf.Tensor]
graphs_tuple: graphs.GraphsTuple
labels: tf.Tensor
integer_labels: tf.Tensor
integer_node_indices: tf.Tensor
def get_dataset_feature_metadata() -> Dict[str, tf.io.VarLenFeature]:
"""Returns the schema of the data for writing Neural LNS datasets."""
features = {
'constraint_features': tf.io.VarLenFeature(dtype=tf.string),
'edge_features': tf.io.VarLenFeature(dtype=tf.string),
'edge_indices': tf.io.VarLenFeature(dtype=tf.string),
'variable_features': tf.io.VarLenFeature(dtype=tf.string),
'variable_lbs': tf.io.VarLenFeature(dtype=tf.float32),
'variable_ubs': tf.io.VarLenFeature(dtype=tf.float32),
'constraint_feature_names': tf.io.VarLenFeature(dtype=tf.string),
'variable_feature_names': tf.io.VarLenFeature(dtype=tf.string),
'edge_features_names': tf.io.VarLenFeature(dtype=tf.string),
'variable_names': tf.io.VarLenFeature(dtype=tf.string),
'binary_variable_indices': tf.io.VarLenFeature(dtype=tf.int64),
'all_integer_variable_indices': tf.io.VarLenFeature(dtype=tf.int64),
'model_maximize': tf.io.VarLenFeature(dtype=tf.int64),
'best_solution_labels': tf.io.VarLenFeature(dtype=tf.float32),
}
return features
def bnb_node_state_to_model_inputs(
state: Dict[str, Any],
node_depth: Optional[int] = None) -> graphs.GraphsTuple:
"""Convert a branch-and-bound node state into model inputs.
Args:
state: State information.
node_depth: Depth of this search state.
Returns:
graph_tuple: The graph structure information.
"""
variable_features = tf.where(
tf.math.is_nan(state['variable_features']),
tf.zeros_like(state['variable_features']),
state['variable_features'])
n_variables = tf.shape(variable_features)[0]
variable_feature_dim = tf.shape(variable_features)[1]
n_constraints = tf.shape(state['constraint_features'])[0]
constraint_feature_dim = tf.shape(state['constraint_features'])[1]
n_nodes = n_variables + n_constraints
tf.Assert(constraint_feature_dim == _CON_FEATURE_DIM,
[constraint_feature_dim])
padded_variables = tf.pad(
variable_features,
[[0, 0], [0, constraint_feature_dim]],
'CONSTANT') # + constraint_feature_dim
# Pad again with 1 to indicate variable corresponds to vertex.
padded_variables = tf.pad(
padded_variables,
[[0, 0], [0, _INDICATOR_DIM]],
'CONSTANT', constant_values=1.0) # + 1
padded_constraints = tf.pad(
state['constraint_features'],
[[0, 0], [variable_feature_dim, _INDICATOR_DIM]],
'CONSTANT') # + variable_feature_dim + 1
nodes = tf.concat([padded_variables, padded_constraints], axis=0)
edge_indices = tf.concat(
[state['edge_indices'][:, :1] + tf.cast(n_variables, dtype=tf.int64),
state['edge_indices'][:, 1:]], axis=1)
edge_features = state['edge_features']
node_features_dim = NUM_VARIABLE_FEATURES + _CON_FEATURE_DIM + 3
graph_tuple = graphs.GraphsTuple(
nodes=tf.cast(tf.reshape(nodes, [-1, node_features_dim]),
dtype=tf.float32),
edges=tf.cast(edge_features, dtype=tf.float32),
globals=tf.cast(node_depth, dtype=tf.float32),
receivers=edge_indices[:, 0], # constraint
senders=edge_indices[:, 1], # variables
n_node=tf.reshape(n_nodes, [1]),
n_edge=tf.reshape(tf.shape(state['edge_features'])[0], [1]))
return graph_tuple
def convert_to_minimization(gt: graphs.GraphsTuple, state: Dict[str, Any]):
"""Changes the sign of the objective coefficients of all variable nodes.
Args:
gt: Input graph.
state: Raw feature dictionary.
Returns:
graphs.GraphsTuple with updated nodes.
"""
nodes = gt.nodes
if tf.cast(state['model_maximize'], bool):
num_vars = tf.shape(state['variable_features'])[0]
feature_idx = ORDER_TO_FEATURE_INDEX['coefficient']
indices = tf.stack([
tf.range(num_vars),
tf.broadcast_to(tf.constant(feature_idx), shape=[num_vars])
])
indices = tf.transpose(indices)
sign_change = tf.tensor_scatter_nd_update(
tf.ones_like(nodes), indices,
tf.broadcast_to(tf.constant(-1.0), shape=[num_vars]))
nodes = nodes * sign_change
return gt.replace(nodes=nodes)
def get_graphs_tuple(state: Dict[str, Any]) -> graphs.GraphsTuple:
"""Converts feature state into GraphsTuple."""
state_with_bounds = state.copy()
state_with_bounds['variable_features'] = tf.concat([
state['variable_features'],
tf.expand_dims(state['variable_lbs'], -1),
tf.expand_dims(state['variable_ubs'], -1)
], -1)
graphs_tuple = bnb_node_state_to_model_inputs(
state_with_bounds, node_depth=1)
graphs_tuple = convert_to_minimization(graphs_tuple, state_with_bounds)
return graphs_tuple
def get_features(
mip: mip_utils.MPModel,
solver_params: ml_collections.ConfigDict = SCIP_FEATURE_EXTRACTION_PARAMS
) -> Optional[Dict[str, Any]]:
"""Extracts and preprocesses the features from the root of B&B tree."""
mip_solver = solving_utils.Solver()
presolver = preprocessor.Preprocessor()
_, mip = presolver.presolve(mip)
status = mip_solver.load_model(mip)
features = None
if status == mip_utils.MPSolverResponseStatus.NOT_SOLVED:
features = mip_solver.extract_lp_features_at_root(solver_params)
if features is not None and mip is not None:
features['model_maximize'] = mip.maximize
return features
def apply_feature_scaling(state, labels):
"""Scale variable bounds, solutions, coefficients and biases by sol norm.
  Our goal here is to scale continuous variables in such a way that we wouldn't
  change the integer feasible solutions to the MIP.
  In order to achieve that, we have to ensure that all constraints are scaled
  appropriately:
  a^Tx <= b can be rescaled without changes in the integer solutions via:
  (a_int / s)^Tx_int + a_cont^T(x_cont / s) <= b / s
  where
  - s = ||x_cont||,
  - a_int/cont are constraint coefficients corresponding to integer or
    continuous variables,
  - x_int/cont are solution values corresponding to integer or continuous
    variables.
Args:
state: dictionary with tensors corresponding to a single MIP instance
labels: tensor with feasible solutions, including integer and continuous
variables.
Returns:
state: dictionary with scaled tensors
labels: tensor with scaled continuous solution values
"""
sol = state['variable_features'][:, SOLUTION_FEATURE_INDEX]
is_binary = state['variable_features'][:, BINARY_FEATURE_INDEX]
is_non_integer = ~tf.cast(is_binary, tf.bool)
continuous_sol = tf.boolean_mask(sol, is_non_integer)
norm = tf.norm(continuous_sol)
lbs = state['variable_lbs']
ubs = state['variable_ubs']
state['variable_lbs'] = tf.where(is_non_integer, lbs / norm, lbs)
state['variable_ubs'] = tf.where(is_non_integer, ubs / norm, ubs)
scaled_sol = tf.where(is_non_integer, sol / norm, sol)
variable_features = tf.concat(
[state['variable_features'][:, :SOLUTION_FEATURE_INDEX],
tf.expand_dims(scaled_sol, axis=-1),
state['variable_features'][:, SOLUTION_FEATURE_INDEX + 1:]],
axis=1)
state['variable_features'] = variable_features
senders = state['edge_indices'][:, 1]
is_integer_edge = tf.gather(~is_non_integer, senders)
edges = tf.squeeze(state['edge_features'])
scaled_edges = tf.where(is_integer_edge, edges / norm, edges)
state['edge_features'] = tf.reshape(scaled_edges, [-1, 1])
biases = state['constraint_features'][:, BIAS_FEATURE_INDEX]
scaled_biases = biases / norm
state['constraint_features'] = tf.concat([
state['constraint_features'][:, :BIAS_FEATURE_INDEX],
tf.reshape(scaled_biases, [-1, 1]),
state['constraint_features'][:, BIAS_FEATURE_INDEX + 1:],
], axis=1)
is_non_integer = tf.reshape(is_non_integer, [-1, 1])
scaled_labels = tf.where(is_non_integer, labels / norm, labels)
return state, scaled_labels
def decode_fn(record_bytes):
"""Decode a tf.train.Example.
The list of (feature_name, feature_dtype, feature_ndim) is:
[('variable_features', tf.float32, 2),
('binary_variable_indices', tf.int64, 1),
('model_maximize', tf.bool, 0),
('variable_names', tf.string, 1),
('constraint_features', tf.float32, 2),
('best_solution_labels', tf.float32, 1),
('variable_lbs', tf.float32, 1),
('edge_indices', tf.int64, 2),
('all_integer_variable_indices', tf.int64, 1),
('edge_features_names', tf.string, 0),
('variable_feature_names', tf.string, 0),
('constraint_feature_names', tf.string, 0),
('variable_ubs', tf.float32, 1),
('edge_features', tf.float32, 2)]
Args:
record_bytes: Serialised example.
Returns:
Deserialised example.
"""
example = tf.io.parse_single_example(
# Data
record_bytes,
# Schema
get_dataset_feature_metadata()
)
# Parse all 2-D tensors and cast to the right dtype
parsed_example = {}
parsed_example['variable_features'] = tf.io.parse_tensor(tf.sparse.to_dense(
example['variable_features'])[0], out_type=tf.float32)
parsed_example['constraint_features'] = tf.io.parse_tensor(tf.sparse.to_dense(
example['constraint_features'])[0], out_type=tf.float32)
parsed_example['edge_indices'] = tf.io.parse_tensor(tf.sparse.to_dense(
example['edge_indices'])[0], out_type=tf.int64)
parsed_example['edge_features'] = tf.io.parse_tensor(tf.sparse.to_dense(
example['edge_features'])[0], out_type=tf.float32)
# Convert the remaining features to dense.
for key, value in example.items():
if key not in parsed_example:
parsed_example[key] = tf.sparse.to_dense(value)
return parsed_example
def extract_data(state: Dict[str, Any], scale_features: bool = False):
"""Create a DatasetTuple for each MIP instance."""
num_vars = len(state['best_solution_labels'])
labels = tf.reshape(state['best_solution_labels'], [num_vars, -1])
if scale_features:
state, labels = apply_feature_scaling(state, labels)
if 'features_extraction_time' not in state:
state['features_extraction_time'] = tf.constant(
[], dtype=tf.float32)
graphs_tuple = get_graphs_tuple(state)
node_indices = tf.cast(state['binary_variable_indices'], tf.int32)
# We allow filtering out instances that are invalid.
valid_example = (tf.size(labels) > 0)
if valid_example:
int_labels = tf.gather(labels, node_indices)
int_labels = tf.cast(tf.round(int_labels), tf.int32)
int_labels = tf.cast(tf.expand_dims(int_labels, axis=-1), tf.int32)
else:
int_labels = tf.constant([], shape=[0, 0, 0], dtype=tf.int32)
labels = tf.constant([], shape=[0, 0], dtype=tf.float32)
return DatasetTuple(
state=state,
graphs_tuple=graphs_tuple,
integer_node_indices=node_indices,
labels=labels,
integer_labels=int_labels)
def get_dataset(input_path: str,
scale_features: bool = False,
shuffle_size: int = 1000,
num_epochs: Optional[int] = None) -> tf.data.Dataset:
"""Makes a tf.Dataset with correct preprocessing."""
ds = tf.data.TFRecordDataset([input_path]).repeat(num_epochs)
if shuffle_size > 0:
ds = ds.shuffle(shuffle_size, reshuffle_each_iteration=True)
data_fn = functools.partial(extract_data, scale_features=scale_features)
return ds.map(decode_fn).map(data_fn)
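# Illustrative usage sketch, not part of the original module. The tfrecord
# path is a placeholder (it matches the example path in config_train.py) and
# must point to records written with the schema returned by
# `get_dataset_feature_metadata`.
def _example_iterate_dataset(
    path: str = '/tmp/neural_lns/data/example.tfrecord') -> tf.data.Dataset:
  ds = get_dataset(path, scale_features=False, shuffle_size=0, num_epochs=1)
  for ds_tuple in ds.take(1):
    # Each element is a DatasetTuple pairing the MIP graph with its labels.
    tf.print('num binary variables:', tf.size(ds_tuple.integer_node_indices))
    tf.print('graph nodes:', ds_tuple.graphs_tuple.n_node)
  return ds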
| neural_lns-main | data_utils.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Library with functions required to generate LNS imitation data for one MIP."""
import collections as py_collections
import os
import pickle
from typing import Any, Dict, Optional, Sequence, Text
from absl import logging
import ml_collections
import numpy as np
from neural_lns import data_utils
from neural_lns import local_branching_expert
from neural_lns import mip_utils
# LP feature extraction needs to fully process the root node, so allow enough
# time for that.
MIN_SOLVE_TIME = 1800
# SCIP solving parameters
SCIP_SOLVING_PARAMS = ml_collections.ConfigDict({
'seed': 42,
'time_limit_seconds': 1800,
'relative_gap': 0
})
def get_incumbent(
instance_name: Text,
dataset: Text,
solution_index: int) -> Optional[mip_utils.MPSolutionResponse]:
"""Tries to retrieve a solution for the MIP from corresponding pickle file."""
instance_path = os.path.join(dataset, instance_name)
solutions = pickle.load(open(instance_path, 'rb'))
if len(solutions) <= solution_index:
raise ValueError(
f'Fewer than {solution_index+1} solutions found for {instance_name}')
else:
solution = solutions[solution_index]
return solution
def get_flipped_vars(mip: mip_utils.MPModel,
incumbent: mip_utils.MPSolutionResponse,
improved: mip_utils.MPSolutionResponse,
var_names: np.ndarray) -> np.ndarray:
"""Returns an array indicating which binary variables were flipped."""
is_flipped = {}
# Note that non-binary variables are always assigned a 0.
for idx, variable in enumerate(mip.variable):
if (mip_utils.is_var_binary(variable) and round(
incumbent.variable_value[idx]) != round(improved.variable_value[idx])):
is_flipped[variable.name] = 1.0
else:
is_flipped[variable.name] = 0.0
# Make sure the array has the variables in the order in which they appear in
# the features.
  is_flipped_reordered = np.zeros(len(var_names), dtype=bool)
for idx, var_name in enumerate(var_names):
if 'Constant' in var_name.decode():
is_flipped_reordered[idx] = 0.0
else:
is_flipped_reordered[idx] = is_flipped[var_name.decode()]
return is_flipped_reordered
def enhance_root_features(
root_features: Dict[str, Any],
incumbents: Sequence[Any],
lp_sol: Optional[Any] = None
) -> Dict[str, Any]:
"""Adds incumbent var values and integer mask to the feature array.
This accepts a list of up to NUM_PAST_INCUMBENTS past incumbents,
sorted from most recent to least. Each incumbent will introduce two columns
to the features: The first column represents the incumbent variable values,
  and the second one is an all-ones column indicating that the incumbent is
present in the features.
A final column is added to the end that masks out continuous variables.
Args:
root_features: Root features without incumbent information.
incumbents: List of past incumbents, ordered by most recent first.
lp_sol: solution to the LP relaxation of the LNS MIP solved by the expert.
Returns:
Updated features dict.
"""
if len(incumbents) > data_utils.NUM_PAST_INCUMBENTS:
raise ValueError(
        f'Too many past incumbents provided: {len(incumbents)}')
# Fill columns corresponding to incumbents
for idx, incumbent in enumerate(incumbents):
column = data_utils.NUM_ROOT_VARIABLE_FEATURES + 2 * idx
incumbent_values = np.array(
[incumbent[var_name.decode()]
for var_name in root_features['variable_names']],
dtype=root_features['variable_features'].dtype)
# Override features column corresponding to incumbent values.
root_features['variable_features'][:, column] = incumbent_values
# Override features column corresponding to incumbent presence indicator.
root_features['variable_features'][:, column + 1] = np.ones(
len(incumbent_values))
if lp_sol is not None:
lp_sol_values = np.array([
lp_sol[var_name.decode()]
for var_name in root_features['variable_names']
],
dtype=root_features['variable_features'].dtype)
lp_sol_column_index = data_utils.NUM_ROOT_VARIABLE_FEATURES + 2 * len(
incumbents)
root_features['variable_features'][:, lp_sol_column_index] = lp_sol_values
# The last column masks out the continuous variables.
integer_values_mask = np.ones(len(root_features['variable_names']),
dtype=root_features['variable_features'].dtype)
for idx, _ in enumerate(integer_values_mask):
if idx not in root_features['all_integer_variable_indices']:
integer_values_mask[idx] = 0.0
root_features['variable_features'][:, -1] = integer_values_mask
return root_features
def generate_data_for_instance(
instance_name: Text,
dataset: Text,
neighbourhood_size: int = 20,
percentage: bool = False,
sequence_length: int = 10,
add_incumbent_to_scip: bool = True,
solution_index: int = 0,
scip_params: ml_collections.ConfigDict = SCIP_SOLVING_PARAMS,
num_var_features: int = data_utils.NUM_VARIABLE_FEATURES) -> int:
"""Generates data from which we learn to imitate the expert.
This loads a MIP instance from a pickle file and generates the expert data.
Args:
instance_name: The name of the MIP instance.
dataset: Dataset name that the instance belongs to.
neighbourhood_size: Maximum Hamming dist to search.
percentage: Whether neighbourhood_size should be treated as a percentage
of total number of variables.
sequence_length: How many consecutive improvements to do.
add_incumbent_to_scip: Whether to feed SCIP the incumbent solution.
solution_index: Which of the solutions to use as the first incumbent.
scip_params: Dictionary of SCIP parameters to use.
num_var_features: Number of features, NUM_VARIABLE_FEATURES or
NUM_VARIABLE_FEATURES_LP.
Returns:
status: 1 if expert data generation was successful, 0 otherwise.
"""
mip = pickle.load(open(os.path.join(dataset, instance_name), 'rb'))
if percentage:
num_integer = 0
for var in mip.variable:
if var.is_integer:
num_integer += 1
neighbourhood_size = int(num_integer * neighbourhood_size / 100)
try:
incumbent = get_incumbent(instance_name, dataset, solution_index)
except ValueError:
logging.warning('No solution found for %s', instance_name)
return 0
root_features = data_utils.get_features(mip, scip_params)
if root_features is None or root_features['variable_features'] is None:
logging.warning('No root features found for %s', instance_name)
return 0
# Append dummy columns to the variable features, which is where we will put
# the past incumbent solutions and the mask for assigned values at each step.
num_extra_var_features = num_var_features - data_utils.NUM_ROOT_VARIABLE_FEATURES
dummy_columns = np.zeros((root_features['variable_features'].shape[0],
num_extra_var_features),
dtype=root_features['variable_features'].dtype)
if root_features is not None:
root_features['variable_features'] = np.concatenate(
[root_features['variable_features'], dummy_columns], axis=1)
assert root_features['variable_features'].shape[
1] == data_utils.NUM_VARIABLE_FEATURES
status = 1
past_incumbents = py_collections.deque([incumbent])
for step in range(sequence_length):
incumbent = past_incumbents[0]
improved_sol = local_branching_expert.improve_solution(
mip, incumbent, neighbourhood_size, scip_params,
add_incumbent_to_scip=add_incumbent_to_scip)
lp_sol = local_branching_expert.get_lns_lp_solution(
mip, incumbent, neighbourhood_size, scip_params)
if improved_sol is None:
# In case of solver failure, print a warning and break.
logging.warning('Solver failed for MIP %s at step %d ',
instance_name, step)
status = 0
break
# Add features corresponding to the incumbent solution and integer mask.
# NB This will overwrite the last column of the variable features.
features = enhance_root_features(root_features, past_incumbents, lp_sol)
# Figure out which variables were flipped between incumbent and improved.
features['best_solution_labels'] = get_flipped_vars(
mip, incumbent, improved_sol, features['variable_names'])
# Add new incumbent to incumbent list, and prune to size if necessary
past_incumbents.appendleft(improved_sol)
if len(past_incumbents) > data_utils.NUM_PAST_INCUMBENTS:
past_incumbents.pop()
return status
| neural_lns-main | local_branching_data_generation.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sampling strategies for Neural LNS."""
import abc
from typing import Any, List, NamedTuple, Optional
from graph_nets import graphs
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
class Assignment(NamedTuple):
names: List[str]
lower_bounds: List[int]
upper_bounds: List[int]
def sample_probas(model: Any, gt: graphs.GraphsTuple,
node_indices: np.ndarray) -> np.ndarray:
"""Obtain variable probabilities from a conditionally independent model.
Args:
model: SavedModel to sample from.
gt: GraphsTuple of input MIP.
node_indices: Indices of variables to predict.
Returns:
np.ndarray of probabilities of the sample.
"""
_, probas = model.greedy_sample(gt, node_indices)
return probas.numpy()
class BaseSampler(metaclass=abc.ABCMeta):
"""Abstract class for samplers."""
def __init__(self, model_path: Optional[str] = None):
"""Initialization.
Args:
model_path: Model path to load for prediction/sampling.
"""
self.model_path = model_path
self.model = None
@abc.abstractmethod
def sample(
self,
graphs_tuple: graphs.GraphsTuple,
var_names: np.ndarray,
lbs: np.ndarray,
ubs: np.ndarray,
node_indices: np.ndarray,
**kwargs) -> Assignment:
"""Returns a sample assignment for given inputs.
Args:
graphs_tuple: Input MIP with features.
var_names: Names of MIP variables.
lbs: Lower bounds of variables.
ubs: Upper bounds of variables.
node_indices: Node indices of the binary nodes.
**kwargs: Sampler specific arguments.
Returns:
A single Assignment.
"""
return Assignment([], [], [])
class RandomSampler(BaseSampler):
"""Sampler that returns assignments after randomly unassigning variables.
This sampler returns an assignment obtained from leaving a random set of k
variables unassigned, and fixing the rest.
The value of k is the minimum of the number of provided node indices and
  a num_unassigned_vars parameter. The randomly selected variables take the
  place of predicted flips, i.e. they are the ones left unassigned.
"""
def sample(self,
graphs_tuple: graphs.GraphsTuple,
var_names: np.ndarray,
var_values: np.ndarray,
node_indices: np.ndarray,
num_unassigned_vars: int) -> Assignment:
"""Sampling.
Args:
graphs_tuple: GraphsTuple to produce samples for.
var_names: Variable names array.
var_values: Variable values.
node_indices: Node indices array for which to produce predictions.
num_unassigned_vars: The number of variables to keep free in the submip.
Returns:
Sampler's Assignment.
"""
flattened_indices = np.squeeze(node_indices)
num_top_vars = np.min([num_unassigned_vars, np.size(flattened_indices)])
# The following gives us the indices of the variables to unassign.
# We randomly select binary indices assuming a uniform distribution.
top_indices = set(np.random.choice(
flattened_indices, size=(num_top_vars,), replace=False))
accept_mask = []
for idx in range(len(var_names)):
# Fix all binary vars except the ones selected to be unassigned above.
# Leave the non-binary vars unfixed, too.
      fix_var = idx not in top_indices and idx in node_indices
accept_mask.append(fix_var)
var_names_to_assign = []
var_values_to_assign = []
for accept, val, name in zip(accept_mask, var_values, var_names):
if accept:
var_name = name.decode() if isinstance(name, bytes) else name
var_names_to_assign.append(var_name)
var_values_to_assign.append(val)
return Assignment(
var_names_to_assign, var_values_to_assign, var_values_to_assign)
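# Illustrative sketch, not part of the original module: RandomSampler ignores
# the model and the GraphsTuple, so it can be exercised with plain numpy
# inputs. The variable names and values below are made up for the example.
def _example_random_sampler() -> Assignment:
  var_names = np.array([b'x0', b'x1', b'c0'])  # two binaries and a continuous
  var_values = np.array([0.0, 1.0, 0.5])       # e.g. an incumbent assignment
  node_indices = np.array([0, 1])              # indices of the binary vars
  sampler = RandomSampler()
  assignment = sampler.sample(
      graphs_tuple=None,  # Unused by this sampler.
      var_names=var_names,
      var_values=var_values,
      node_indices=node_indices,
      num_unassigned_vars=1)
  # One binary variable is left free; the other is fixed to its current value.
  assert len(assignment.names) == 1
  return assignment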
class RepeatedCompetitionSampler(BaseSampler):
"""Sampler that repeatedly samples from the topK not yet unassigned variables.
"""
def __init__(self, model_path: str):
super().__init__(model_path)
self.model = tf.saved_model.load(self.model_path)
def sample(self,
graphs_tuple: graphs.GraphsTuple,
var_names: np.ndarray,
var_values: np.ndarray,
node_indices: np.ndarray,
num_unassigned_vars: int,
probability_power: Optional[float] = None,
eps: float = 0.) -> Assignment:
"""Sampling.
Args:
graphs_tuple: GraphsTuple to produce samples for.
var_names: Variable names array.
var_values: Variable values.
node_indices: Node indices array for which to produce predictions.
num_unassigned_vars: The number of variables to keep free in the submip.
      probability_power: raises the probabilities to this power to smooth the
        distribution; acts similarly to a temperature.
eps: a number to add to all probabilities.
Returns:
Sampler's assignment.
"""
proba = sample_probas(self.model, graphs_tuple, node_indices)
proba = np.squeeze(proba) + eps
num_top_vars = np.min([num_unassigned_vars, len(proba)])
unfixed_variables = set()
for _ in range(num_top_vars):
# NB `proba` has the probabilities for the variables corresponding to
      # `node_indices` only, so the sampled index below refers to a position
      # within `node_indices`.
round_proba = proba.copy()
if probability_power is not None:
np.power(round_proba, probability_power, out=round_proba)
np.divide(round_proba, round_proba.sum(), out=round_proba)
var_idx = tfp.distributions.Categorical(probs=round_proba).sample()
unfixed_variables.add(var_idx.numpy())
proba[var_idx] = 0.
accept_mask = []
for idx in range(len(var_names)):
# Fix all binary vars except the ones with highest flip prediction.
# Leave the non-binary vars unfixed, too.
fix_var = idx not in unfixed_variables and idx in node_indices
accept_mask.append(fix_var)
var_names_to_assign = []
var_values_to_assign = []
for accept, val, name in zip(accept_mask, var_values, var_names):
if accept:
var_name = name.decode() if isinstance(name, bytes) else name
var_names_to_assign.append(var_name)
var_values_to_assign.append(val)
return Assignment(
var_names_to_assign, var_values_to_assign, var_values_to_assign)
SAMPLER_DICT = {
'random': RandomSampler,
'competition': RepeatedCompetitionSampler,
}
| neural_lns-main | sampling.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Training script for Neural Neighbourhood Search."""
import timeit
from typing import List, Tuple
from absl import app
from absl import flags
from absl import logging
import ml_collections
from ml_collections.config_flags import config_flags
import sonnet as snt
import tensorflow.compat.v2 as tf
from neural_lns import data_utils
from neural_lns import light_gnn
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', 'neural_lns/config_train.py',
'Training configuration.')
MIN_LEARNING_RATE = 1e-5
def train_and_evaluate(
train_datasets: List[Tuple[str, str]],
valid_datasets: List[Tuple[str, str]],
strategy: tf.distribute.Strategy, learning_rate: float,
model_dir: str, use_tf_function: bool, decay_steps: int,
num_train_steps: int, num_train_run_steps: int, eval_every_steps: int,
eval_steps: int, grad_clip_norm: float,
model_config: ml_collections.ConfigDict):
"""The main training and evaluation loop."""
if eval_every_steps % num_train_run_steps != 0:
raise ValueError(
'eval_every_steps is not divisible by num_train_run_steps')
train_ds_all = []
for path, _ in train_datasets:
train_ds = data_utils.get_dataset(path)
train_ds_all.append(train_ds)
train_data = tf.data.Dataset.sample_from_datasets(train_ds_all)
valid_ds_all = []
for path, _ in valid_datasets:
valid_ds = data_utils.get_dataset(path)
valid_ds_all.append(valid_ds)
valid_data = tf.data.Dataset.sample_from_datasets(valid_ds_all)
with strategy.scope():
model = light_gnn.get_model(**model_config.params)
global_step = tf.Variable(
0, trainable=False, name='global_step', dtype=tf.int64)
lr_schedule = tf.optimizers.schedules.ExponentialDecay(
initial_learning_rate=learning_rate,
decay_steps=decay_steps,
decay_rate=0.9)
optimizer = snt.optimizers.Adam(learning_rate)
train_acc_metric = tf.keras.metrics.BinaryAccuracy(threshold=0.5)
valid_acc_metric = tf.keras.metrics.BinaryAccuracy(threshold=0.5)
train_auc_metric = tf.keras.metrics.AUC()
valid_auc_metric = tf.keras.metrics.AUC()
def train_step(train_inputs):
"""Perform a single training step. Returns the loss."""
# step_fn is replicated when running with TPUStrategy.
def step_fn(ds_tuple: data_utils.DatasetTuple):
logging.info('retracing step_fn')
with tf.GradientTape() as tape:
logits = model(
ds_tuple.graphs_tuple,
is_training=True,
node_indices=ds_tuple.integer_node_indices,
labels=ds_tuple.integer_labels)
local_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(ds_tuple.integer_labels, tf.float32),
logits=logits)
tf.assert_equal(tf.shape(ds_tuple.integer_labels), tf.shape(logits))
local_loss = tf.reduce_sum(local_loss, axis=[2], keepdims=True)
local_loss = tf.reduce_mean(local_loss, axis=[0])
local_loss = local_loss / strategy.num_replicas_in_sync
tf.print('Local loss', local_loss)
# We log AUC and ACC by comparing the greedy sample (always choose the
# value with highest probability) with the best solution.
_, proba = model.greedy_sample(ds_tuple.graphs_tuple,
ds_tuple.integer_node_indices)
proba = tf.reshape(proba, [-1])
best_label = tf.reshape(ds_tuple.integer_labels[:, 0, :], [-1])
train_acc_metric.update_state(best_label, proba)
train_auc_metric.update_state(best_label, proba)
replica_ctx = tf.distribute.get_replica_context()
grads = tape.gradient(local_loss, model.trainable_variables)
grads = replica_ctx.all_reduce('sum', grads)
if grad_clip_norm > 0:
grads, _ = tf.clip_by_global_norm(grads, grad_clip_norm)
lr = tf.maximum(lr_schedule(optimizer.step), MIN_LEARNING_RATE)
optimizer.learning_rate = lr
optimizer.apply(grads, model.trainable_variables)
global_step.assign_add(1)
return local_loss
losses = []
for _ in range(num_train_run_steps):
per_replica_losses = strategy.run(
step_fn, args=(next(train_inputs),))
loss = strategy.reduce(
tf.distribute.ReduceOp.SUM,
per_replica_losses,
axis=None)
losses.append(loss)
return tf.reduce_mean(losses)
def eval_step(eval_inputs):
def step_fn(ds_tuple: data_utils.DatasetTuple): # pylint: disable=missing-docstring
logits = model(
ds_tuple.graphs_tuple,
is_training=False,
node_indices=ds_tuple.integer_node_indices,
labels=ds_tuple.integer_labels)
local_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(ds_tuple.integer_labels, tf.float32),
logits=logits)
local_loss = tf.reduce_sum(local_loss, axis=[2], keepdims=True)
local_loss = tf.reduce_mean(local_loss, axis=[0])
# We scale the local loss here so we can later sum the gradients.
# This is cheaper than averaging all gradients.
local_loss = local_loss / strategy.num_replicas_in_sync
# We log AUC and ACC by comparing the greedy sample (always choose the
# value with highest probability) with the best solution.
_, proba = model.greedy_sample(ds_tuple.graphs_tuple,
ds_tuple.integer_node_indices)
proba = tf.reshape(proba, [-1])
best_label = tf.reshape(ds_tuple.integer_labels[:, 0, :], [-1])
valid_acc_metric.update_state(best_label, proba)
valid_auc_metric.update_state(best_label, proba)
return local_loss
valid_losses = []
for _ in range(eval_steps):
valid_losses_per_replica = strategy.run(
step_fn, args=(next(eval_inputs),))
valid_loss = strategy.reduce(
tf.distribute.ReduceOp.SUM,
valid_losses_per_replica,
axis=None)
valid_losses.append(valid_loss)
return tf.reduce_mean(valid_losses)
if use_tf_function:
train_step = tf.function(train_step)
eval_step = tf.function(eval_step)
ckpt = tf.train.Checkpoint(
model=model, optimizer=optimizer, global_step=global_step)
ckpt_manager = tf.train.CheckpointManager(
checkpoint=ckpt, directory=model_dir, max_to_keep=5)
ckpt.restore(ckpt_manager.latest_checkpoint)
if ckpt_manager.latest_checkpoint:
logging.info('Restored from %s', ckpt_manager.latest_checkpoint)
else:
logging.info('Initializing from scratch.')
train_inputs = iter(train_data)
logging.info('Starting training...')
while global_step.numpy() < num_train_steps:
start = timeit.default_timer()
loss = train_step(train_inputs)
end = timeit.default_timer()
step = global_step.numpy()
train_acc = train_acc_metric.result().numpy()
train_auc = train_auc_metric.result().numpy()
train_acc_metric.reset_states()
train_auc_metric.reset_states()
logging.info(f'[{step}] loss = {loss.numpy():.4f}, ' +
f'acc = {train_acc:.4f} auc = {train_auc:.4f} ' +
f'steps_per_second = {num_train_run_steps / (end - start)}')
if step % eval_every_steps == 0:
model.save_model(model_dir)
eval_inputs = iter(valid_data)
valid_loss = eval_step(eval_inputs)
valid_acc = valid_acc_metric.result().numpy()
valid_auc = valid_auc_metric.result().numpy()
valid_acc_metric.reset_states()
valid_auc_metric.reset_states()
logging.info(f'[Valid: {step}] acc = ' +
f'{valid_acc:.4f} auc = {valid_auc:.4f} ' +
f'loss = {valid_loss:.4f}')
saved_ckpt = ckpt_manager.save()
logging.info('Saved checkpoint: %s', saved_ckpt)
def main(_):
flags_config = FLAGS.config
gpus = tf.config.experimental.list_logical_devices(device_type='GPU')
if gpus:
logging.info('Found GPUs: %s', gpus)
strategy = snt.distribute.Replicator([g.name for g in gpus])
else:
strategy = tf.distribute.OneDeviceStrategy('CPU')
logging.info('Distribution strategy: %s', strategy)
logging.info('Devices: %s', tf.config.list_physical_devices())
train_and_evaluate(
train_datasets=flags_config.train_datasets,
valid_datasets=flags_config.valid_datasets,
strategy=strategy,
learning_rate=flags_config.learning_rate,
model_dir=flags_config.work_unit_dir,
use_tf_function=True,
decay_steps=flags_config.decay_steps,
num_train_steps=flags_config.num_train_steps,
num_train_run_steps=flags_config.num_train_run_steps,
eval_every_steps=flags_config.eval_every_steps,
eval_steps=flags_config.eval_steps,
grad_clip_norm=flags_config.grad_clip_norm,
model_config=flags_config.model_config)
if __name__ == '__main__':
tf.enable_v2_behavior()
app.run(main)
| neural_lns-main | train.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Configuration parameters for Neural LNS training."""
import ml_collections
def get_light_gnn_model_config():
"""Current best LightGNN config."""
config = ml_collections.ConfigDict()
# Tunable parameters
config.params = ml_collections.ConfigDict()
config.params.n_layers = 2
config.params.node_model_hidden_sizes = [64, 64]
config.params.output_model_hidden_sizes = [32, 1]
config.params.dropout = 0.1
return config
def get_config():
"""Training configuration."""
config = ml_collections.ConfigDict()
config.work_unit_dir = '/tmp/models/'
# Training config
config.learning_rate = 1e-2
config.decay_steps = 300
config.num_train_run_steps = 10
config.num_train_steps = 1000
config.eval_every_steps = 500
config.eval_steps = 128
config.grad_clip_norm = 1.0
# Each entry is a pair of (<dataset_path>, <prefix>).
config.train_datasets = [
('/tmp/neural_lns/data/example.tfrecord', 'train'),
]
config.valid_datasets = [
('/tmp/neural_lns/data/example.tfrecord', 'valid'),
]
config.model_config = get_light_gnn_model_config()
return config
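# Illustrative sketch, not part of the original module: the returned
# ConfigDict can be adjusted in code before training, or overridden on the
# command line through the `--config` flag defined in train.py via
# ml_collections.config_flags.
def _example_override_config() -> ml_collections.ConfigDict:
  config = get_config()
  config.learning_rate = 1e-3
  config.model_config.params.n_layers = 3
  return config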
| neural_lns-main | config_train.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Timer API used during MIP solving."""
import abc
from typing import Optional
class Timer(abc.ABC):
"""Class describing the API that need to be implemented for the Timer.
This Timer class is used to time the duration of the MIP solving process, and
is used in solvers.py. The methods that need to be provided are:
- start_and_wait: This should start the timer, and waits for further calls.
- terminate_and_wait: This should terminate the timer, and waits for further
calls.
- elapsed_real_time: This method should return the elapsed time in seconds
since the last time
it was started.
- elapsed_calibrated_time: This method can be implemented to return version of
the elapsed time that is calibrated to the machine speed.
"""
def start_and_wait(self):
raise NotImplementedError('start_and_wait method has to be implemented')
def terminate_and_wait(self):
raise NotImplementedError('terminate_and_wait method has to be implemented')
def elapsed_real_time(self) -> float:
raise NotImplementedError(
'elapsed_real_time property has to be implemented')
def elapsed_calibrated_time(self) -> Optional[float]:
raise NotImplementedError(
'elapsed_calibrated_time property has to be implemented')
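# Minimal illustrative implementation, not part of the original module: a
# wall-clock timer based on time.monotonic. The elapsed times are exposed as
# properties because solvers.py reads them as attributes (e.g.
# `timer.elapsed_real_time`); no machine-speed calibration is attempted here.
import time
class WallClockTimer(Timer):
  """Timer that measures real elapsed time only."""
  def __init__(self):
    self._start: Optional[float] = None
    self._end: Optional[float] = None
  def start_and_wait(self):
    self._start = time.monotonic()
    self._end = None
  def terminate_and_wait(self):
    self._end = time.monotonic()
  @property
  def elapsed_real_time(self) -> float:
    if self._start is None:
      return 0.0
    end = self._end if self._end is not None else time.monotonic()
    return end - self._start
  @property
  def elapsed_calibrated_time(self) -> Optional[float]:
    # No calibration data in this sketch, so report real time instead.
    return self.elapsed_real_time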
| neural_lns-main | calibration.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Expert for Neural Neighbourhood Selection based on local branching."""
import copy
from typing import Any, Optional
from absl import logging
import ml_collections
import numpy as np
from neural_lns import mip_utils
from neural_lns import solving_utils
def get_binary_local_branching_mip(
    mip: mip_utils.MPModel, incumbent: mip_utils.MPSolutionResponse,
    neighbourhood_size: int) -> mip_utils.MPModel:
"""Add binary local branching to the MIP model and returns the new MIP.
Args:
mip: Input MIP
incumbent: The incumbent solution.
neighbourhood_size: Maximum Hamming distance (across integer vars) from
incumbent.
Returns:
MIP with local branching constraints.
"""
lns_mip = copy.deepcopy(mip)
names = []
values = []
for variable, value in zip(mip.variable, incumbent.variable_value):
if mip_utils.is_var_binary(variable):
names.append(variable.name)
# Rounding to make sure the conversion to int is correct.
values.append(np.round(value))
weights = np.ones(len(names))
mip_utils.add_binary_invalid_cut(lns_mip, names, values, weights,
neighbourhood_size)
return lns_mip
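
# For reference: with incumbent values x_bar over the binary variables
# collected above, the cut added via `add_binary_invalid_cut` is expected to
# take the standard local branching form
#   sum_{i: x_bar_i = 0} x_i + sum_{i: x_bar_i = 1} (1 - x_i)
#       <= neighbourhood_size,
# i.e. it bounds the Hamming distance between a feasible x and the incumbent.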
def get_general_local_branching_mip(
mip: mip_utils.MPModel, incumbent: mip_utils.MPSolutionResponse,
neighbourhood_size: int) -> mip_utils.MPModel:
"""Add general local branching to the MIP model and returns the new MIP.
Details see slide 23 of
http://www.or.deis.unibo.it/research_pages/ORinstances/mic2003-lb.pdf
Args:
mip: Input MIP.
incumbent: The incumbent solution.
neighbourhood_size: Maximum Hamming distance (across integer vars) from
incumbent.
Returns:
MIP with local branching constraints.
"""
lns_mip = copy.deepcopy(mip)
orig_names = set([v.name for v in mip.variable])
name_to_aux_plus = {}
name_to_aux_minus = {}
# First, we add all new auxiliary variables to the new MIP.
# General integers add aux. variables v_plus and v_minus.
for v in mip.variable:
if v.is_integer and not mip_utils.is_var_binary(v):
      # Pick auxiliary variable names that do not clash with original names.
aux_plus_name = v.name + '_plus'
while aux_plus_name in orig_names:
aux_plus_name += '_'
aux_minus_name = v.name + '_minus'
while aux_minus_name in orig_names:
aux_minus_name += '_'
lns_mip.variable.append(
mip_utils.MPVariable(name=aux_plus_name, lower_bound=0))
name_to_aux_plus[v.name] = aux_plus_name
lns_mip.variable.append(
mip_utils.MPVariable(name=aux_minus_name, lower_bound=0))
name_to_aux_minus[v.name] = aux_minus_name
# Build index lookup table for all variables.
name_to_idx = {v.name: i for i, v in enumerate(lns_mip.variable)}
# Calculate weights and coefficients, and create local branching constraints.
var_index = []
coeffs = []
constraint_ub = neighbourhood_size
for v, val in zip(mip.variable, incumbent.variable_value):
if v.is_integer:
w = 1.0 / (v.upper_bound - v.lower_bound)
if np.isclose(val, v.lower_bound):
var_index.append(name_to_idx[v.name])
coeffs.append(w)
constraint_ub += (w * v.lower_bound)
elif np.isclose(val, v.upper_bound):
var_index.append(name_to_idx[v.name])
coeffs.append(-w)
constraint_ub -= (w * v.upper_bound)
else:
var_index.append(name_to_idx[name_to_aux_plus[v.name]])
coeffs.append(w)
var_index.append(name_to_idx[name_to_aux_minus[v.name]])
coeffs.append(w)
# Add auxiliary constraints for general integers.
if not mip_utils.is_var_binary(v):
aux_constraint = mip_utils.MPConstraint(
upper_bound=val, lower_bound=val, name='aux_constraint_' + v.name)
aux_constraint.var_index.extend([
name_to_idx[v.name], name_to_idx[name_to_aux_plus[v.name]],
name_to_idx[name_to_aux_minus[v.name]]
])
aux_constraint.coefficient.extend([1., 1., -1.])
lns_mip.constraint.append(aux_constraint)
# Add local branching constraint
constraint = mip_utils.MPConstraint(
upper_bound=constraint_ub, name='local_branching')
constraint.var_index.extend(var_index)
constraint.coefficient.extend(coeffs)
lns_mip.constraint.append(constraint)
return lns_mip
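
# For reference, the constraint built above bounds a scaled distance to the
# incumbent. With w_v = 1 / (upper_bound_v - lower_bound_v), an integer
# variable at its lower (upper) bound contributes w_v * (v - lower_bound_v)
# (respectively w_v * (upper_bound_v - v)), while every other integer variable
# contributes w_v * (v_plus + v_minus); the auxiliary equality
# v + v_plus - v_minus = incumbent_value makes v_plus + v_minus an upper bound
# on |v - incumbent_value|. The total is constrained to neighbourhood_size.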
def get_lns_lp_solution(
mip: mip_utils.MPModel,
incumbent: mip_utils.MPSolutionResponse,
neighbourhood_size: int,
scip_params: ml_collections.ConfigDict,
binary_only: bool = True) -> Optional[mip_utils.MPSolutionResponse]:
"""Builds local branching MIP and solves its LP relaxation.
Args:
mip: Input MIP.
incumbent: The incumbent solution.
neighbourhood_size: Maximum Hamming distance (across integer vars) from
incumbent.
scip_params: SCIP parameters used in the solve.
binary_only: Whether to use binary or general local branching.
Returns:
The found solution (depending on time limit and SCIP
params, might not be as good as incumbent), or None if no solution found.
"""
if binary_only:
lns_mip = get_binary_local_branching_mip(mip, incumbent, neighbourhood_size)
else:
lns_mip = get_general_local_branching_mip(mip, incumbent,
neighbourhood_size)
# Solve LP corresponding to lns_mip
lp_solver = solving_utils.Solver()
lp = copy.deepcopy(lns_mip)
for var in lp.variable:
var.is_integer = False
lp_solver.load_model(lp)
lp_solver.solve(scip_params)
return lp_solver.get_best_solution()
def improve_solution(
mip: mip_utils.MPModel,
incumbent: mip_utils.MPSolutionResponse,
neighbourhood_size: int,
scip_params: ml_collections.ConfigDict,
binary_only: bool = True,
add_incumbent_to_scip: bool = True
) -> Optional[mip_utils.MPSolutionResponse]:
"""Defines an improvement step and solves it.
Args:
mip: Input MIP.
incumbent: The incumbent solution.
neighbourhood_size: Maximum Hamming distance (across integer vars) from
incumbent.
scip_params: SCIP parameters used in the solve.
binary_only: Whether to use binary or general local branching.
add_incumbent_to_scip: Whether to add the incumbent solution to SCIP.
Returns:
The found solution (depending on time limit and SCIP
params, might not be as good as incumbent), or None if no solution found.
Optionally also returns the SCIP stats from the solve call.
"""
if binary_only:
lns_mip = get_binary_local_branching_mip(mip, incumbent, neighbourhood_size)
else:
lns_mip = get_general_local_branching_mip(mip, incumbent,
neighbourhood_size)
mip_solver = solving_utils.Solver()
mip_solver.load_model(lns_mip)
if add_incumbent_to_scip:
added = mip_solver.add_solution(incumbent)
if added:
logging.info('Added known solution with objective value: %f',
incumbent.objective_value)
else:
      logging.warning('Failed to add known solution to SCIP')
mip_solver.solve(scip_params)
return mip_solver.get_best_solution()
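
# A hypothetical outer LNS loop built on `improve_solution` (a sketch only, not
# part of the original module). It assumes a minimisation objective and that
# `scip_params` is the same kind of ConfigDict used for the solves above.
def run_lns_sketch(mip: mip_utils.MPModel,
                   initial_solution: mip_utils.MPSolutionResponse,
                   scip_params: ml_collections.ConfigDict,
                   neighbourhood_size: int = 20,
                   num_rounds: int = 5) -> mip_utils.MPSolutionResponse:
  """Repeatedly applies local branching improvement steps to the incumbent."""
  incumbent = initial_solution
  for _ in range(num_rounds):
    candidate = improve_solution(mip, incumbent, neighbourhood_size,
                                 scip_params)
    # Keep the candidate only if it improves the (assumed) minimisation
    # objective; otherwise retain the current incumbent.
    if candidate is not None and (
        candidate.objective_value < incumbent.objective_value):
      incumbent = candidate
  return incumbent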
| neural_lns-main | local_branching_expert.py |