python_code | repo_name | file_path |
---|---|---|
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Norm-Free Residual Networks."""
# pylint: disable=invalid-name
import haiku as hk
import jax
import jax.numpy as jnp
from nfnets import base
class NF_ResNet(hk.Module):
"""Norm-Free preactivation ResNet."""
variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]},
'ResNet101': {'depth': [3, 4, 23, 3]},
'ResNet152': {'depth': [3, 8, 36, 3]},
'ResNet200': {'depth': [3, 24, 36, 3]},
'ResNet288': {'depth': [24, 24, 24, 24]},
'ResNet600': {'depth': [50, 50, 50, 50]},
}
def __init__(self, num_classes, variant='ResNet50', width=4,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='relu', fc_init=None, skipinit_gain=jnp.zeros,
use_se=False, se_ratio=0.25,
name='NF_ResNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
# Get variant info
block_params = self.variant_dict[self.variant]
self.width_pattern = [item * self.width for item in [64, 128, 256, 512]]
self.depth_pattern = block_params['depth']
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = int(16 * self.width)
self.initial_conv = self.which_conv(ch, kernel_shape=7, stride=2,
padding='SAME', with_bias=False,
name='initial_conv')
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
block_args = (self.width_pattern, self.depth_pattern, [1, 2, 2, 2])
for block_width, stage_depth, stride in zip(*block_args):
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
beta = 1./ expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
self.blocks += [NFResBlock(ch, block_width,
stride=stride if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
skipinit_gain=skipinit_gain,
use_se=use_se,
se_ratio=se_ratio,
)]
ch = block_width
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
expected_std = (expected_std **2 + alpha**2)**0.5
# Head. By default, initialize with N(0, 0.01)
if fc_init is None:
fc_init = hk.initializers.RandomNormal(0.01, 0)
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.initial_conv(x)
out = hk.max_pool(out, window_shape=(1, 3, 3, 1),
strides=(1, 2, 2, 1), padding='SAME')
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
pool = jnp.mean(self.activation(out), [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
flops += [base.count_conv_flops(3, self.initial_conv, h, w)]
h, w = h / 2, w / 2
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Count flops for classifier
flops += [self.blocks[-1].out_ch * self.fc.output_size]
return flops, sum(flops)
class NFResBlock(hk.Module):
"""Normalizer-Free pre-activation ResNet Block."""
def __init__(self, in_ch, out_ch, bottleneck_ratio=0.25,
kernel_size=3, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.relu,
skipinit_gain=jnp.zeros,
stochdepth_rate=None,
use_se=False, se_ratio=0.25,
name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.kernel_size = kernel_size
self.activation = activation
self.beta, self.alpha = beta, alpha
self.skipinit_gain = skipinit_gain
self.use_se, self.se_ratio = use_se, se_ratio
# Bottleneck width
self.width = int(self.out_ch * bottleneck_ratio)
self.stride = stride
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_size, stride=stride,
padding='SAME', name='conv1')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
stride=stride, padding='SAME',
name='conv_shortcut')
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
if self.use_se:
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
shortcut = x
if self.use_projection: # Downsample with conv1x1
shortcut = self.conv_shortcut(out)
out = self.conv0(out)
out = self.conv1(self.activation(out))
out = self.conv2(self.activation(out))
if self.use_se:
out = 2 * self.se(out) * out
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype,
init=self.skipinit_gain)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
    # SE flops happen on avg-pooled activations (only if the block has SE).
    if self.use_se:
      se_flops = self.se.fc0.output_size * self.width
      se_flops += self.se.fc0.output_size * self.se.fc1.output_size
    else:
      se_flops = 0
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
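# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# It shows how this Haiku module is typically transformed and applied. The
# batch size, image size, num_classes and drop_rate below are assumptions;
# drop_rate is passed explicitly because the ResNet variant table above
# only stores per-stage depths.
if __name__ == '__main__':
  def _forward(images, is_training=True):
    model = NF_ResNet(num_classes=10, variant='ResNet50', drop_rate=0.2)
    return model(images, is_training=is_training)['logits']

  _forward_fn = hk.transform(_forward)
  _rng = jax.random.PRNGKey(0)
  _images = jnp.zeros([2, 224, 224, 3])  # NHWC, as assumed by the stem.
  _params = _forward_fn.init(_rng, _images)
  _logits = _forward_fn.apply(_params, _rng, _images)
  print(_logits.shape)  # (2, 10)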
| deepmind-research-master | nfnets/nf_resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizers and Schedulers, inspired by the PyTorch API."""
from collections import ChainMap # pylint:disable=g-importing-member
from typing import Callable, Mapping
import haiku as hk
import jax
import jax.numpy as jnp
import tree
from nfnets import utils
class Optimizer(object):
"""Optimizer base class."""
def __init__(self, params, defaults):
# Flag indicating if parameters have been broadcasted
self._broadcasted = False
# Optimizer hyperparameters; this is a dict to support using param_groups
self._hyperparameters = {}
# Mapping from model parameters to optimizer hyperparameters
self._params2hyperparams = {}
# Assign defaults
self._hyperparameters = dict(**defaults)
# Prepare parameter groups and mappings
self.create_param_groups(params, defaults)
# Join params at top-level if params is a list of groups
if isinstance(params, list):
if any(_is_non_empty_two_level_mapping(g['params']) for g in params):
params = hk.data_structures.merge(*[g['params'] for g in params])
else:
params = dict(ChainMap(*[g['params'] for g in params]))
# Prepare states
create_buffers = lambda k, v: self.create_buffers('/'.join(k), v)
self._states = tree.map_structure_with_path(create_buffers, params)
def add_hyperparam_group(self, group, suffix, defaults):
"""Adds new hyperparameters to the hyperparams dict."""
# Use default hyperparams unless overridden by group hyperparams
group_dict = {key: key for key in defaults if key not in group}
for key in group:
if key != 'params': # Reserved keyword 'params'
group_dict[key] = '%s_%s' % (key, suffix)
self._hyperparameters[group_dict[key]] = group[key]
# Set up params2hyperparams
def set_p2h(k, _):
self._params2hyperparams['/'.join(k)] = group_dict
tree.map_structure_with_path(set_p2h, group['params'])
def create_param_groups(self, params, defaults):
"""Creates param-hyperparam mappings."""
if isinstance(params, list):
for group_index, group in enumerate(params):
# Add group to hyperparams and get this group's full hyperparameters
self.add_hyperparam_group(group, group_index, defaults)
else:
mapping = {key: key for key in self._hyperparameters}
def set_p2h(k, _):
self._params2hyperparams['/'.join(k)] = mapping
tree.map_structure_with_path(set_p2h, params)
def create_buffers(self, name, params):
"""Method to be overridden by child classes."""
pass
def get_opt_params(self, param_name, itr):
"""Returns hyperparams corresponding to param_name."""
mapping = self._params2hyperparams[param_name]
output = {}
for key in mapping:
hyper = self._hyperparameters[mapping[key]]
      # Call callable hypers (e.g. Schedule objects) with the current step;
      # classes (such as the 'opt' entry used by Hybrid) are left as-is.
if isinstance(hyper, Callable) and not isinstance(hyper, type):
output[key] = hyper(itr)
else:
output[key] = hyper
return output
def get_hyper(self, param_name, hyper_name):
"""Get an individual hyperparam for a given param."""
mapping = self._params2hyperparams[param_name]
return self._hyperparameters[mapping[hyper_name]]
def plugin(self, states):
self._states = states
def states(self):
return self._states
def broadcast(self):
"""Brodcasts all buffers and parameters."""
self._broadcasted = True
for name, state in self._states.items():
self._states[name] = {key: utils.broadcast(state[key]) for key in state}
def gather(self):
"""Gathers state (if broadcasted) for saving."""
states = {}
for name in self._states:
state = self._states[name]
states[name] = {key: state[key] if state[key] is None else state[key][0]
for key in state}
return states
def __setattr__(self, name, value):
"""Overrides the object's set-attribute function to register states, etc."""
if '_hyperparameters' in self.__dict__ and name in self._hyperparameters:
self._hyperparameters[name] = value
elif '_states' in self.__dict__ and name in self._states:
self._states[name] = value
else:
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""Override the object's get-attribute function to return states, etc."""
if '_hyperparameters' in self.__dict__ and name in self._hyperparameters:
return self._hyperparameters[name]
elif '_states' in self.__dict__ and name in self._states:
return self._states[name]
else:
      return object.__getattribute__(self, name)
def step(self, params, grads, states, itr=None):
"""Takes a single optimizer step.
Args:
params: a dict containing the parameters to be updated.
grads: a dict containing the gradients for each parameter in params.
states: a dict containing any optimizer buffers (momentum, etc) for
each parameter in params.
itr: an optional integer indicating the current step, for scheduling.
Returns:
The updated params and optimizer buffers.
"""
get_hyper = lambda k, v: self.get_opt_params('/'.join(k), itr)
hypers = tree.map_structure_with_path(get_hyper, params)
outs = tree.map_structure_up_to(params, self.update_param,
params, grads, states, hypers)
return utils.split_tree(outs, params, 2)
def _is_non_empty_two_level_mapping(obj):
instof = lambda t: lambda v: isinstance(v, t)
# Basically: isinstance(obj, Mapping[str, Mapping[str, Any]]) ...
return (isinstance(obj, Mapping) and all(map(instof(str), obj.keys())) and
all(map(instof(Mapping), obj.values())) and
all(map(lambda v: all(map(instof(str), v.keys())), obj.values())) and
# ... and has at least one leaf.
bool(obj) and any(map(bool, obj.values())))
class Schedule(object):
"""Hyperparameter scheduling objects."""
class CosineDecay(Schedule):
"""Cosine decay."""
def __init__(self, min_val, max_val, num_steps):
self.min_val = min_val
self.max_val = max_val
self.num_steps = num_steps
def __call__(self, itr):
cos = (1 + jnp.cos(jnp.pi * itr / self.num_steps))
return 0.5 * (self.max_val - self.min_val) * cos + self.min_val
class WarmupCosineDecay(Schedule):
"""Cosine decay with linear warmup."""
def __init__(self, start_val, min_val, max_val, num_steps, warmup_steps):
self.start_val = start_val
self.min_val = min_val
self.max_val = max_val
self.num_steps = num_steps
self.warmup_steps = warmup_steps
def __call__(self, itr):
warmup_val = ((self.max_val - self.start_val) * (itr / self.warmup_steps)
+ self.start_val)
cos_itr = (itr - self.warmup_steps) / (self.num_steps - self.warmup_steps)
cos = 1 + jnp.cos(jnp.pi * cos_itr)
cos_val = 0.5 * (self.max_val - self.min_val) * cos + self.min_val
# Select warmup_val if itr < warmup, else cosine val
values = jnp.array([warmup_val, cos_val])
index = jnp.sum(jnp.array(self.warmup_steps) < itr)
return jnp.take(values, index)
class WarmupExpDecay(Schedule):
"""Exponential step decay with linear warmup."""
def __init__(self, start_val, max_val, warmup_steps,
decay_factor, decay_interval):
self.start_val = start_val
self.max_val = max_val
self.warmup_steps = warmup_steps
self.decay_factor = decay_factor
self.decay_interval = decay_interval
def __call__(self, itr):
warmup_val = ((self.max_val - self.start_val) * (itr / self.warmup_steps)
+ self.start_val)
# How many decay steps have we taken?
num_decays = jnp.floor((itr - self.warmup_steps) / self.decay_interval)
exp_val = self.max_val * (self.decay_factor ** num_decays)
# Select warmup_val if itr < warmup, else exp_val
values = jnp.array([warmup_val, exp_val])
index = jnp.sum(jnp.array(self.warmup_steps) < itr)
return jnp.take(values, index)
class SGD(Optimizer):
"""Standard SGD with (nesterov) momentum and weight decay.
Attributes:
params: Either a dict mapping param names to JAX tensors, or a list where
each member of the list is a dict containing parameters
and hyperparameters, allowing one to specify param-specific hyperparams.
lr: Learning rate.
weight_decay: Weight decay parameter. Note that this is decay, not L2 reg.
momentum: Momentum parameter
dampening: Dampening parameter
nesterov: Bool indicating this optimizer will use the NAG formulation.
"""
defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0,
'nesterov': None}
def __init__(self, params, lr, weight_decay=None,
momentum=None, dampening=0, nesterov=None):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay,
'momentum': momentum, 'dampening': dampening,
'nesterov': nesterov})
def create_buffers(self, name, param):
"""Prepares all momentum buffers for each parameter."""
state = {'step': jnp.zeros(jax.local_device_count())}
if self.get_hyper(name, 'momentum') is not None:
state['momentum'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
# Apply weight decay
if opt_params.get('weight_decay') is not None:
grad = grad + param * opt_params['weight_decay']
# Update momentum buffers if needed
if 'momentum' in state:
state['momentum'] = (opt_params['momentum'] * state['momentum']
+ (1 - opt_params['dampening']) * grad)
if opt_params['nesterov'] is not None:
grad = grad + opt_params['momentum'] * state['momentum']
else:
grad = state['momentum']
state['step'] += 1
return param - opt_params['lr'] * grad, state
class Adam(Optimizer):
"""Adam optimizer, Kingma & Ba, arxiv.org/abs/1412.6980.
Args:
params (iterable): nested list of params to optimize
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (default: 0)
use_adamw (bool, optional): If not None, use decoupled weight decay
as in arxiv.org/abs/1711.05101. The paper version adds an additional
"schedule" hyperparameter eta, which we instead just replace with the
learning rate following the PyTorch implementation.
Note that this implementation will not instantiate a buffer if the
beta term for that buffer is passed in as None, thus conserving memory.
"""
defaults = {'beta1': 0.9, 'beta2': 0.999, 'weight_decay': None, 'eps': 1e-8,
'use_adamw': None}
def __init__(self, params, lr, beta1=0.9, beta2=0.999,
eps=1e-8, weight_decay=None, use_adamw=None):
super().__init__(params=params,
defaults={'lr': lr, 'beta1': beta1,
'beta2': beta2, 'eps': eps,
'weight_decay': weight_decay,
'use_adamw': use_adamw})
def create_buffers(self, name, param):
"""Prepare exp_avg and exp_avg_sq buffers."""
state = {'step': jnp.zeros(jax.local_device_count())}
if self.get_hyper(name, 'beta1') is not None:
state['exp_avg'] = jnp.zeros_like(param)
if self.get_hyper(name, 'beta2') is not None:
state['exp_avg_sq'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
state['step'] = state['step'] + 1
# Apply weight decay
if opt_params.get('weight_decay') is not None:
if opt_params.get('use_adamw') is not None:
param = param * (1 - opt_params['lr'] * opt_params['weight_decay'])
else:
grad = grad + param * opt_params['weight_decay']
# First moment
if 'exp_avg' in state:
bias_correction1 = 1 - opt_params['beta1'] ** state['step']
state['exp_avg'] = (state['exp_avg'] * opt_params['beta1']
+ (1 - opt_params['beta1']) * grad)
step_size = opt_params['lr'] * state['exp_avg'] / bias_correction1
else:
step_size = opt_params['lr'] * grad
# Second moment
if 'exp_avg_sq' in state:
bias_correction2 = 1 - opt_params['beta2'] ** state['step']
state['exp_avg_sq'] = (state['exp_avg_sq'] * opt_params['beta2']
+ (1 - opt_params['beta2']) * grad * grad)
denom = jnp.sqrt(state['exp_avg_sq']) * jax.lax.rsqrt(bias_correction2)
denom = denom + opt_params['eps']
else:
denom = jnp.abs(grad) + opt_params['eps'] # Add eps to avoid divide-by-0
return param - step_size / denom, state
class RMSProp(Optimizer):
"""RMSProp optimizer, Tieleman and Hinton, ref: powerpoint slides.
Implements RMSProp as
rms = decay * rms{t-1} + (1-decay) * gradient ** 2
mom = momentum * mom{t-1} + learning_rate * g_t / sqrt(rms + epsilon)
param -= mom
Note that the rms buffer is initialized with ones as in TF, as opposed to
zeros as in all other implementations.
Args:
params (iterable): nested list of params to optimize
lr (float): learning rate (default: 1e-3)
decay (float): EMA decay rate for running estimate of squared gradient.
momentum (float or None): Use heavy ball momentum instead of instant grad.
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (NOT ADAMW (default: 0))
"""
defaults = {'weight_decay': None, 'eps': 1e-8}
def __init__(self, params, lr, decay, momentum, weight_decay=None, eps=1e-8):
super().__init__(params=params,
defaults={'lr': lr, 'decay': decay,
'momentum': momentum, 'eps': eps,
'weight_decay': weight_decay})
def create_buffers(self, name, param):
"""Prepare exp_avg and exp_avg_sq buffers."""
state = {'step': jnp.zeros(jax.local_device_count())}
state['rms'] = jnp.ones_like(param)
if self.get_hyper(name, 'momentum') is not None:
state['momentum'] = jnp.zeros_like(param)
return state
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
state['step'] = state['step'] + 1
# Apply weight decay
if opt_params.get('weight_decay') is not None:
grad = grad + param * opt_params['weight_decay']
# EMA of the squared gradient
state['rms'] = (state['rms'] * opt_params['decay']
+ (1 - opt_params['decay']) * (grad ** 2))
scaled_grad = (opt_params['lr'] * grad
/ (state['rms'] + opt_params['eps']) ** 0.5)
    if 'momentum' in state:
state['momentum'] = (state['momentum'] * opt_params['momentum']
+ scaled_grad)
step_size = state['momentum']
else:
step_size = scaled_grad
return param - step_size, state
class Fromage(Optimizer):
"""Fromage optimizer, Bernstein et al. arXiv.org/abs/2002.03432.
This version optionally includes weight decay.
Attributes:
params (iterable): nested list of params to optimize
lr (float): learning rate.
    eps (float, optional): Minimum allowable norm, required in case
      parameters are zero-initialized (default: 1e-5).
weight_decay (float, optional): weight decay (default: 0).
"""
defaults = {'weight_decay': None, 'eps': 1e-5}
def __init__(self, params, lr, weight_decay=None, eps=1e-5):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay, 'eps': eps})
def create_buffers(self, name, param): # pylint: disable=unused-argument
"""Prepares all momentum buffers for each parameter."""
return {'step': jnp.zeros(1)}
def update_param(self, param, grad, state, opt_params):
"""The actual update step for this optimizer."""
if param is None:
return param, state
if opt_params['weight_decay'] is not None:
grad = grad + param * opt_params['weight_decay']
grad_norm = jnp.maximum(jnp.linalg.norm(grad), opt_params['eps'])
param_norm = jnp.maximum(jnp.linalg.norm(param), opt_params['eps'])
mult = jax.lax.rsqrt(1 + opt_params['lr'] ** 2)
out = (param - opt_params['lr'] * grad * (param_norm / grad_norm)) * mult
return out, state
def compute_norm(x, axis, keepdims):
"""Returns norm over arbitrary axis."""
norm = jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5
return norm
def unitwise_norm(x):
"""Computes norms of each output unit separately, assuming (HW)IO weights."""
if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors
axis = None
keepdims = False
elif len(x.shape) in [2, 3]: # Linear layers of shape IO
axis = 0
keepdims = True
elif len(x.shape) == 4: # Conv kernels of shape HWIO
axis = [0, 1, 2,]
keepdims = True
else:
raise ValueError(f'Got a parameter with shape not in [1, 2, 3, 4]! {x}')
return compute_norm(x, axis, keepdims)
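# Hedged shape note (illustration only; the example shapes are assumptions):
# unitwise_norm reduces over everything except the output-unit axis, e.g.
#   unitwise_norm(jnp.ones([3, 3, 64, 128])).shape == (1, 1, 1, 128)  # HWIO
#   unitwise_norm(jnp.ones([64, 128])).shape == (1, 128)              # IO
#   unitwise_norm(jnp.ones([128])) is a scalar (vector/scalar case).
# SGD_AGC below rescales the gradient of each output unit whose gradient
# norm exceeds `clipping` times that unit's parameter norm.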
class SGD_AGC(Optimizer): # pylint:disable=invalid-name
"""SGD with Unit-Adaptive Gradient-Clipping.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
"""
defaults = {'weight_decay': None, 'momentum': None, 'dampening': 0,
'nesterov': None, 'clipping': 0.01, 'eps': 1e-3}
def __init__(self, params, lr, weight_decay=None,
momentum=None, dampening=0, nesterov=None,
clipping=0.01, eps=1e-3):
super().__init__(
params, defaults={'lr': lr, 'weight_decay': weight_decay,
'momentum': momentum, 'dampening': dampening,
'clipping': clipping, 'nesterov': nesterov,
'eps': eps})
def create_buffers(self, name, param):
return SGD.create_buffers(self, name, param)
def update_param(self, param, grad, state, opt_params):
"""Clips grads if necessary, then applies the optimizer update."""
if param is None:
return param, state
if opt_params['clipping'] is not None:
param_norm = jnp.maximum(unitwise_norm(param), opt_params['eps'])
grad_norm = unitwise_norm(grad)
max_norm = param_norm * opt_params['clipping']
# If grad norm > clipping * param_norm, rescale
trigger = grad_norm > max_norm
# Note the max(||G||, 1e-6) is technically unnecessary here, as
# the clipping shouldn't trigger if the grad norm is zero,
# but we include it in practice as a "just-in-case".
clipped_grad = grad * (max_norm / jnp.maximum(grad_norm, 1e-6))
grad = jnp.where(trigger, clipped_grad, grad)
return SGD.update_param(self, param, grad, state, opt_params)
class Hybrid(Optimizer):
"""Optimizer which permits passing param groups with different base opts.
The API for this class follows the case for any other optimizer where one
specifies a list of dicts with separate hyperparams, but in this case it
  requires the user to also specify an 'opt' key for each group, e.g.
  [{'params': params0, 'opt': optim.Adam, 'lr': 0.1}].
  The user must also provide values for any arg of the selected optimizers
  that does not have an associated default value.
  """
def __init__(self, param_groups):
if any(['opt' not in group for group in param_groups]):
raise ValueError('All parameter groups must have an opt key!')
self.defaults = ChainMap(*[group['opt'].defaults for group in param_groups])
super().__init__(param_groups, defaults=dict(self.defaults))
def create_buffers(self, name, param):
return self.get_hyper(name, 'opt').create_buffers(self, name, param)
def update_param(self, param, grad, state, opt_params):
return opt_params['opt'].update_param(self, param, grad, state, opt_params)
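# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file).
# It exercises the step() API on a toy two-level parameter mapping, with the
# learning rate supplied as a Schedule object so that get_opt_params()
# resolves it per-iteration. The parameter shapes, gradients and
# hyperparameter values below are assumptions chosen purely for illustration.
if __name__ == '__main__':
  toy_params = {'linear': {'w': jnp.ones([3, 2]), 'b': jnp.zeros([2])}}
  toy_grads = jax.tree_util.tree_map(jnp.ones_like, toy_params)
  lr_schedule = WarmupCosineDecay(start_val=0.0, min_val=0.0, max_val=0.1,
                                  num_steps=1000, warmup_steps=100)
  opt = SGD(toy_params, lr=lr_schedule, momentum=0.9, weight_decay=1e-4)
  new_params, new_states = opt.step(
      toy_params, toy_grads, opt.states(), itr=10)
  opt.plugin(new_states)  # Store the updated buffers back on the optimizer.
  print(jax.tree_util.tree_map(lambda p: p.shape, new_params))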
| deepmind-research-master | nfnets/optim.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Architecture definitions for different models."""
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
# Model settings for NF-RegNets
nf_regnet_params = {
'B0': {'width': [48, 104, 208, 440], 'depth': [1, 3, 6, 6],
'train_imsize': 192, 'test_imsize': 224,
'drop_rate': 0.2},
'B1': {'width': [48, 104, 208, 440], 'depth': [2, 4, 7, 7],
'train_imsize': 224, 'test_imsize': 256,
'drop_rate': 0.2},
'B2': {'width': [56, 112, 232, 488], 'depth': [2, 4, 8, 8],
'train_imsize': 240, 'test_imsize': 272,
'drop_rate': 0.3},
'B3': {'width': [56, 128, 248, 528], 'depth': [2, 5, 9, 9],
'train_imsize': 288, 'test_imsize': 320,
'drop_rate': 0.3},
'B4': {'width': [64, 144, 288, 616], 'depth': [2, 6, 11, 11],
'train_imsize': 320, 'test_imsize': 384,
'drop_rate': 0.4},
'B5': {'width': [80, 168, 336, 704], 'depth': [3, 7, 14, 14],
'train_imsize': 384, 'test_imsize': 456,
'drop_rate': 0.4},
'B6': {'width': [88, 184, 376, 792], 'depth': [3, 8, 16, 16],
'train_imsize': 448, 'test_imsize': 528,
'drop_rate': 0.5},
'B7': {'width': [96, 208, 416, 880], 'depth': [4, 10, 19, 19],
'train_imsize': 512, 'test_imsize': 600,
'drop_rate': 0.5},
'B8': {'width': [104, 232, 456, 968], 'depth': [4, 11, 22, 22],
'train_imsize': 600, 'test_imsize': 672,
'drop_rate': 0.5},
}
nfnet_params = {}
# F-series models
nfnet_params.update(**{
'F0': {
'width': [256, 512, 1536, 1536], 'depth': [1, 2, 6, 3],
'train_imsize': 192, 'test_imsize': 256,
'RA_level': '405', 'drop_rate': 0.2},
'F1': {
'width': [256, 512, 1536, 1536], 'depth': [2, 4, 12, 6],
'train_imsize': 224, 'test_imsize': 320,
'RA_level': '410', 'drop_rate': 0.3},
'F2': {
'width': [256, 512, 1536, 1536], 'depth': [3, 6, 18, 9],
'train_imsize': 256, 'test_imsize': 352,
'RA_level': '410', 'drop_rate': 0.4},
'F3': {
'width': [256, 512, 1536, 1536], 'depth': [4, 8, 24, 12],
'train_imsize': 320, 'test_imsize': 416,
'RA_level': '415', 'drop_rate': 0.4},
'F4': {
'width': [256, 512, 1536, 1536], 'depth': [5, 10, 30, 15],
'train_imsize': 384, 'test_imsize': 512,
'RA_level': '415', 'drop_rate': 0.5},
'F5': {
'width': [256, 512, 1536, 1536], 'depth': [6, 12, 36, 18],
'train_imsize': 416, 'test_imsize': 544,
'RA_level': '415', 'drop_rate': 0.5},
'F6': {
'width': [256, 512, 1536, 1536], 'depth': [7, 14, 42, 21],
'train_imsize': 448, 'test_imsize': 576,
'RA_level': '415', 'drop_rate': 0.5},
'F7': {
'width': [256, 512, 1536, 1536], 'depth': [8, 16, 48, 24],
'train_imsize': 480, 'test_imsize': 608,
'RA_level': '415', 'drop_rate': 0.5},
})
# Minor variants FN+, slightly wider
nfnet_params.update(**{
**{f'{key}+': {**nfnet_params[key], 'width': [384, 768, 2048, 2048],}
for key in nfnet_params}
})
# Nonlinearities with magic constants (gamma) baked in.
# Note that not all nonlinearities will be stable, especially if they are
# not perfectly monotonic. Good choices include relu, silu, and gelu.
nonlinearities = {
'identity': lambda x: x,
'celu': lambda x: jax.nn.celu(x) * 1.270926833152771,
'elu': lambda x: jax.nn.elu(x) * 1.2716004848480225,
'gelu': lambda x: jax.nn.gelu(x) * 1.7015043497085571,
'glu': lambda x: jax.nn.glu(x) * 1.8484294414520264,
'leaky_relu': lambda x: jax.nn.leaky_relu(x) * 1.70590341091156,
'log_sigmoid': lambda x: jax.nn.log_sigmoid(x) * 1.9193484783172607,
'log_softmax': lambda x: jax.nn.log_softmax(x) * 1.0002083778381348,
'relu': lambda x: jax.nn.relu(x) * 1.7139588594436646,
'relu6': lambda x: jax.nn.relu6(x) * 1.7131484746932983,
'selu': lambda x: jax.nn.selu(x) * 1.0008515119552612,
'sigmoid': lambda x: jax.nn.sigmoid(x) * 4.803835391998291,
'silu': lambda x: jax.nn.silu(x) * 1.7881293296813965,
'soft_sign': lambda x: jax.nn.soft_sign(x) * 2.338853120803833,
'softplus': lambda x: jax.nn.softplus(x) * 1.9203323125839233,
'tanh': lambda x: jnp.tanh(x) * 1.5939117670059204,
}
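# Hedged sketch (illustration only, not from the original file): the gamma
# constants above appear to be chosen so that each nonlinearity roughly
# preserves unit variance for N(0, 1) inputs, i.e. gamma ~= 1 / std(f(x)).
# The helper below estimates that quantity by sampling; estimates will not
# match the baked-in values exactly, and this reading of the constants is
# an assumption based on the comment above.
def _estimate_gamma(fn, num_samples=1000000, seed=0):
  x = jax.random.normal(jax.random.PRNGKey(seed), [num_samples])
  return 1. / float(jnp.std(fn(x)))
# Example: _estimate_gamma(jax.nn.relu) is close to the 'relu' entry above.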
class WSConv2D(hk.Conv2D):
"""2D Convolution with Scaled Weight Standardization and affine gain+bias."""
@hk.transparent
def standardize_weight(self, weight, eps=1e-4):
"""Apply scaled WS with affine gain."""
mean = jnp.mean(weight, axis=(0, 1, 2), keepdims=True)
var = jnp.var(weight, axis=(0, 1, 2), keepdims=True)
fan_in = np.prod(weight.shape[:-1])
# Get gain
gain = hk.get_parameter('gain', shape=(weight.shape[-1],),
dtype=weight.dtype, init=jnp.ones)
# Manually fused normalization, eq. to (w - mean) * gain / sqrt(N * var)
scale = jax.lax.rsqrt(jnp.maximum(var * fan_in, eps)) * gain
shift = mean * scale
return weight * scale - shift
def __call__(self, inputs: jnp.ndarray, eps: float = 1e-4) -> jnp.ndarray:
w_shape = self.kernel_shape + (
inputs.shape[self.channel_index] // self.feature_group_count,
self.output_channels)
# Use fan-in scaled init, but WS is largely insensitive to this choice.
w_init = hk.initializers.VarianceScaling(1.0, 'fan_in', 'normal')
w = hk.get_parameter('w', w_shape, inputs.dtype, init=w_init)
weight = self.standardize_weight(w, eps)
out = jax.lax.conv_general_dilated(
inputs, weight, window_strides=self.stride, padding=self.padding,
lhs_dilation=self.lhs_dilation, rhs_dilation=self.kernel_dilation,
dimension_numbers=self.dimension_numbers,
feature_group_count=self.feature_group_count)
# Always add bias
bias_shape = (self.output_channels,)
bias = hk.get_parameter('bias', bias_shape, inputs.dtype, init=jnp.zeros)
return out + bias
def signal_metrics(x, i):
"""Things to measure about a NCHW tensor activation."""
metrics = {}
# Average channel-wise mean-squared
metrics[f'avg_sq_mean_{i}'] = jnp.mean(jnp.mean(x, axis=[0, 1, 2])**2)
# Average channel variance
metrics[f'avg_var_{i}'] = jnp.mean(jnp.var(x, axis=[0, 1, 2]))
return metrics
def count_conv_flops(in_ch, conv, h, w):
"""For a conv layer with in_ch inputs, count the FLOPS."""
# How many outputs are we producing? Note this is wrong for VALID padding.
output_shape = conv.output_channels * (h * w) / np.prod(conv.stride)
# At each OHW location we do computation equal to (I//G) * kh * kw
flop_per_loc = (in_ch / conv.feature_group_count)
flop_per_loc *= np.prod(conv.kernel_shape)
return output_shape * flop_per_loc
class SqueezeExcite(hk.Module):
"""Simple Squeeze+Excite module."""
def __init__(self, in_ch, out_ch, se_ratio=0.5,
hidden_ch=None, activation=jax.nn.relu,
name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
if se_ratio is None:
if hidden_ch is None:
raise ValueError('Must provide one of se_ratio or hidden_ch')
self.hidden_ch = hidden_ch
else:
self.hidden_ch = max(1, int(self.in_ch * se_ratio))
self.activation = activation
self.fc0 = hk.Linear(self.hidden_ch, with_bias=True)
self.fc1 = hk.Linear(self.out_ch, with_bias=True)
def __call__(self, x):
h = jnp.mean(x, axis=[1, 2]) # Mean pool over HW extent
h = self.fc1(self.activation(self.fc0(h)))
h = jax.nn.sigmoid(h)[:, None, None] # Broadcast along H, W
return h
class StochDepth(hk.Module):
"""Batchwise Dropout used in EfficientNet, optionally sans rescaling."""
def __init__(self, drop_rate, scale_by_keep=False, name=None):
super().__init__(name=name)
self.drop_rate = drop_rate
self.scale_by_keep = scale_by_keep
def __call__(self, x, is_training) -> jnp.ndarray:
if not is_training:
return x
batch_size = x.shape[0]
r = jax.random.uniform(hk.next_rng_key(), [batch_size, 1, 1, 1],
dtype=x.dtype)
keep_prob = 1. - self.drop_rate
binary_tensor = jnp.floor(keep_prob + r)
if self.scale_by_keep:
x = x / keep_prob
return x * binary_tensor
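# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file).
# WSConv2D is a drop-in replacement for hk.Conv2D, so it is used inside an
# hk.transform like any other Haiku module. The input shape and channel
# counts below are assumptions for a quick shape check.
if __name__ == '__main__':
  def _apply_ws_conv(x):
    return WSConv2D(16, kernel_shape=3, stride=1, padding='SAME',
                    name='ws_conv')(x)

  _fn = hk.transform(_apply_ws_conv)
  _x = jnp.ones([1, 8, 8, 4])
  _params = _fn.init(jax.random.PRNGKey(0), _x)
  _y = _fn.apply(_params, None, _x)  # No rng needed at apply time here.
  print(_y.shape)  # (1, 8, 8, 16)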
| deepmind-research-master | nfnets/base.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Norm-Free Nets."""
# pylint: disable=unused-import
# pylint: disable=invalid-name
import functools
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
from nfnets import base
class NFNet(hk.Module):
"""Normalizer-Free Networks with an improved architecture.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization.
"""
variant_dict = base.nfnet_params
def __init__(self, num_classes, variant='F0',
width=1.0, se_ratio=0.5,
alpha=0.2, stochdepth_rate=0.1, drop_rate=None,
activation='gelu', fc_init=None,
final_conv_mult=2, final_conv_ch=None,
use_two_convs=True,
name='NFNet'):
super().__init__(name=name)
self.num_classes = num_classes
self.variant = variant
self.width = width
self.se_ratio = se_ratio
# Get variant info
block_params = self.variant_dict[self.variant]
self.train_imsize = block_params['train_imsize']
self.test_imsize = block_params['test_imsize']
self.width_pattern = block_params['width']
self.depth_pattern = block_params['depth']
self.bneck_pattern = block_params.get('expansion', [0.5] * 4)
self.group_pattern = block_params.get('group_width', [128] * 4)
self.big_pattern = block_params.get('big_width', [True] * 4)
self.activation = base.nonlinearities[activation]
if drop_rate is None:
self.drop_rate = block_params['drop_rate']
else:
self.drop_rate = drop_rate
self.which_conv = base.WSConv2D
# Stem
ch = self.width_pattern[0] // 2
self.stem = hk.Sequential([
self.which_conv(16, kernel_shape=3, stride=2,
padding='SAME', name='stem_conv0'),
self.activation,
self.which_conv(32, kernel_shape=3, stride=1,
padding='SAME', name='stem_conv1'),
self.activation,
self.which_conv(64, kernel_shape=3, stride=1,
padding='SAME', name='stem_conv2'),
self.activation,
self.which_conv(ch, kernel_shape=3, stride=2,
padding='SAME', name='stem_conv3'),
])
# Body
self.blocks = []
expected_std = 1.0
num_blocks = sum(self.depth_pattern)
index = 0 # Overall block index
stride_pattern = [1, 2, 2, 2]
block_args = zip(self.width_pattern, self.depth_pattern, self.bneck_pattern,
self.group_pattern, self.big_pattern, stride_pattern)
for (block_width, stage_depth, expand_ratio,
group_size, big_width, stride) in block_args:
for block_index in range(stage_depth):
# Scalar pre-multiplier so each block sees an N(0,1) input at init
beta = 1./ expected_std
# Block stochastic depth drop-rate
block_stochdepth_rate = stochdepth_rate * index / num_blocks
out_ch = (int(block_width * self.width))
self.blocks += [NFBlock(ch, out_ch,
expansion=expand_ratio, se_ratio=se_ratio,
group_size=group_size,
stride=stride if block_index == 0 else 1,
beta=beta, alpha=alpha,
activation=self.activation,
which_conv=self.which_conv,
stochdepth_rate=block_stochdepth_rate,
big_width=big_width,
use_two_convs=use_two_convs,
)]
ch = out_ch
index += 1
# Reset expected std but still give it 1 block of growth
if block_index == 0:
expected_std = 1.0
expected_std = (expected_std **2 + alpha**2)**0.5
# Head
if final_conv_mult is None:
if final_conv_ch is None:
raise ValueError('Must provide one of final_conv_mult or final_conv_ch')
ch = final_conv_ch
else:
ch = int(final_conv_mult * ch)
self.final_conv = self.which_conv(ch, kernel_shape=1,
padding='SAME', name='final_conv')
# By default, initialize with N(0, 0.01)
if fc_init is None:
fc_init = hk.initializers.RandomNormal(mean=0, stddev=0.01)
self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True)
def __call__(self, x, is_training=True, return_metrics=False):
"""Return the output of the final layer without any [log-]softmax."""
# Stem
outputs = {}
out = self.stem(x)
if return_metrics:
outputs.update(base.signal_metrics(out, 0))
# Blocks
for i, block in enumerate(self.blocks):
out, res_avg_var = block(out, is_training=is_training)
if return_metrics:
outputs.update(base.signal_metrics(out, i + 1))
outputs[f'res_avg_var_{i}'] = res_avg_var
# Final-conv->activation, pool, dropout, classify
out = self.activation(self.final_conv(out))
pool = jnp.mean(out, [1, 2])
outputs['pool'] = pool
# Optionally apply dropout
if self.drop_rate > 0.0 and is_training:
pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool)
outputs['logits'] = self.fc(pool)
return outputs
def count_flops(self, h, w):
flops = []
ch = 3
for module in self.stem.layers:
if isinstance(module, hk.Conv2D):
flops += [base.count_conv_flops(ch, module, h, w)]
if any([item > 1 for item in module.stride]):
h, w = h / module.stride[0], w / module.stride[1]
ch = module.output_channels
# Body FLOPs
for block in self.blocks:
flops += [block.count_flops(h, w)]
if block.stride > 1:
h, w = h / block.stride, w / block.stride
# Head module FLOPs
out_ch = self.blocks[-1].out_ch
flops += [base.count_conv_flops(out_ch, self.final_conv, h, w)]
# Count flops for classifier
flops += [self.final_conv.output_channels * self.fc.output_size]
return flops, sum(flops)
class NFBlock(hk.Module):
"""Normalizer-Free Net Block."""
def __init__(self, in_ch, out_ch, expansion=0.5, se_ratio=0.5,
kernel_shape=3, group_size=128, stride=1,
beta=1.0, alpha=0.2,
which_conv=base.WSConv2D, activation=jax.nn.gelu,
big_width=True, use_two_convs=True,
stochdepth_rate=None, name=None):
super().__init__(name=name)
self.in_ch, self.out_ch = in_ch, out_ch
self.expansion = expansion
self.se_ratio = se_ratio
self.kernel_shape = kernel_shape
self.activation = activation
self.beta, self.alpha = beta, alpha
# Mimic resnet style bigwidth scaling?
width = int((self.out_ch if big_width else self.in_ch) * expansion)
    # Round the expanded width down to a multiple of the group size.
self.groups = width // group_size
self.width = group_size * self.groups
self.stride = stride
self.use_two_convs = use_two_convs
# Conv 0 (typically expansion conv)
self.conv0 = which_conv(self.width, kernel_shape=1, padding='SAME',
name='conv0')
# Grouped NxN conv
self.conv1 = which_conv(self.width, kernel_shape=kernel_shape,
stride=stride, padding='SAME',
feature_group_count=self.groups, name='conv1')
if self.use_two_convs:
self.conv1b = which_conv(self.width, kernel_shape=kernel_shape,
stride=1, padding='SAME',
feature_group_count=self.groups, name='conv1b')
# Conv 2, typically projection conv
self.conv2 = which_conv(self.out_ch, kernel_shape=1, padding='SAME',
name='conv2')
# Use shortcut conv on channel change or downsample.
self.use_projection = stride > 1 or self.in_ch != self.out_ch
if self.use_projection:
self.conv_shortcut = which_conv(self.out_ch, kernel_shape=1,
padding='SAME', name='conv_shortcut')
# Squeeze + Excite Module
self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio)
# Are we using stochastic depth?
self._has_stochdepth = (stochdepth_rate is not None and
stochdepth_rate > 0. and stochdepth_rate < 1.0)
if self._has_stochdepth:
self.stoch_depth = base.StochDepth(stochdepth_rate)
def __call__(self, x, is_training):
out = self.activation(x) * self.beta
if self.stride > 1: # Average-pool downsample.
shortcut = hk.avg_pool(out, window_shape=(1, 2, 2, 1),
strides=(1, 2, 2, 1), padding='SAME')
if self.use_projection:
shortcut = self.conv_shortcut(shortcut)
elif self.use_projection:
shortcut = self.conv_shortcut(out)
else:
shortcut = x
out = self.conv0(out)
out = self.conv1(self.activation(out))
if self.use_two_convs:
out = self.conv1b(self.activation(out))
out = self.conv2(self.activation(out))
out = (self.se(out) * 2) * out # Multiply by 2 for rescaling
# Get average residual standard deviation for reporting metrics.
res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2]))
# Apply stochdepth if applicable.
if self._has_stochdepth:
out = self.stoch_depth(out, is_training)
# SkipInit Gain
out = out * hk.get_parameter('skip_gain', (), out.dtype, init=jnp.zeros)
return out * self.alpha + shortcut, res_avg_var
def count_flops(self, h, w):
# Count conv FLOPs based on input HW
expand_flops = base.count_conv_flops(self.in_ch, self.conv0, h, w)
# If block is strided we decrease resolution here.
dw_flops = base.count_conv_flops(self.width, self.conv1, h, w)
if self.stride > 1:
h, w = h / self.stride, w / self.stride
if self.use_two_convs:
dw_flops += base.count_conv_flops(self.width, self.conv1b, h, w)
if self.use_projection:
sc_flops = base.count_conv_flops(self.in_ch, self.conv_shortcut, h, w)
else:
sc_flops = 0
# SE flops happen on avg-pooled activations
se_flops = self.se.fc0.output_size * self.out_ch
se_flops += self.se.fc0.output_size * self.se.fc1.output_size
contract_flops = base.count_conv_flops(self.width, self.conv2, h, w)
return sum([expand_flops, dw_flops, se_flops, contract_flops, sc_flops])
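# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file).
# Variant 'F0' supplies its own drop_rate and image sizes via
# base.nfnet_params; the batch size, input resolution and num_classes below
# are assumptions chosen for a quick shape check.
if __name__ == '__main__':
  def _forward(images, is_training=True):
    model = NFNet(num_classes=10, variant='F0')
    return model(images, is_training=is_training)

  _fn = hk.transform(_forward)
  _rng = jax.random.PRNGKey(0)
  _images = jnp.zeros([2, 96, 96, 3])
  _params = _fn.init(_rng, _images)
  _out = _fn.apply(_params, _rng, _images)
  print(_out['logits'].shape)  # (2, 10)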
| deepmind-research-master | nfnets/nfnet.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config, focused on model evaluation."""
from ml_collections import config_dict
def get_config(filter_time_intervals=None):
"""Return config object for training."""
config = config_dict.ConfigDict()
config.eval_strategy = config_dict.ConfigDict()
config.eval_strategy.class_name = 'OneDeviceConfig'
config.eval_strategy.kwargs = config_dict.ConfigDict(
dict(device_type='v100'))
## Experiment config.
config.experiment_kwargs = config_dict.ConfigDict(dict(
resnet_kwargs=dict(
blocks_per_group_list=[3, 4, 6, 3], # This choice is ResNet50.
bn_config=dict(
decay_rate=0.9,
eps=1e-5),
resnet_v2=False,
additional_features_mode='mlp',
),
optimizer_config=dict(
class_name='Momentum',
kwargs={'momentum': 0.9},
# Set up the learning rate schedule.
lr_init=0.025,
lr_factor=0.1,
lr_schedule=(50e3, 100e3, 150e3),
gradient_clip=5.,
),
l2_regularization=1e-4,
total_train_batch_size=128,
train_net_args={'is_training': True},
eval_batch_size=128,
eval_net_args={'is_training': True},
data_config=dict(
# dataset loading
dataset_path=None,
num_val_splits=10,
val_split=0,
# image cropping
image_size=(80, 80, 7),
train_crop_type='crop_fixed',
test_crop_type='crop_fixed',
n_crop_repeat=1,
train_augmentations=dict(
rotation_and_flip=True,
rescaling=True,
translation=True,
),
test_augmentations=dict(
rotation_and_flip=False,
rescaling=False,
translation=False,
),
test_time_ensembling='sum',
num_eval_buckets=5,
eval_confidence_interval=95,
task='grounded_unnormalized_regression',
loss_config=dict(
loss='mse',
mse_normalize=False,
),
model_uncertainty=True,
additional_features='',
time_filter_intervals=filter_time_intervals,
class_boundaries={
'0': [[-1., 0]],
'1': [[0, 1.]]
},
frequencies_to_use='all',
),
n_train_epochs=100
))
return config
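# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file):
# build the default config and read or override fields through the
# ml_collections ConfigDict API. The override value below is an arbitrary
# assumption.
if __name__ == '__main__':
  cfg = get_config()
  print(cfg.experiment_kwargs.optimizer_config.lr_init)  # 0.025
  cfg.experiment_kwargs.total_train_batch_size = 256  # Override a field.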
| deepmind-research-master | galaxy_mergers/config.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to visualize gradients and other interpretability analysis."""
import numpy as np
import tensorflow.compat.v2 as tf
def rotate_by_right_angle_multiple(image, rot=90):
"""Rotate an image by right angles."""
if rot not in [0, 90, 180, 270]:
raise ValueError(f"Cannot rotate by non-90 degree angle {rot}")
if rot in [90, -270]:
image = np.transpose(image, (1, 0, 2))
image = image[::-1]
elif rot in [180, -180]:
image = image[::-1, ::-1]
elif rot in [270, -90]:
image = np.transpose(image, (1, 0, 2))
image = image[:, ::-1]
return image
def compute_gradient(images, evaluator, is_training=False):
inputs = tf.Variable(images[None], dtype=tf.float32)
with tf.GradientTape() as tape:
tape.watch(inputs)
time_sigma = evaluator.model(inputs, None, is_training)
grad_time = tape.gradient(time_sigma[:, 0], inputs)
return grad_time, time_sigma
def compute_grads_for_rotations(images, evaluator, is_training=False):
test_gradients, test_outputs = [], []
for rotation in np.arange(0, 360, 90):
images_rot = rotate_by_right_angle_multiple(images, rotation)
grads, time_sigma = compute_gradient(images_rot, evaluator, is_training)
grads = np.squeeze(grads.numpy())
inv_grads = rotate_by_right_angle_multiple(grads, -rotation)
test_gradients.append(inv_grads)
test_outputs.append(time_sigma.numpy())
return np.squeeze(test_gradients), np.squeeze(test_outputs)
def compute_grads_for_rotations_and_flips(images, evaluator):
grads, time_sigma = compute_grads_for_rotations(images, evaluator)
grads_f, time_sigma_f = compute_grads_for_rotations(images[::-1], evaluator)
grads_f = grads_f[:, ::-1]
all_grads = np.concatenate([grads, grads_f], 0)
model_outputs = np.concatenate((time_sigma, time_sigma_f), 0)
return all_grads, model_outputs
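# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file).
# `_DummyEvaluator` stands in for the real evaluator object, which is
# assumed to expose a `model(inputs, features, is_training)` callable
# returning a [batch, 2] tensor of (time, sigma) predictions; its body and
# the 60x60x7 image shape are assumptions for a quick shape check.
if __name__ == '__main__':
  class _DummyEvaluator:

    def model(self, inputs, features, is_training):
      del features, is_training  # Unused by the stand-in.
      mean = tf.reduce_mean(inputs, axis=[1, 2, 3], keepdims=True)
      return tf.reshape(mean, [-1, 1]) * tf.ones([1, 2])

  _images = np.random.uniform(size=(60, 60, 7)).astype(np.float32)
  _grads, _outs = compute_grads_for_rotations_and_flips(
      _images, _DummyEvaluator())
  print(_grads.shape, _outs.shape)  # (8, 60, 60, 7) (8, 2)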
| deepmind-research-master | galaxy_mergers/interpretability_helpers.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fork of a generic ResNet to incorporate additional cosmological features."""
from typing import Mapping, Optional, Sequence, Text
import sonnet.v2 as snt
import tensorflow.compat.v2 as tf
class ResNet(snt.Module):
"""ResNet model."""
def __init__(self,
n_repeats: int,
blocks_per_group_list: Sequence[int],
num_classes: int,
bn_config: Optional[Mapping[Text, float]] = None,
resnet_v2: bool = False,
channels_per_group_list: Sequence[int] = (256, 512, 1024, 2048),
use_additional_features: bool = False,
additional_features_mode: Optional[Text] = "per_block",
name: Optional[Text] = None):
"""Constructs a ResNet model.
Args:
n_repeats: The batch dimension for the input is expected to have the form
`B = b * n_repeats`. After the conv stack, the logits for the
`n_repeats` replicas are reduced, leading to an output batch dimension
of `b`.
blocks_per_group_list: A sequence of length 4 that indicates the number of
blocks created in each group.
num_classes: The number of classes to classify the inputs into.
bn_config: A dictionary of two elements, `decay_rate` and `eps` to be
passed on to the `BatchNorm` layers. By default the `decay_rate` is
`0.9` and `eps` is `1e-5`.
resnet_v2: Whether to use the v1 or v2 ResNet implementation. Defaults to
False.
channels_per_group_list: A sequence of length 4 that indicates the number
of channels used for each block in each group.
use_additional_features: If true, additional vector features will be
concatenated to the residual stack before logits are computed.
additional_features_mode: Mode for processing additional features.
Supported modes: 'mlp' and 'per_block'.
name: Name of the module.
"""
super(ResNet, self).__init__(name=name)
self._n_repeats = n_repeats
if bn_config is None:
bn_config = {"decay_rate": 0.9, "eps": 1e-5}
self._bn_config = bn_config
self._resnet_v2 = resnet_v2
# Number of blocks in each group for ResNet.
if len(blocks_per_group_list) != 4:
raise ValueError(
"`blocks_per_group_list` must be of length 4 not {}".format(
len(blocks_per_group_list)))
self._blocks_per_group_list = blocks_per_group_list
# Number of channels in each group for ResNet.
if len(channels_per_group_list) != 4:
raise ValueError(
"`channels_per_group_list` must be of length 4 not {}".format(
len(channels_per_group_list)))
self._channels_per_group_list = channels_per_group_list
self._use_additional_features = use_additional_features
self._additional_features_mode = additional_features_mode
self._initial_conv = snt.Conv2D(
output_channels=64,
kernel_shape=7,
stride=2,
with_bias=False,
padding="SAME",
name="initial_conv")
if not self._resnet_v2:
self._initial_batchnorm = snt.BatchNorm(
create_scale=True,
create_offset=True,
name="initial_batchnorm",
**bn_config)
self._block_groups = []
strides = [1, 2, 2, 2]
for i in range(4):
self._block_groups.append(
snt.nets.resnet.BlockGroup(
channels=self._channels_per_group_list[i],
num_blocks=self._blocks_per_group_list[i],
stride=strides[i],
bn_config=bn_config,
resnet_v2=resnet_v2,
name="block_group_%d" % (i)))
if self._resnet_v2:
self._final_batchnorm = snt.BatchNorm(
create_scale=True,
create_offset=True,
name="final_batchnorm",
**bn_config)
self._logits = snt.Linear(
output_size=num_classes,
w_init=snt.initializers.VarianceScaling(scale=2.0), name="logits")
if self._use_additional_features:
self._embedding = LinearBNReLU(output_size=16, name="embedding",
**bn_config)
if self._additional_features_mode == "mlp":
self._feature_repr = LinearBNReLU(
output_size=self._channels_per_group_list[-1], name="features_repr",
**bn_config)
elif self._additional_features_mode == "per_block":
self._feature_repr = []
for i, ch in enumerate(self._channels_per_group_list):
self._feature_repr.append(
LinearBNReLU(output_size=ch, name=f"features_{i}", **bn_config))
else:
raise ValueError(f"Unsupported addiitonal features mode: "
f"{additional_features_mode}")
def __call__(self, inputs, features, is_training):
net = inputs
net = self._initial_conv(net)
if not self._resnet_v2:
net = self._initial_batchnorm(net, is_training=is_training)
net = tf.nn.relu(net)
net = tf.nn.max_pool2d(
net, ksize=3, strides=2, padding="SAME", name="initial_max_pool")
if self._use_additional_features:
assert features is not None
features = self._embedding(features, is_training=is_training)
for i, block_group in enumerate(self._block_groups):
net = block_group(net, is_training)
if (self._use_additional_features and
self._additional_features_mode == "per_block"):
features_i = self._feature_repr[i](features, is_training=is_training)
# support for n_repeats > 1
features_i = tf.repeat(features_i, self._n_repeats, axis=0)
        net += features_i[:, None, None, :]  # expand to spatial resolution
if self._resnet_v2:
net = self._final_batchnorm(net, is_training=is_training)
net = tf.nn.relu(net)
net = tf.reduce_mean(net, axis=[1, 2], name="final_avg_pool")
# Re-split the batch dimension
net = tf.reshape(net, [-1, self._n_repeats] + net.shape.as_list()[1:])
# Average over the various repeats of the input (e.g. those could have
# corresponded to different crops).
net = tf.reduce_mean(net, axis=1)
if (self._use_additional_features and
self._additional_features_mode == "mlp"):
net += self._feature_repr(features, is_training=is_training)
return self._logits(net)
class LinearBNReLU(snt.Module):
"""Wrapper class for Linear layer with Batch Norm and ReLU activation."""
def __init__(self, output_size=64,
w_init=snt.initializers.VarianceScaling(scale=2.0),
name="linear", **bn_config):
"""Constructs a LinearBNReLU module.
Args:
output_size: Output dimension.
w_init: weight Initializer for snt.Linear.
name: Name of the module.
**bn_config: Optional parameters to be passed to snt.BatchNorm.
"""
super(LinearBNReLU, self).__init__(name=name)
self._linear = snt.Linear(output_size=output_size, w_init=w_init,
name=f"{name}_linear")
self._bn = snt.BatchNorm(create_scale=True, create_offset=True,
name=f"{name}_bn", **bn_config)
def __call__(self, x, is_training):
x = self._linear(x)
x = self._bn(x, is_training=is_training)
return tf.nn.relu(x)
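# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original file).
# It shows the n_repeats batch convention: the input batch is b * n_repeats
# crops and the output is one logit vector per object. The batch size,
# block counts and 80x80x7 crop size below are assumptions (the crop size
# matches the image_size in the accompanying config).
if __name__ == '__main__':
  model = ResNet(n_repeats=3, blocks_per_group_list=[2, 2, 2, 2],
                 num_classes=2)
  images = tf.random.uniform([2 * 3, 80, 80, 7])  # b=2 objects, 3 crops each.
  logits = model(images, features=None, is_training=True)
  print(logits.shape)  # (2, 2)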
| deepmind-research-master | galaxy_mergers/model.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to pre-process Antennae galaxy images."""
import collections
import os
from astropy.io import fits
import numpy as np
from scipy import ndimage
import tensorflow.compat.v2 as tf
def norm_antennae_images(images, scale=1000):
return tf.math.asinh(images/scale)
def renorm_antennae(images):
median = np.percentile(images.numpy().flatten(), 50)
img_range = np.ptp(images.numpy().flatten())
return (images - median) / (img_range / 2)
def get_antennae_images(antennae_fits_dir):
"""Load the raw Antennae galaxy images."""
all_fits_files = [
os.path.join(antennae_fits_dir, f)
for f in os.listdir(antennae_fits_dir)
]
freq_mapping = {'red': 160, 'blue': 850}
paired_fits_files = collections.defaultdict(list)
for f in all_fits_files:
redshift = float(f[-8:-5])
paired_fits_files[redshift].append(f)
for redshift, files in paired_fits_files.items():
paired_fits_files[redshift] = sorted(
files, key=lambda f: freq_mapping[f.split('/')[-1].split('_')[0]])
print('Reading files:', paired_fits_files)
print('Redshifts:', sorted(paired_fits_files.keys()))
galaxy_views = collections.defaultdict(list)
for redshift in paired_fits_files:
for view_path in paired_fits_files[redshift]:
with open(view_path, 'rb') as f:
fits_data = fits.open(f)
galaxy_views[redshift].append(np.array(fits_data[0].data))
batched_images = []
for redshift in paired_fits_files:
img = tf.constant(np.array(galaxy_views[redshift]))
img = tf.transpose(img, (1, 2, 0))
img = tf.image.resize(img, size=(60, 60))
batched_images.append(img)
return tf.stack(batched_images)
def preprocess_antennae_images(antennae_images):
"""Pre-process the Antennae galaxy images into a reasonable range."""
rotated_antennae_images = [
ndimage.rotate(img, 10, reshape=True, cval=-1)[10:-10, 10:-10]
for img in antennae_images
]
rotated_antennae_images = [
np.clip(img, 0, 1e9) for img in rotated_antennae_images
]
rotated_antennae_images = tf.stack(rotated_antennae_images)
normed_antennae_images = norm_antennae_images(rotated_antennae_images)
normed_antennae_images = tf.clip_by_value(normed_antennae_images, 1, 4.5)
renormed_antennae_images = renorm_antennae(normed_antennae_images)
return renormed_antennae_images
| deepmind-research-master | galaxy_mergers/antennae_helpers.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-processing functions for input data."""
import functools
from absl import logging
import tensorflow.compat.v2 as tf
from galaxy_mergers import losses
CROP_TYPE_NONE = 'crop_none'
CROP_TYPE_FIXED = 'crop_fixed'
CROP_TYPE_RANDOM = 'crop_random'
DATASET_FREQUENCY_MEAN = 4.0
DATASET_FREQUENCY_RANGE = 8.0
PHYSICAL_FEATURES_MIN_MAX = {
'redshift': (0.572788, 2.112304),
'mass': (9.823963, 10.951282)
}
ALL_FREQUENCIES = [105, 125, 160, 435, 606, 775, 850]
VALID_ADDITIONAL_FEATURES = ['redshift', 'sequence_average_redshift', 'mass']
def _make_padding_sizes(pad_size, random_centering):
if random_centering:
pad_size_left = tf.random.uniform(
shape=[], minval=0, maxval=pad_size+1, dtype=tf.int32)
else:
pad_size_left = pad_size // 2
pad_size_right = pad_size - pad_size_left
return pad_size_left, pad_size_right
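# Example (illustrative): for pad_size=5 with random_centering=False the split
# is deterministic, giving (pad_size_left, pad_size_right) == (2, 3); with
# random_centering=True the left padding is drawn uniformly from {0, ..., 5}.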
def resize_and_pad(image, target_size, random_centering):
"""Resize image to target_size (<= image.size) and pad to original size."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = original_shape[1] - target_size
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
padding = [[pad_size_left, pad_size_right],
[pad_size_left, pad_size_right], [0, 0]]
if len(original_shape) == 4:
padding = [[0, 0]] + padding
image = tf.pad(image, padding)
image.set_shape(original_shape)
return image
def resize_and_extract(image, target_size, random_centering):
"""Upscale image to target_size (>image.size), extract original size crop."""
original_shape = image.shape
size = tf.reshape(target_size, [1])
size = tf.concat([size, size], axis=0)
image = tf.image.resize(image, size=size)
pad_size = target_size - original_shape[1]
pad_size_left, pad_size_right = _make_padding_sizes(
pad_size, random_centering)
if len(original_shape) == 3:
image = tf.expand_dims(image, 0)
image = tf.cond(pad_size_right > 0,
lambda: image[:, pad_size_left:-pad_size_right, :, :],
lambda: image[:, pad_size_left:, :, :])
image = tf.cond(pad_size_right > 0,
lambda: image[:, :, pad_size_left:-pad_size_right, :],
lambda: image[:, :, pad_size_left:, :])
if len(original_shape) == 3:
image = tf.squeeze(image, 0)
image.set_shape(original_shape)
return image
def resize_and_center(image, target_size, random_centering):
return tf.cond(
tf.math.less_equal(target_size, image.shape[1]),
lambda: resize_and_pad(image, target_size, random_centering),
lambda: resize_and_extract(image, target_size, random_centering))
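# Worked example (illustrative): for a 60x60 input, target_size=40 takes the
# resize_and_pad branch (shrink, then zero-pad back to 60x60), whereas
# target_size=80 takes the resize_and_extract branch (upscale, then extract a
# 60x60 crop).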
def random_rotation_and_flip(image):
angle = tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)
return tf.image.random_flip_left_right(tf.image.rot90(image, angle))
def get_all_rotations_and_flips(images):
assert isinstance(images, list)
new_images = []
for image in images:
for rotation in range(4):
new_images.append(tf.image.rot90(image, rotation))
flipped_image = tf.image.flip_left_right(image)
new_images.append(tf.image.rot90(flipped_image, rotation))
return new_images
def random_rescaling(image, random_centering):
assert image.shape.as_list()[0] == image.shape.as_list()[1]
original_size = image.shape.as_list()[1]
min_size = 2 * (original_size // 4)
max_size = original_size * 2
target_size = tf.random.uniform(
shape=[], minval=min_size, maxval=max_size // 2,
dtype=tf.int32) * 2
return resize_and_center(image, target_size, random_centering)
def get_all_rescalings(images, image_width, random_centering):
"""Get a uniform sample of rescalings of all images in input."""
assert isinstance(images, list)
min_size = 2 * (image_width // 4)
max_size = image_width * 2
delta_size = (max_size + 2 - min_size) // 5
sizes = range(min_size, max_size + 2, delta_size)
new_images = []
for image in images:
for size in sizes:
new_images.append(resize_and_center(image, size, random_centering))
return new_images
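# Worked example (illustrative): for image_width=60 the candidate sizes are
# range(30, 122, 18) == [30, 48, 66, 84, 102, 120], so every input image
# yields six rescaled variants spanning half to twice its original width.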
def move_repeats_to_batch(image, n_repeats):
width, height, n_channels = image.shape.as_list()[1:]
image = tf.reshape(image, [-1, width, height, n_channels, n_repeats])
image = tf.transpose(image, [0, 4, 1, 2, 3]) # [B, repeats, x, y, c]
return tf.reshape(image, [-1, width, height, n_channels])
def get_classification_label(dataset_row, class_boundaries):
merge_time = dataset_row['grounded_normalized_time']
label = tf.dtypes.cast(0, tf.int64)
for category, intervals in class_boundaries.items():
for interval in intervals:
if merge_time > interval[0] and merge_time < interval[1]:
label = tf.dtypes.cast(int(category), tf.int64)
return label
def get_regression_label(dataset_row, task_type):
"""Returns time-until-merger regression target given desired modeling task."""
if task_type == losses.TASK_NORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
elif task_type == losses.TASK_GROUNDED_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
elif task_type == losses.TASK_UNNORMALIZED_REGRESSION:
return tf.dtypes.cast(dataset_row['unnormalized_time'], tf.float32)
elif task_type == losses.TASK_CLASSIFICATION:
return tf.dtypes.cast(dataset_row['grounded_normalized_time'], tf.float32)
else:
raise ValueError
def get_normalized_time_target(dataset_row):
return tf.dtypes.cast(dataset_row['normalized_time'], tf.float32)
def apply_time_filter(dataset_row, time_interval):
  """Returns True if data is within the given time interval."""
merge_time = dataset_row['grounded_normalized_time']
lower_time, upper_time = time_interval
return merge_time > lower_time and merge_time < upper_time
def normalize_physical_feature(name, dataset_row):
min_feat, max_feat = PHYSICAL_FEATURES_MIN_MAX[name]
value = getattr(dataset_row, name)
return 2 * (value - min_feat) / (max_feat - min_feat) - 1
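# Example (illustrative): for 'redshift' with the min/max above, 0.572788 maps
# to -1, 2.112304 maps to +1, and the midpoint (~1.34) maps to 0.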
def prepare_dataset(ds, target_size, crop_type, n_repeats, augmentations,
task_type, additional_features, class_boundaries,
time_intervals=None, frequencies_to_use='all',
additional_lambdas=None):
"""Prepare a zipped dataset of image, classification/regression labels."""
def _prepare_image(dataset_row):
"""Transpose, crop and cast an image."""
image = tf.dtypes.cast(dataset_row['image'], tf.float32)
image = tf.reshape(image, tf.cast(dataset_row['image_shape'], tf.int32))
    image = tf.transpose(image, perm=[1, 2, 0])  # Convert from CHW to HWC.
freqs = ALL_FREQUENCIES if frequencies_to_use == 'all' else frequencies_to_use
idxs_to_keep = [ALL_FREQUENCIES.index(f) for f in freqs]
image = tf.gather(params=image, indices=idxs_to_keep, axis=-1)
# Based on offline computation on the empirical frequency range:
# Converts [0, 8.] ~~> [-1, 1]
image = (image - DATASET_FREQUENCY_MEAN)/(DATASET_FREQUENCY_RANGE/2.0)
def crop(image):
if crop_type == CROP_TYPE_FIXED:
crop_loc = tf.cast(dataset_row['proposed_crop'][0], tf.int32)
crop_size = tf.cast(dataset_row['proposed_crop'][1], tf.int32)
image = image[
crop_loc[0]:crop_loc[0] + crop_size[0],
crop_loc[1]:crop_loc[1] + crop_size[1], :]
image = tf.image.resize(image, target_size[0:2])
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type == CROP_TYPE_RANDOM:
image = tf.image.random_crop(image, target_size)
image.set_shape([target_size[0], target_size[1], target_size[2]])
elif crop_type != CROP_TYPE_NONE:
raise NotImplementedError
return image
repeated_images = []
for _ in range(n_repeats):
repeated_images.append(crop(image))
image = tf.concat(repeated_images, axis=-1)
if augmentations['rotation_and_flip']:
image = random_rotation_and_flip(image)
if augmentations['rescaling']:
image = random_rescaling(image, augmentations['translation'])
return image
def get_regression_label_wrapper(dataset_row):
return get_regression_label(dataset_row, task_type=task_type)
def get_classification_label_wrapper(dataset_row):
return get_classification_label(dataset_row,
class_boundaries=class_boundaries)
if time_intervals:
for time_interval in time_intervals:
filter_fn = functools.partial(apply_time_filter,
time_interval=time_interval)
ds = ds.filter(filter_fn)
datasets = [ds.map(_prepare_image)]
if additional_features:
additional_features = additional_features.split(',')
assert all([f in VALID_ADDITIONAL_FEATURES for f in additional_features])
logging.info('Running with additional features: %s.',
', '.join(additional_features))
def _prepare_additional_features(dataset_row):
features = []
for f in additional_features:
features.append(normalize_physical_feature(f, dataset_row))
features = tf.convert_to_tensor(features, dtype=tf.float32)
features.set_shape([len(additional_features)])
return features
datasets += [ds.map(_prepare_additional_features)]
datasets += [
ds.map(get_classification_label_wrapper),
ds.map(get_regression_label_wrapper),
ds.map(get_normalized_time_target)]
if additional_lambdas:
for process_fn in additional_lambdas:
datasets += [ds.map(process_fn)]
return tf.data.Dataset.zip(tuple(datasets))
| deepmind-research-master | galaxy_mergers/preprocessing.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers to compute loss metrics."""
import scipy.stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
TASK_CLASSIFICATION = 'classification'
TASK_NORMALIZED_REGRESSION = 'normalized_regression'
TASK_UNNORMALIZED_REGRESSION = 'unnormalized_regression'
TASK_GROUNDED_UNNORMALIZED_REGRESSION = 'grounded_unnormalized_regression'
REGRESSION_TASKS = [TASK_NORMALIZED_REGRESSION, TASK_UNNORMALIZED_REGRESSION,
TASK_GROUNDED_UNNORMALIZED_REGRESSION]
ALL_TASKS = [TASK_CLASSIFICATION] + REGRESSION_TASKS
LOSS_MSE = 'mse'
LOSS_SOFTMAX_CROSS_ENTROPY = 'softmax_cross_entropy'
ALL_LOSSES = [LOSS_SOFTMAX_CROSS_ENTROPY, LOSS_MSE]
def normalize_regression_loss(regression_loss, predictions):
# Normalize loss such that:
# 1) E_{x uniform}[loss(x, prediction)] does not depend on prediction
# 2) E_{x uniform, prediction uniform}[loss(x, prediction)] is as before.
# Divides MSE regression loss by E[(prediction-x)^2]; assumes x=[-1,1]
normalization = 2./3.
normalized_loss = regression_loss / ((1./3 + predictions**2) / normalization)
return normalized_loss
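# Sanity check (added for clarity): for x ~ U[-1, 1],
# E[(prediction - x)^2] = prediction**2 + 1/3, and averaging that over a
# uniform prediction in [-1, 1] gives 2/3, which is where the constants above
# come from.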
def equal32(x, y):
return tf.cast(tf.equal(x, y), tf.float32)
def mse_loss(predicted, targets):
return (predicted - targets) ** 2
def get_std_factor_from_confidence_percent(percent):
dec = percent/100.
inv_dec = 1 - dec
return scipy.stats.norm.ppf(dec+inv_dec/2)
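# Example (illustrative): for a 95% confidence interval this returns
# scipy.stats.norm.ppf(0.975) ~ 1.96, the usual two-sided z-score.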
def get_all_metric_names(task_type, model_uncertainty, loss_config, # pylint: disable=unused-argument
mode='eval', return_dict=True):
"""Get all the scalar fields produced by compute_loss_and_metrics."""
names = ['regularization_loss', 'prediction_accuracy', str(mode)+'_loss']
if task_type == TASK_CLASSIFICATION:
names += ['classification_loss']
else:
names += ['regression_loss', 'avg_mu', 'var_mu']
if model_uncertainty:
names += ['uncertainty_loss', 'scaled_regression_loss',
'uncertainty_plus_scaled_regression',
'avg_sigma', 'var_sigma',
'percent_in_conf_interval', 'error_sigma_correlation',
'avg_prob']
if return_dict:
return {name: 0. for name in names}
else:
return names
def compute_loss_and_metrics(mu, log_sigma_sq,
regression_targets, labels,
task_type, model_uncertainty, loss_config,
regularization_loss=0., confidence_interval=95,
mode='train'):
"""Computes loss statistics and other metrics."""
scalars_to_log = dict()
vectors_to_log = dict()
scalars_to_log['regularization_loss'] = regularization_loss
vectors_to_log['mu'] = mu
if task_type == TASK_CLASSIFICATION:
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=mu, labels=labels, name='cross_entropy')
classification_loss = tf.reduce_mean(cross_entropy, name='class_loss')
total_loss = classification_loss
sigma = None
scalars_to_log['classification_loss'] = classification_loss
predicted_labels = tf.argmax(mu, axis=1)
correct_predictions = equal32(predicted_labels, labels)
else:
regression_loss = mse_loss(mu, regression_targets)
if 'mse_normalize' in loss_config and loss_config['mse_normalize']:
assert task_type in [TASK_GROUNDED_UNNORMALIZED_REGRESSION,
TASK_NORMALIZED_REGRESSION]
regression_loss = normalize_regression_loss(regression_loss, mu)
avg_regression_loss = tf.reduce_mean(regression_loss)
vectors_to_log['regression_loss'] = regression_loss
scalars_to_log['regression_loss'] = avg_regression_loss
scalars_to_log['avg_mu'] = tf.reduce_mean(mu)
scalars_to_log['var_mu'] = tf.reduce_mean(mse_loss(mu, tf.reduce_mean(mu)))
predicted_labels = tf.cast(mu > 0, tf.int64)
correct_predictions = equal32(predicted_labels, labels)
if model_uncertainty:
# This implements Eq. (1) in https://arxiv.org/pdf/1612.01474.pdf
inv_sigma_sq = tf.math.exp(-log_sigma_sq)
scaled_regression_loss = regression_loss * inv_sigma_sq
scaled_regression_loss = tf.reduce_mean(scaled_regression_loss)
uncertainty_loss = tf.reduce_mean(log_sigma_sq)
total_loss = uncertainty_loss + scaled_regression_loss
scalars_to_log['uncertainty_loss'] = uncertainty_loss
scalars_to_log['scaled_regression_loss'] = scaled_regression_loss
scalars_to_log['uncertainty_plus_scaled_regression'] = total_loss
sigma = tf.math.exp(log_sigma_sq / 2.)
vectors_to_log['sigma'] = sigma
scalars_to_log['avg_sigma'] = tf.reduce_mean(sigma)
var_sigma = tf.reduce_mean(mse_loss(sigma, tf.reduce_mean(sigma)))
scalars_to_log['var_sigma'] = var_sigma
# Compute # of labels that fall into the confidence interval.
std_factor = get_std_factor_from_confidence_percent(confidence_interval)
lower_bound = mu - std_factor * sigma
upper_bound = mu + std_factor * sigma
preds = tf.logical_and(tf.greater(regression_targets, lower_bound),
tf.less(regression_targets, upper_bound))
percent_in_conf_interval = tf.reduce_mean(tf.cast(preds, tf.float32))
scalars_to_log['percent_in_conf_interval'] = percent_in_conf_interval*100
error_sigma_corr = tfp.stats.correlation(x=regression_loss,
y=sigma, event_axis=None)
scalars_to_log['error_sigma_correlation'] = error_sigma_corr
dists = tfp.distributions.Normal(mu, sigma)
probs = dists.prob(regression_targets)
scalars_to_log['avg_prob'] = tf.reduce_mean(probs)
else:
total_loss = avg_regression_loss
loss_name = str(mode)+'_loss'
total_loss = tf.add(total_loss, regularization_loss, name=loss_name)
scalars_to_log[loss_name] = total_loss
vectors_to_log['correct_predictions'] = correct_predictions
scalars_to_log['prediction_accuracy'] = tf.reduce_mean(correct_predictions)
# Validate that metrics outputted are exactly what is expected
expected = get_all_metric_names(task_type, model_uncertainty,
loss_config, mode, False)
assert set(expected) == set(scalars_to_log.keys())
return scalars_to_log, vectors_to_log
| deepmind-research-master | galaxy_mergers/losses.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to run model evaluation on a checkpoint and dataset."""
import ast
from absl import app
from absl import flags
from absl import logging
from galaxy_mergers import evaluator
flags.DEFINE_string('checkpoint_path', '', 'Path to TF2 checkpoint to eval.')
flags.DEFINE_string('data_path', '', 'Path to TFRecord(s) with data.')
flags.DEFINE_string('filter_time_intervals', None,
                    'Merger time intervals on which to perform regression. '
'Specify None for the default time interval [-1,1], or'
' a custom list of intervals, e.g. [[-0.2,0], [0.5,1]].')
FLAGS = flags.FLAGS
def main(_) -> None:
if FLAGS.filter_time_intervals is not None:
filter_time_intervals = ast.literal_eval(FLAGS.filter_time_intervals)
else:
filter_time_intervals = None
config, ds, experiment = evaluator.get_config_dataset_evaluator(
filter_time_intervals,
FLAGS.checkpoint_path,
config_override={
'experiment_kwargs.data_config.dataset_path': FLAGS.data_path,
})
metrics, _, _ = evaluator.run_model_on_dataset(experiment, ds, config)
logging.info('Evaluation complete. Metrics: %s', metrics)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | galaxy_mergers/main.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for a galaxy merger model evaluation."""
import glob
import os
from astropy import cosmology
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import tensorflow.compat.v2 as tf
def restore_checkpoint(checkpoint_dir, experiment):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
global_step = tf.Variable(
0, dtype=tf.int32, trainable=False, name='global_step')
checkpoint = tf.train.Checkpoint(
_global_step_=global_step, **experiment.checkpoint_items)
checkpoint.restore(checkpoint_path)
def sum_average_transformed_mu_and_sigma(mu, log_sigma_sq):
"""Computes <mu>, var(mu) + <var> in transformed representation.
This corresponds to assuming that the output distribution is a sum of
  Gaussians and computing the mean and variance of the resulting (non-Gaussian)
distribution.
Args:
mu: Tensor of shape [B, ...] representing the means of the input
distributions.
log_sigma_sq: Tensor of shape [B, ...] representing log(sigma**2) of the
input distributions. Can be None, in which case the variance is assumed
to be zero.
Returns:
mu: Tensor of shape [...] representing the means of the output
distributions.
log_sigma_sq: Tensor of shape [...] representing log(sigma**2) of the
output distributions.
"""
av_mu = tf.reduce_mean(mu, axis=0)
var_mu = tf.math.reduce_std(mu, axis=0)**2
if log_sigma_sq is None:
return av_mu, tf.math.log(var_mu)
max_log_sigma_sq = tf.reduce_max(log_sigma_sq, axis=0)
log_sigma_sq -= max_log_sigma_sq
# (sigma/sigma_0)**2
sigma_sq = tf.math.exp(log_sigma_sq)
# (<sigma**2>)/sigma_0**2 (<1)
av_sigma_sq = tf.reduce_mean(sigma_sq, axis=0)
# (<sigma**2> + var(mu))/sigma_0**2
av_sigma_sq += var_mu * tf.math.exp(-max_log_sigma_sq)
# log(<sigma**2> + var(mu))
log_av_sigma_sq = tf.math.log(av_sigma_sq) + max_log_sigma_sq
return av_mu, log_av_sigma_sq
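# Example (illustrative): combining two predictions N(0, 1) and N(2, 1) gives
# av_mu = 1 and <sigma**2> + var(mu) = 1 + 1 = 2, so the function returns
# (1.0, log(2)).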
def aggregate_regression_ensemble(logits_or_times, ensemble_size,
use_uncertainty, test_time_ensembling):
"""Aggregate output of model ensemble."""
out_shape = logits_or_times.shape.as_list()[1:]
logits_or_times = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape)
mus = logits_or_times[..., 0]
log_sigma_sqs = logits_or_times[..., -1] if use_uncertainty else None
if test_time_ensembling == 'sum':
mu, log_sigma_sq = sum_average_transformed_mu_and_sigma(mus, log_sigma_sqs)
elif test_time_ensembling == 'none':
mu = mus[0]
log_sigma_sq = log_sigma_sqs[0] if use_uncertainty else None
else:
raise ValueError('Unexpected test_time_ensembling')
return mu, log_sigma_sq
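# Shape note (added for clarity): logits_or_times arrives with the ensemble of
# augmented inputs flattened into the leading batch dimension, i.e.
# [ensemble_size * batch, ...]; the reshape above recovers
# [ensemble_size, batch, ...] before reducing over the ensemble axis.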
def aggregate_classification_ensemble(logits_or_times, ensemble_size,
test_time_ensembling):
"""Averages the output logits across models in the ensemble."""
out_shape = logits_or_times.shape.as_list()[1:]
logits = tf.reshape(logits_or_times, [ensemble_size, -1] + out_shape)
if test_time_ensembling == 'sum':
logits = tf.reduce_mean(logits, axis=0)
return logits, None
elif test_time_ensembling == 'none':
return logits, None
else:
raise ValueError('Unexpected test_time_ensembling')
def unpack_evaluator_output(data, return_seq_info=False, return_redshift=False):
"""Unpack evaluator.run_model_on_dataset output."""
mus = np.array(data[1]['mu']).flatten()
sigmas = np.array(data[1]['sigma']).flatten()
regression_targets = np.array(data[1]['regression_targets']).flatten()
outputs = [mus, sigmas, regression_targets]
if return_seq_info:
seq_ids = np.array(data[2][0]).flatten()
seq_ids = np.array([seq_id.decode('UTF-8') for seq_id in seq_ids])
time_idxs = np.array(data[2][1]).flatten()
axes = np.array(data[2][2]).flatten()
outputs += [seq_ids, axes, time_idxs]
if return_redshift:
redshifts = np.array(data[2][6]).flatten()
outputs += [redshifts]
return outputs
def process_data_into_myrs(redshifts, *data_lists):
"""Converts normalized time to virial time using Planck cosmology."""
  # Small hack: use getattr so build tools don't trip over the non-standard
  # dynamic class generation done in the astropy library:
  # https://github.com/astropy/astropy/blob/master/astropy/cosmology/core.py#L3290
  planck13 = getattr(cosmology, 'Planck13')
hubble_constants = planck13.H(redshifts) # (km/s)/megaparsec
inv_hubble_constants = 1/hubble_constants # (megaparsec*s) / km
megaparsec_to_km = 1e19*3.1
seconds_to_gigayears = 1e-15/31.556
conversion_factor = megaparsec_to_km * seconds_to_gigayears
hubble_time_gigayears = conversion_factor * inv_hubble_constants
hubble_to_virial_time = 0.14 # approximate simulation-based conversion factor
virial_dyn_time = hubble_to_virial_time*hubble_time_gigayears.value
return [data_list*virial_dyn_time for data_list in data_lists]
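# Unit bookkeeping (added for clarity): 1/H(z) has units of Mpc*s/km, so
# multiplying by ~3.1e19 km/Mpc and ~3.17e-17 Gyr/s gives the Hubble time in
# gigayears, which the ~0.14 factor then converts to a virial dynamical time.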
def print_rmse_and_class_accuracy(mus, regression_targets, redshifts):
"""Convert to virial dynamical time and print stats."""
time_pred, time_gt = process_data_into_myrs(
redshifts, mus, regression_targets)
time_sq_errors = (time_pred-time_gt)**2
rmse = np.sqrt(np.mean(time_sq_errors))
labels = regression_targets > 0
class_preds = mus > 0
accuracy = sum((labels == class_preds).astype(np.int8)) / len(class_preds)
print(f'95% Error: {np.percentile(np.sqrt(time_sq_errors), 95)}')
print(f'RMSE: {rmse}')
print(f'Classification Accuracy: {accuracy}')
def print_stats(vec, do_print=True):
fvec = vec.flatten()
if do_print:
print(len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec))
return (len(fvec), min(fvec), np.mean(fvec), np.median(fvec), max(fvec))
def get_image_from_fits(base_dir, seq='475_31271', time='497', axis=2):
"""Read *.fits galaxy image from directory."""
axis_map = {0: 'x', 1: 'y', 2: 'z'}
fits_glob = f'{base_dir}/{seq}/fits_of_flux_psf/{time}/*_{axis_map[axis]}_*.fits'
def get_freq_from_path(p):
return int(p.split('/')[-1].split('_')[2][1:])
fits_image_paths = sorted(glob.glob(fits_glob), key=get_freq_from_path)
assert len(fits_image_paths) == 7
combined_frequencies = []
for fit_path in fits_image_paths:
with open(fit_path, 'rb') as f:
fits_data = np.array(fits.open(f)[0].data.astype(np.float32))
combined_frequencies.append(fits_data)
fits_image = np.transpose(np.array(combined_frequencies), (1, 2, 0))
return fits_image
def stack_desired_galaxy_images(base_dir, seq, n_time_slices):
  """Search through the galaxy image directory, gathering images."""
fits_sequence_dir = os.path.join(base_dir, seq, 'fits_of_flux_psf')
all_times_for_seq = os.listdir(fits_sequence_dir)
hop = (len(all_times_for_seq)-1)//(n_time_slices-1)
desired_time_idxs = [k*hop for k in range(n_time_slices)]
all_imgs = []
for j in desired_time_idxs:
time = all_times_for_seq[j]
img = get_image_from_fits(base_dir=base_dir, seq=seq, time=time, axis=2)
all_imgs.append(img)
min_img_size = min([img.shape[0] for img in all_imgs])
return all_imgs, min_img_size
def draw_galaxy_image(image, target_size=None, color_map='viridis'):
normalized_image = image / max(image.flatten())
color_map = plt.get_cmap(color_map)
colored_image = color_map(normalized_image)[:, :, :3]
colored_image = (colored_image * 255).astype(np.uint8)
colored_image = Image.fromarray(colored_image, mode='RGB')
if target_size:
colored_image = colored_image.resize(target_size, Image.ANTIALIAS)
return colored_image
def collect_merger_sequence(ds, seq=b'370_11071', n_examples_to_sift=5000):
images, targets, redshifts = [], [], []
for i, all_inputs in enumerate(ds):
if all_inputs[4][0].numpy() == seq:
images.append(all_inputs[0][0].numpy())
targets.append(all_inputs[2][0].numpy())
redshifts.append(all_inputs[10][0].numpy())
if i > n_examples_to_sift: break
return np.squeeze(images), np.squeeze(targets), np.squeeze(redshifts)
def take_samples(sample_idxs, *data_lists):
return [np.take(l, sample_idxs, axis=0) for l in data_lists]
| deepmind-research-master | galaxy_mergers/helpers.py |
# Copyright 2021 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation runner."""
import collections
from absl import logging
import tensorflow.compat.v2 as tf
from galaxy_mergers import config as tp_config
from galaxy_mergers import helpers
from galaxy_mergers import losses
from galaxy_mergers import model
from galaxy_mergers import preprocessing
class GalaxyMergeClassifierEvaluator():
"""Galaxy Merge Rate Prediction Evaluation Runner."""
def __init__(self, strategy, optimizer_config, total_train_batch_size,
train_net_args, eval_batch_size, eval_net_args,
l2_regularization, data_config, resnet_kwargs, n_train_epochs):
"""Initializes evaluator/experiment."""
logging.info('Initializing evaluator...')
self._strategy = strategy
self._data_config = data_config
self._use_additional_features = bool(data_config['additional_features'])
self._eval_batch_size = eval_batch_size
self._eval_net_args = eval_net_args
self._num_buckets = data_config['num_eval_buckets']
self._n_repeats = data_config['n_crop_repeat']
self._image_size = data_config['image_size']
self._task_type = data_config['task']
self._loss_config = data_config['loss_config']
self._model_uncertainty = data_config['model_uncertainty']
del l2_regularization, optimizer_config, train_net_args
del total_train_batch_size, n_train_epochs
logging.info('Creating model...')
num_classes = 2 if self._model_uncertainty else 1
if self._task_type == losses.TASK_CLASSIFICATION:
num_classes = len(self._data_config['class_boundaries'])
self.model = model.ResNet(
n_repeats=self._data_config['n_crop_repeat'], num_classes=num_classes,
use_additional_features=self._use_additional_features, **resnet_kwargs)
self._eval_input = None
def build_eval_input(self, additional_lambdas=None):
"""Create the galaxy merger evaluation dataset."""
def decode_fn(record_bytes):
parsed_example = tf.io.parse_single_example(
record_bytes,
{
'image':
tf.io.VarLenFeature(tf.float32),
'image_shape':
tf.io.FixedLenFeature([3], dtype=tf.int64),
'axis':
tf.io.FixedLenFeature([], dtype=tf.int64),
'proposed_crop':
tf.io.FixedLenFeature([2, 2], dtype=tf.int64),
'normalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'unnormalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'grounded_normalized_time':
tf.io.FixedLenFeature([], dtype=tf.float32),
'redshift':
tf.io.FixedLenFeature([], dtype=tf.float32),
'sequence_average_redshift':
tf.io.FixedLenFeature([], dtype=tf.float32),
'mass':
tf.io.FixedLenFeature([], dtype=tf.float32),
'time_index':
tf.io.FixedLenFeature([], dtype=tf.int64),
'sequence_id':
tf.io.FixedLenFeature([], dtype=tf.string),
})
parsed_example['image'] = tf.sparse.to_dense(
parsed_example['image'], default_value=0)
dataset_row = parsed_example
return dataset_row
def build_eval_pipeline(_):
"""Generate the processed input evaluation data."""
logging.info('Building evaluation input pipeline...')
ds_path = self._data_config['dataset_path']
ds = tf.data.TFRecordDataset([ds_path]).map(decode_fn)
augmentations = dict(
rotation_and_flip=False,
rescaling=False,
translation=False
)
ds = preprocessing.prepare_dataset(
ds=ds, target_size=self._image_size,
crop_type=self._data_config['test_crop_type'],
n_repeats=self._n_repeats,
augmentations=augmentations,
task_type=self._task_type,
additional_features=self._data_config['additional_features'],
class_boundaries=self._data_config['class_boundaries'],
time_intervals=self._data_config['time_filter_intervals'],
frequencies_to_use=self._data_config['frequencies_to_use'],
additional_lambdas=additional_lambdas)
batched_ds = ds.cache().batch(self._eval_batch_size).prefetch(128)
logging.info('Finished building input pipeline...')
return batched_ds
return self._strategy.experimental_distribute_datasets_from_function(
build_eval_pipeline)
def run_test_model_ensemble(self, images, physical_features, augmentations):
"""Run evaluation on input images."""
image_variations = [images]
image_shape = images.shape.as_list()
if augmentations['rotation_and_flip']:
image_variations = preprocessing.get_all_rotations_and_flips(
image_variations)
if augmentations['rescaling']:
image_variations = preprocessing.get_all_rescalings(
image_variations, image_shape[1], augmentations['translation'])
# Put all augmented images into the batch: batch * num_augmented
augmented_images = tf.stack(image_variations, axis=0)
augmented_images = tf.reshape(augmented_images, [-1] + image_shape[1:])
if self._use_additional_features:
physical_features = tf.concat(
[physical_features] * len(image_variations), axis=0)
n_reps = self._data_config['n_crop_repeat']
augmented_images = preprocessing.move_repeats_to_batch(augmented_images,
n_reps)
logits_or_times = self.model(augmented_images, physical_features,
**self._eval_net_args)
if self._task_type == losses.TASK_CLASSIFICATION:
mu, log_sigma_sq = helpers.aggregate_classification_ensemble(
logits_or_times, len(image_variations),
self._data_config['test_time_ensembling'])
else:
assert self._task_type in losses.REGRESSION_TASKS
mu, log_sigma_sq = helpers.aggregate_regression_ensemble(
logits_or_times, len(image_variations),
self._model_uncertainty,
self._data_config['test_time_ensembling'])
return mu, log_sigma_sq
@property
def checkpoint_items(self):
return {'model': self.model}
def run_model_on_dataset(evaluator, dataset, config, n_batches=16):
"""Runs the model against a dataset, aggregates model output."""
scalar_metrics_to_log = collections.defaultdict(list)
model_outputs_to_log = collections.defaultdict(list)
dataset_features_to_log = collections.defaultdict(list)
batch_count = 1
for all_inputs in dataset:
if config.experiment_kwargs.data_config['additional_features']:
images = all_inputs[0]
physical_features = all_inputs[1]
labels, regression_targets, _ = all_inputs[2:5]
other_dataset_features = all_inputs[5:]
else:
images, physical_features = all_inputs[0], None
labels, regression_targets, _ = all_inputs[1:4]
other_dataset_features = all_inputs[4:]
mu, log_sigma_sq = evaluator.run_test_model_ensemble(
images, physical_features,
config.experiment_kwargs.data_config['test_augmentations'])
loss_config = config.experiment_kwargs.data_config['loss_config']
task_type = config.experiment_kwargs.data_config['task']
uncertainty = config.experiment_kwargs.data_config['model_uncertainty']
conf = config.experiment_kwargs.data_config['eval_confidence_interval']
scalar_metrics, vector_metrics = losses.compute_loss_and_metrics(
mu, log_sigma_sq, regression_targets, labels,
task_type, uncertainty, loss_config, 0, conf, mode='eval')
for i, dataset_feature in enumerate(other_dataset_features):
dataset_features_to_log[i].append(dataset_feature.numpy())
for scalar_metric in scalar_metrics:
v = scalar_metrics[scalar_metric]
val = v if isinstance(v, int) or isinstance(v, float) else v.numpy()
scalar_metrics_to_log[scalar_metric].append(val)
for vector_metric in vector_metrics:
val = vector_metrics[vector_metric].numpy()
model_outputs_to_log[vector_metric].append(val)
regression_targets_np = regression_targets.numpy()
labels_np = labels.numpy()
model_outputs_to_log['regression_targets'].append(regression_targets_np)
model_outputs_to_log['labels'].append(labels_np)
model_outputs_to_log['model_input_images'].append(images.numpy())
if n_batches and batch_count >= n_batches:
break
batch_count += 1
return scalar_metrics_to_log, model_outputs_to_log, dataset_features_to_log
def get_config_dataset_evaluator(filter_time_intervals,
ckpt_path,
config_override=None,
setup_dataset=True):
"""Set-up a default config, evaluation dataset, and evaluator."""
config = tp_config.get_config(filter_time_intervals=filter_time_intervals)
if config_override:
with config.ignore_type():
config.update_from_flattened_dict(config_override)
strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')
experiment = GalaxyMergeClassifierEvaluator(
strategy=strategy, **config.experiment_kwargs)
helpers.restore_checkpoint(ckpt_path, experiment)
if setup_dataset:
additional_lambdas = [
lambda ds: ds['sequence_id'],
lambda ds: ds['time_index'],
lambda ds: ds['axis'],
lambda ds: ds['normalized_time'],
lambda ds: ds['grounded_normalized_time'],
lambda ds: ds['unnormalized_time'],
lambda ds: ds['redshift'],
lambda ds: ds['mass']
]
ds = experiment.build_eval_input(additional_lambdas=additional_lambdas)
else:
ds = None
return config, ds, experiment
| deepmind-research-master | galaxy_mergers/evaluator.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A prop-carry task that transitions between multiple phases."""
import collections
import colorsys
import enum
from absl import logging
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_control.locomotion.arenas import floors
from dm_control.locomotion.mocap import loader as mocap_loader
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
from catch_carry import arm_opener
from catch_carry import mocap_data
from catch_carry import props
from catch_carry import trajectories
_PHYSICS_TIMESTEP = 0.005
# Maximum number of physics steps to run when settling props onto pedestals
# during episode initialization.
_MAX_SETTLE_STEPS = 1000
# Maximum velocity for prop to be considered settled.
# Used during episode initialization only.
_SETTLE_QVEL_TOL = 1e-5
# Magnitude of the sparse reward.
_SPARSE_REWARD = 1.0
# Maximum distance for walkers to be considered to be "near" a pedestal/target.
_TARGET_TOL = 0.65
# Defines how pedestals are placed around the arena.
# Pedestals are placed at constant angle intervals around the arena's center.
_BASE_PEDESTAL_DIST = 3 # Base distance from center.
_PEDESTAL_DIST_DELTA = 0.5 # Maximum variation on the base distance.
# Base hue-luminosity-saturation of the pedestal colors.
# We rotate through the hue for each pedestal created in the environment.
_BASE_PEDESTAL_H = 0.1
_BASE_PEDESTAL_L = 0.3
_BASE_PEDESTAL_S = 0.7
# Pedestal luminosity when active.
_ACTIVATED_PEDESTAL_L = 0.8
_PEDESTAL_SIZE = (0.2, 0.2, 0.02)
_SINGLE_PEDESTAL_COLOR = colorsys.hls_to_rgb(.3, .15, .35) + (1.0,)
WALKER_PEDESTAL = 'walker_pedestal'
WALKER_PROP = 'walker_prop'
PROP_PEDESTAL = 'prop_pedestal'
TARGET_STATE = 'target_state/'
CURRENT_STATE = 'meta/current_state/'
def _is_same_state(state_1, state_2):
if state_1.keys() != state_2.keys():
return False
for k in state_1:
if not np.all(state_1[k] == state_2[k]):
return False
return True
def _singleton_or_none(iterable):
iterator = iter(iterable)
try:
return next(iterator)
except StopIteration:
return None
def _generate_pedestal_colors(num_pedestals):
"""Function to get colors for pedestals."""
colors = []
for i in range(num_pedestals):
h = _BASE_PEDESTAL_H + i / num_pedestals
while h > 1:
h -= 1
colors.append(
colorsys.hls_to_rgb(h, _BASE_PEDESTAL_L, _BASE_PEDESTAL_S) + (1.0,))
return colors
InitializationParameters = collections.namedtuple(
'InitializationParameters', ('clip_segment', 'prop_id', 'pedestal_id'))
def _rotate_vector_by_quaternion(vec, quat):
result = np.empty(3)
mjbindings.mjlib.mju_rotVecQuat(result, np.asarray(vec), np.asarray(quat))
return result
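# Example (illustrative): rotating the x-axis (1, 0, 0) by the MuJoCo-order
# quaternion (cos(pi/4), 0, 0, sin(pi/4)), a 90-degree turn about z, returns
# approximately (0, 1, 0).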
@enum.unique
class WarehousePhase(enum.Enum):
TERMINATED = 0
GOTOTARGET = 1
PICKUP = 2
CARRYTOTARGET = 3
PUTDOWN = 4
def _find_random_free_pedestal_id(target_state, random_state):
free_pedestals = (
np.where(np.logical_not(np.any(target_state, axis=0)))[0])
return random_state.choice(free_pedestals)
def _find_random_occupied_pedestal_id(target_state, random_state):
occupied_pedestals = (
np.where(np.any(target_state, axis=0))[0])
return random_state.choice(occupied_pedestals)
def one_hot(values, num_unique):
return np.squeeze(np.eye(num_unique)[np.array(values).reshape(-1)])
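# Example (illustrative): one_hot(2, 5) returns [0., 0., 1., 0., 0.], and
# one_hot([0, 2], 3) returns a (2, 3) array with rows [1, 0, 0] and [0, 0, 1].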
class SinglePropFourPhases(object):
"""A phase manager that transitions between four phases for a single prop."""
def __init__(self, fixed_initialization_phase=None):
self._phase = WarehousePhase.TERMINATED
self._fixed_initialization_phase = fixed_initialization_phase
def initialize_episode(self, target_state, random_state):
"""Randomly initializes an episode into one of the four phases."""
if self._fixed_initialization_phase is None:
self._phase = random_state.choice([
WarehousePhase.GOTOTARGET, WarehousePhase.PICKUP,
WarehousePhase.CARRYTOTARGET, WarehousePhase.PUTDOWN
])
else:
self._phase = self._fixed_initialization_phase
self._prop_id = random_state.randint(len(target_state[PROP_PEDESTAL]))
self._pedestal_id = np.nonzero(
target_state[PROP_PEDESTAL][self._prop_id])[0][0]
pedestal_id_for_initialization = self._pedestal_id
if self._phase == WarehousePhase.GOTOTARGET:
clip_segment = trajectories.ClipSegment.APPROACH
target_state[WALKER_PROP][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
elif self._phase == WarehousePhase.PICKUP:
clip_segment = trajectories.ClipSegment.PICKUP
target_state[WALKER_PROP][self._prop_id] = 1
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
# Set self._pedestal_id to the next pedestal after pickup is successful.
self._pedestal_id = _find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state)
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
elif self._phase == WarehousePhase.CARRYTOTARGET:
clip_segment = random_state.choice([
trajectories.ClipSegment.CARRY1, trajectories.ClipSegment.CARRY2])
self._pedestal_id = _find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state)
if clip_segment == trajectories.ClipSegment.CARRY2:
pedestal_id_for_initialization = self._pedestal_id
target_state[WALKER_PROP][self._prop_id] = 1
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
elif self._phase == WarehousePhase.PUTDOWN:
clip_segment = trajectories.ClipSegment.PUTDOWN
target_state[WALKER_PROP][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
return InitializationParameters(
clip_segment, self._prop_id, pedestal_id_for_initialization)
def on_success(self, target_state, random_state):
"""Transitions into the next phase upon success of current phase."""
if self._phase == WarehousePhase.GOTOTARGET:
if self._prop_id is not None:
self._phase = WarehousePhase.PICKUP
# Set self._pedestal_id to the next pedestal after pickup is successful.
self._pedestal_id = (
_find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
target_state[WALKER_PROP][self._prop_id] = 1
target_state[PROP_PEDESTAL][self._prop_id, :] = 0
else:
# If you go to an empty pedestal, go to pedestal with a prop.
self._pedestal_id = (
_find_random_occupied_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
self._prop_id = np.argwhere(
target_state[PROP_PEDESTAL][:, self._pedestal_id])[0, 0]
elif self._phase == WarehousePhase.PICKUP:
self._phase = WarehousePhase.CARRYTOTARGET
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
elif self._phase == WarehousePhase.CARRYTOTARGET:
self._phase = WarehousePhase.PUTDOWN
target_state[WALKER_PROP][:] = 0
target_state[PROP_PEDESTAL][self._prop_id, self._pedestal_id] = 1
elif self._phase == WarehousePhase.PUTDOWN:
self._phase = WarehousePhase.GOTOTARGET
# Set self._pedestal_id to the next pedestal after putdown is successful.
self._pedestal_id = (
_find_random_free_pedestal_id(
target_state[PROP_PEDESTAL], random_state))
self._prop_id = None
target_state[WALKER_PEDESTAL][:] = 0
target_state[WALKER_PEDESTAL][self._pedestal_id] = 1
return self._phase
@property
def phase(self):
return self._phase
@property
def prop_id(self):
return self._prop_id
@property
def pedestal_id(self):
return self._pedestal_id
class PhasedBoxCarry(composer.Task):
"""A prop-carry task that transitions between multiple phases."""
def __init__(
self,
walker,
num_props,
num_pedestals,
proto_modifier=None,
transition_class=SinglePropFourPhases,
min_prop_gap=0.05,
pedestal_height_range=(0.45, 0.75),
log_transitions=False,
negative_reward_on_failure_termination=True,
use_single_pedestal_color=True,
priority_friction=False,
fixed_initialization_phase=None):
"""Initialize phased/instructed box-carrying ("warehouse") task.
Args:
walker: the walker to be used in this task.
num_props: the number of props in the task scene.
num_pedestals: the number of floating shelves (pedestals) in the task
scene.
proto_modifier: function to modify trajectory proto.
transition_class: the object that handles the transition logic.
min_prop_gap: arms are automatically opened to leave a gap around the prop
to avoid problematic collisions upon initialization.
pedestal_height_range: range of heights for the pedestal.
log_transitions: logging/printing of transitions.
negative_reward_on_failure_termination: boolean for whether to provide
negative sparse rewards on failure termination.
use_single_pedestal_color: boolean option for pedestals being the same
color or different colors.
priority_friction: sets friction priority thereby making prop objects have
higher friction.
fixed_initialization_phase: an instance of the `WarehousePhase` enum that
specifies the phase in which to always initialize the task, or `None` if
the initial task phase should be chosen randomly for each episode.
"""
self._num_props = num_props
self._num_pedestals = num_pedestals
self._proto_modifier = proto_modifier
self._transition_manager = transition_class(
fixed_initialization_phase=fixed_initialization_phase)
self._min_prop_gap = min_prop_gap
self._pedestal_height_range = pedestal_height_range
self._log_transitions = log_transitions
self._target_state = collections.OrderedDict([
(WALKER_PEDESTAL, np.zeros(num_pedestals)),
(WALKER_PROP, np.zeros(num_props)),
(PROP_PEDESTAL, np.zeros([num_props, num_pedestals]))
])
self._current_state = collections.OrderedDict([
(WALKER_PEDESTAL, np.zeros(num_pedestals)),
(WALKER_PROP, np.zeros(num_props)),
(PROP_PEDESTAL, np.zeros([num_props, num_pedestals]))
])
self._negative_reward_on_failure_termination = (
negative_reward_on_failure_termination)
self._priority_friction = priority_friction
clips = sorted(
set(mocap_data.medium_pedestal())
& (set(mocap_data.small_box()) | set(mocap_data.large_box())))
loader = mocap_loader.HDF5TrajectoryLoader(
mocap_data.H5_PATH, trajectories.SinglePropCarrySegmentedTrajectory)
self._trajectories = [
loader.get_trajectory(clip.clip_identifier) for clip in clips]
self._arena = floors.Floor()
self._walker = walker
self._feet_geoms = (
walker.mjcf_model.find('body', 'lfoot').find_all('geom') +
walker.mjcf_model.find('body', 'rfoot').find_all('geom'))
self._lhand_geoms = (
walker.mjcf_model.find('body', 'lhand').find_all('geom'))
self._rhand_geoms = (
walker.mjcf_model.find('body', 'rhand').find_all('geom'))
self._trajectories[0].configure_walkers([self._walker])
walker.create_root_joints(self._arena.attach(walker))
control_timestep = self._trajectories[0].dt
for i, trajectory in enumerate(self._trajectories):
if trajectory.dt != control_timestep:
raise ValueError(
'Inconsistent control timestep: '
'trajectories[{}].dt == {} but trajectories[0].dt == {}'
.format(i, trajectory.dt, control_timestep))
self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP)
if use_single_pedestal_color:
self._pedestal_colors = [_SINGLE_PEDESTAL_COLOR] * num_pedestals
else:
self._pedestal_colors = _generate_pedestal_colors(num_pedestals)
self._pedestals = [props.Pedestal(_PEDESTAL_SIZE, rgba)
for rgba in self._pedestal_colors]
for pedestal in self._pedestals:
self._arena.attach(pedestal)
self._props = [
self._trajectories[0].create_props(
priority_friction=self._priority_friction)[0]
for _ in range(num_props)
]
for prop in self._props:
self._arena.add_free_entity(prop)
self._task_observables = collections.OrderedDict()
self._task_observables['target_phase'] = observable.Generic(
lambda _: one_hot(self._transition_manager.phase.value, num_unique=5))
def ego_prop_xpos(physics):
prop_id = self._focal_prop_id
if prop_id is None:
return np.zeros((3,))
prop = self._props[prop_id]
prop_xpos, _ = prop.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, prop_xpos - walker_xpos)
self._task_observables['target_prop/xpos'] = (
observable.Generic(ego_prop_xpos))
def prop_zaxis(physics):
prop_id = self._focal_prop_id
if prop_id is None:
return np.zeros((3,))
prop = self._props[prop_id]
prop_xmat = physics.bind(
mjcf.get_attachment_frame(prop.mjcf_model)).xmat
return prop_xmat[[2, 5, 8]]
self._task_observables['target_prop/zaxis'] = (
observable.Generic(prop_zaxis))
def ego_pedestal_xpos(physics):
pedestal_id = self._focal_pedestal_id
if pedestal_id is None:
return np.zeros((3,))
pedestal = self._pedestals[pedestal_id]
pedestal_xpos, _ = pedestal.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, pedestal_xpos - walker_xpos)
self._task_observables['target_pedestal/xpos'] = (
observable.Generic(ego_pedestal_xpos))
for obs in (self._walker.observables.proprioception +
self._walker.observables.kinematic_sensors +
self._walker.observables.dynamic_sensors +
list(self._task_observables.values())):
obs.enabled = True
self._focal_prop_id = None
self._focal_pedestal_id = None
@property
def root_entity(self):
return self._arena
@property
def task_observables(self):
return self._task_observables
@property
def name(self):
return 'warehouse'
def initialize_episode_mjcf(self, random_state):
self._reward = 0.0
self._discount = 1.0
self._should_terminate = False
self._before_step_success = False
for target_value in self._target_state.values():
target_value[:] = 0
for pedestal_id, pedestal in enumerate(self._pedestals):
angle = 2 * np.pi * pedestal_id / len(self._pedestals)
dist = (_BASE_PEDESTAL_DIST +
_PEDESTAL_DIST_DELTA * random_state.uniform(-1, 1))
height = random_state.uniform(*self._pedestal_height_range)
pedestal_pos = [dist * np.cos(angle), dist * np.sin(angle),
height - pedestal.geom.size[2]]
mjcf.get_attachment_frame(pedestal.mjcf_model).pos = pedestal_pos
for prop in self._props:
prop.detach()
self._props = []
self._trajectory_for_prop = []
for prop_id in range(self._num_props):
trajectory = random_state.choice(self._trajectories)
if self._proto_modifier:
trajectory = trajectory.get_modified_trajectory(
self._proto_modifier, random_state=random_state)
prop = trajectory.create_props(
priority_friction=self._priority_friction)[0]
prop.mjcf_model.model = 'prop_{}'.format(prop_id)
self._arena.add_free_entity(prop)
self._props.append(prop)
self._trajectory_for_prop.append(trajectory)
def _settle_props(self, physics):
prop_freejoints = [mjcf.get_attachment_frame(prop.mjcf_model).freejoint
for prop in self._props]
physics.bind(prop_freejoints).qvel = 0
physics.forward()
for _ in range(_MAX_SETTLE_STEPS):
self._update_current_state(physics)
success = self._evaluate_target_state()
stopped = max(abs(physics.bind(prop_freejoints).qvel)) < _SETTLE_QVEL_TOL
if success and stopped:
break
else:
physics.step()
physics.data.time = 0
def initialize_episode(self, physics, random_state):
self._ground_geomid = physics.bind(
self._arena.mjcf_model.worldbody.geom[0]).element_id
self._feet_geomids = set(physics.bind(self._feet_geoms).element_id)
self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id)
self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id)
for prop_id in range(len(self._props)):
pedestal_id = _find_random_free_pedestal_id(
self._target_state[PROP_PEDESTAL], random_state)
pedestal = self._pedestals[pedestal_id]
self._target_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1
for prop_id, prop in enumerate(self._props):
trajectory = self._trajectory_for_prop[prop_id]
pedestal_id = np.nonzero(
self._target_state[PROP_PEDESTAL][prop_id])[0][0]
pedestal = self._pedestals[pedestal_id]
pedestal_pos, _ = pedestal.get_pose(physics)
pedestal_delta = np.array(
pedestal_pos - trajectory.infer_pedestal_positions()[0])
pedestal_delta[2] += pedestal.geom.size[2]
prop_timestep = trajectory.get_timestep_data(0).props[0]
prop_pos = prop_timestep.position + np.array(pedestal_delta)
prop_quat = prop_timestep.quaternion
prop_pos[:2] += random_state.uniform(
-pedestal.geom.size[:2] / 2, pedestal.geom.size[:2] / 2)
prop.set_pose(physics, prop_pos, prop_quat)
self._settle_props(physics)
init_params = self._transition_manager.initialize_episode(
self._target_state, random_state)
if self._log_transitions:
logging.info(init_params)
self._on_transition(physics)
init_prop = self._props[init_params.prop_id]
init_pedestal = self._pedestals[init_params.pedestal_id]
self._init_prop_id = init_params.prop_id
self._init_pedestal_id = init_params.pedestal_id
init_trajectory = self._trajectory_for_prop[init_params.prop_id]
init_timestep = init_trajectory.get_random_timestep_in_segment(
init_params.clip_segment, random_state)
trajectory_pedestal_pos = init_trajectory.infer_pedestal_positions()[0]
init_pedestal_pos = np.array(init_pedestal.get_pose(physics)[0])
delta_pos = init_pedestal_pos - trajectory_pedestal_pos
delta_pos[2] = 0
delta_angle = np.pi + np.arctan2(init_pedestal_pos[1], init_pedestal_pos[0])
delta_quat = (np.cos(delta_angle / 2), 0, 0, np.sin(delta_angle / 2))
trajectory_pedestal_to_walker = (
init_timestep.walkers[0].position - trajectory_pedestal_pos)
rotated_pedestal_to_walker = _rotate_vector_by_quaternion(
trajectory_pedestal_to_walker, delta_quat)
self._walker.set_pose(
physics,
position=trajectory_pedestal_pos + rotated_pedestal_to_walker,
quaternion=init_timestep.walkers[0].quaternion)
self._walker.set_velocity(
physics, velocity=init_timestep.walkers[0].velocity,
angular_velocity=init_timestep.walkers[0].angular_velocity)
self._walker.shift_pose(
physics, position=delta_pos, quaternion=delta_quat,
rotate_velocity=True)
physics.bind(self._walker.mocap_joints).qpos = (
init_timestep.walkers[0].joints)
physics.bind(self._walker.mocap_joints).qvel = (
init_timestep.walkers[0].joints_velocity)
if init_params.clip_segment in (trajectories.ClipSegment.CARRY1,
trajectories.ClipSegment.CARRY2,
trajectories.ClipSegment.PUTDOWN):
trajectory_pedestal_to_prop = (
init_timestep.props[0].position - trajectory_pedestal_pos)
rotated_pedestal_to_prop = _rotate_vector_by_quaternion(
trajectory_pedestal_to_prop, delta_quat)
init_prop.set_pose(
physics,
position=trajectory_pedestal_pos + rotated_pedestal_to_prop,
quaternion=init_timestep.props[0].quaternion)
init_prop.set_velocity(
physics, velocity=init_timestep.props[0].velocity,
angular_velocity=init_timestep.props[0].angular_velocity)
init_prop.shift_pose(
physics, position=delta_pos,
quaternion=delta_quat, rotate_velocity=True)
# If we have moved the pedestal upwards during height initialization,
# the prop may now be lodged inside it. We fix that here.
if init_pedestal_pos[2] > trajectory_pedestal_pos[2]:
init_prop_geomid = physics.bind(init_prop.geom).element_id
init_pedestal_geomid = physics.bind(init_pedestal.geom).element_id
disallowed_contact = sorted((init_prop_geomid, init_pedestal_geomid))
def has_disallowed_contact():
physics.forward()
for contact in physics.data.contact:
if sorted((contact.geom1, contact.geom2)) == disallowed_contact:
return True
return False
while has_disallowed_contact():
init_prop.shift_pose(physics, (0, 0, 0.001))
self._move_arms_if_necessary(physics)
self._update_current_state(physics)
self._previous_step_success = self._evaluate_target_state()
self._focal_prop_id = self._init_prop_id
self._focal_pedestal_id = self._init_pedestal_id
def _move_arms_if_necessary(self, physics):
if self._min_prop_gap is not None:
for entity in self._props + self._pedestals:
try:
arm_opener.open_arms_for_prop(
physics, self._walker.left_arm_root, self._walker.right_arm_root,
entity.mjcf_model, self._min_prop_gap)
except RuntimeError as e:
raise composer.EpisodeInitializationError(e)
def after_step(self, physics, random_state):
# First we check for failure termination.
for contact in physics.data.contact:
if ((contact.geom1 == self._ground_geomid and
contact.geom2 not in self._feet_geomids) or
(contact.geom2 == self._ground_geomid and
contact.geom1 not in self._feet_geomids)):
if self._negative_reward_on_failure_termination:
self._reward = -_SPARSE_REWARD
else:
self._reward = 0.0
self._should_terminate = True
self._discount = 0.0
return
# Then check for normal reward and state transitions.
self._update_current_state(physics)
success = self._evaluate_target_state()
if success and not self._previous_step_success:
self._reward = _SPARSE_REWARD
new_phase = (
self._transition_manager.on_success(self._target_state, random_state))
self._should_terminate = (new_phase == WarehousePhase.TERMINATED)
self._on_transition(physics)
self._previous_step_success = self._evaluate_target_state()
else:
self._reward = 0.0
def _on_transition(self, physics):
self._focal_prop_id = self._transition_manager.prop_id
self._focal_pedestal_id = self._transition_manager.pedestal_id
if self._log_transitions:
logging.info('target_state:\n%s', self._target_state)
for pedestal_id, pedestal_active in enumerate(
self._target_state[WALKER_PEDESTAL]):
r, g, b, a = self._pedestal_colors[pedestal_id]
if pedestal_active:
h, _, s = colorsys.rgb_to_hls(r, g, b)
r, g, b = colorsys.hls_to_rgb(h, _ACTIVATED_PEDESTAL_L, s)
physics.bind(self._pedestals[pedestal_id].geom).rgba = (r, g, b, a)
def get_reward(self, physics):
return self._reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
return self._should_terminate
def _update_current_state(self, physics):
for current_state_value in self._current_state.values():
current_state_value[:] = 0
# Check if the walker is near each pedestal.
walker_pos, _ = self._walker.get_pose(physics)
for pedestal_id, pedestal in enumerate(self._pedestals):
target_pos, _ = pedestal.get_pose(physics)
walker_to_target_dist = np.linalg.norm(walker_pos[:2] - target_pos[:2])
if walker_to_target_dist <= _TARGET_TOL:
self._current_state[WALKER_PEDESTAL][pedestal_id] = 1
prop_geomids = {
physics.bind(prop.geom).element_id: prop_id
for prop_id, prop in enumerate(self._props)}
pedestal_geomids = {
physics.bind(pedestal.geom).element_id: pedestal_id
for pedestal_id, pedestal in enumerate(self._pedestals)}
prop_pedestal_contact_counts = np.zeros(
[self._num_props, self._num_pedestals])
prop_lhand_contact = [False] * self._num_props
prop_rhand_contact = [False] * self._num_props
for contact in physics.data.contact:
prop_id = prop_geomids.get(contact.geom1, prop_geomids.get(contact.geom2))
pedestal_id = pedestal_geomids.get(
contact.geom1, pedestal_geomids.get(contact.geom2))
has_lhand = (contact.geom1 in self._lhand_geomids or
contact.geom2 in self._lhand_geomids)
has_rhand = (contact.geom1 in self._rhand_geomids or
contact.geom2 in self._rhand_geomids)
if prop_id is not None and pedestal_id is not None:
prop_pedestal_contact_counts[prop_id, pedestal_id] += 1
if prop_id is not None and has_lhand:
prop_lhand_contact[prop_id] = True
if prop_id is not None and has_rhand:
prop_rhand_contact[prop_id] = True
for prop_id in range(self._num_props):
if prop_lhand_contact[prop_id] and prop_rhand_contact[prop_id]:
self._current_state[WALKER_PROP][prop_id] = 1
pedestal_contact_counts = prop_pedestal_contact_counts[prop_id]
for pedestal_id in range(self._num_pedestals):
if pedestal_contact_counts[pedestal_id] >= 4:
self._current_state[PROP_PEDESTAL][prop_id, pedestal_id] = 1
def _evaluate_target_state(self):
return _is_same_state(self._current_state, self._target_state)
| deepmind-research-master | catch_carry/warehouse.py |
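The contact bookkeeping in `_update_current_state` above follows a simple pattern: build geom-id-to-entity-id lookup tables once, then classify every MuJoCo contact pair against them. A minimal numpy sketch of that pattern, with hand-written ids and contact pairs standing in for `physics.bind(...).element_id` and `physics.data.contact` (all values here are illustrative, not part of the task code):

# Sketch of the contact-classification pattern from _update_current_state.
import numpy as np

prop_geomids = {10: 0, 11: 1}             # geom id -> prop id (illustrative)
pedestal_geomids = {20: 0, 21: 1, 22: 2}  # geom id -> pedestal id
hand_geomids = {30, 31}                   # geom ids of the two hands

# Each contact is a (geom1, geom2) pair, as in physics.data.contact.
contacts = [(10, 20)] * 4 + [(11, 30), (31, 11)]

num_props, num_pedestals = 2, 3
prop_pedestal_contact_counts = np.zeros((num_props, num_pedestals), dtype=int)
prop_hand_contact = [False] * num_props

for geom1, geom2 in contacts:
  prop_id = prop_geomids.get(geom1, prop_geomids.get(geom2))
  pedestal_id = pedestal_geomids.get(geom1, pedestal_geomids.get(geom2))
  has_hand = geom1 in hand_geomids or geom2 in hand_geomids
  if prop_id is not None and pedestal_id is not None:
    prop_pedestal_contact_counts[prop_id, pedestal_id] += 1
  if prop_id is not None and has_hand:
    prop_hand_contact[prop_id] = True

# Prop 0 rests on pedestal 0 (>= 4 contact points); prop 1 touches a hand.
print(prop_pedestal_contact_counts)
print(prop_hand_contact)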
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for opening arms until they are not in contact with a prop."""
import contextlib
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
_MAX_IK_ATTEMPTS = 100
_IK_MAX_CORRECTION_WEIGHT = 0.1
_JOINT_LIMIT_TOLERANCE = 1e-4
_GAP_TOLERANCE = 0.1
class _ArmPropContactRemover(object):
"""Helper class for removing contacts between an arm and a prop via IK."""
def __init__(self, physics, arm_root, prop, gap):
arm_geoms = arm_root.find_all('geom')
self._arm_geom_ids = set(physics.bind(arm_geoms).element_id)
arm_joints = arm_root.find_all('joint')
self._arm_joint_ids = list(physics.bind(arm_joints).element_id)
self._arm_qpos_indices = physics.model.jnt_qposadr[self._arm_joint_ids]
self._arm_dof_indices = physics.model.jnt_dofadr[self._arm_joint_ids]
self._prop_geoms = prop.find_all('geom')
self._prop_geom_ids = set(physics.bind(self._prop_geoms).element_id)
self._arm_joint_min = np.full(len(self._arm_joint_ids), float('-inf'),
dtype=physics.model.jnt_range.dtype)
self._arm_joint_max = np.full(len(self._arm_joint_ids), float('inf'),
dtype=physics.model.jnt_range.dtype)
for i, joint_id in enumerate(self._arm_joint_ids):
if physics.model.jnt_limited[joint_id]:
self._arm_joint_min[i], self._arm_joint_max[i] = (
physics.model.jnt_range[joint_id])
self._gap = gap
def _contact_pair_is_relevant(self, contact):
set1 = self._arm_geom_ids
set2 = self._prop_geom_ids
return ((contact.geom1 in set1 and contact.geom2 in set2) or
(contact.geom2 in set1 and contact.geom1 in set2))
def _forward_and_find_next_contact(self, physics):
"""Forwards the physics and finds the next contact to handle."""
physics.forward()
next_contact = None
for contact in physics.data.contact:
if (self._contact_pair_is_relevant(contact) and
(next_contact is None or contact.dist < next_contact.dist)):
next_contact = contact
return next_contact
def _remove_contact_ik_iteration(self, physics, contact):
"""Performs one linearized IK iteration to remove the specified contact."""
if contact.geom1 in self._arm_geom_ids:
sign = -1
geom_id = contact.geom1
else:
sign = 1
geom_id = contact.geom2
body_id = physics.model.geom_bodyid[geom_id]
normal = sign * contact.frame[:3]
jac_dtype = physics.data.qpos.dtype
jac = np.empty((6, physics.model.nv), dtype=jac_dtype)
jac_pos, jac_rot = jac[:3], jac[3:]
mjbindings.mjlib.mj_jacPointAxis(
physics.model.ptr, physics.data.ptr,
jac_pos, jac_rot,
contact.pos + (contact.dist / 2) * normal, normal, body_id)
# Calculate corrections w.r.t. all joints, disregarding joint limits.
delta_xpos = normal * max(0, self._gap - contact.dist)
jac_all_joints = jac_pos[:, self._arm_dof_indices]
update_unfiltered = np.linalg.lstsq(
jac_all_joints, delta_xpos, rcond=None)[0]
# Filter out joints at limit that are corrected in the "wrong" direction.
initial_qpos = np.array(physics.data.qpos[self._arm_qpos_indices])
min_filter = np.logical_and(
initial_qpos - self._arm_joint_min < _JOINT_LIMIT_TOLERANCE,
update_unfiltered < 0)
max_filter = np.logical_and(
self._arm_joint_max - initial_qpos < _JOINT_LIMIT_TOLERANCE,
update_unfiltered > 0)
active_joints = np.where(
np.logical_not(np.logical_or(min_filter, max_filter)))[0]
# Calculate corrections w.r.t. valid joints only.
active_dof_indices = self._arm_dof_indices[active_joints]
jac_joints = jac_pos[:, active_dof_indices]
update_filtered = np.linalg.lstsq(jac_joints, delta_xpos, rcond=None)[0]
update_nv = np.zeros(physics.model.nv, dtype=jac_dtype)
update_nv[active_dof_indices] = update_filtered
# Calculate maximum correction weight that does not violate joint limits.
weights = np.full_like(update_filtered, _IK_MAX_CORRECTION_WEIGHT)
active_initial_qpos = initial_qpos[active_joints]
active_joint_min = self._arm_joint_min[active_joints]
active_joint_max = self._arm_joint_max[active_joints]
for i in range(len(weights)):
proposed_update = update_filtered[i]
if proposed_update > 0:
max_allowed_update = active_joint_max[i] - active_initial_qpos[i]
weights[i] = min(max_allowed_update / proposed_update, weights[i])
elif proposed_update < 0:
min_allowed_update = active_joint_min[i] - active_initial_qpos[i]
weights[i] = min(min_allowed_update / proposed_update, weights[i])
weight = min(weights)
# Integrate the correction into `qpos`.
mjbindings.mjlib.mj_integratePos(
physics.model.ptr, physics.data.qpos, update_nv, weight)
# "Paranoid" clip the modified joint `qpos` to within joint limits.
active_qpos_indices = self._arm_qpos_indices[active_joints]
physics.data.qpos[active_qpos_indices] = np.clip(
physics.data.qpos[active_qpos_indices],
active_joint_min, active_joint_max)
@contextlib.contextmanager
def _override_margins_and_gaps(self, physics):
"""Context manager that overrides geom margins and gaps to `self._gap`."""
prop_geom_bindings = physics.bind(self._prop_geoms)
original_margins = np.array(prop_geom_bindings.margin)
original_gaps = np.array(prop_geom_bindings.gap)
prop_geom_bindings.margin = self._gap * (1 - _GAP_TOLERANCE)
prop_geom_bindings.gap = self._gap * (1 - _GAP_TOLERANCE)
yield
prop_geom_bindings.margin = original_margins
prop_geom_bindings.gap = original_gaps
physics.forward()
def remove_contacts(self, physics):
with self._override_margins_and_gaps(physics):
for _ in range(_MAX_IK_ATTEMPTS):
contact = self._forward_and_find_next_contact(physics)
if contact is None:
return
self._remove_contact_ik_iteration(physics, contact)
contact = self._forward_and_find_next_contact(physics)
if contact and contact.dist < 0:
raise RuntimeError(
'Failed to remove contact with prop after {} iterations. '
'Final contact distance is {}.'.format(
_MAX_IK_ATTEMPTS, contact.dist))
def open_arms_for_prop(physics, left_arm_root, right_arm_root, prop, gap):
"""Opens left and right arms so as to leave a specified gap with the prop."""
left_arm_opener = _ArmPropContactRemover(physics, left_arm_root, prop, gap)
left_arm_opener.remove_contacts(physics)
right_arm_opener = _ArmPropContactRemover(physics, right_arm_root, prop, gap)
right_arm_opener.remove_contacts(physics)
| deepmind-research-master | catch_carry/arm_opener.py |
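At the heart of `_remove_contact_ik_iteration` is a linearized inverse-kinematics step: solve a least-squares problem for a joint-space update that realizes a desired Cartesian correction, then scale the step so no joint is pushed past its limits. A standalone numpy sketch of that scaling logic, with a made-up 3x3 positional Jacobian in place of the one computed by `mj_jacPointAxis` (all numbers are illustrative):

# Sketch of the limit-aware step scaling in _remove_contact_ik_iteration.
import numpy as np

_IK_MAX_CORRECTION_WEIGHT = 0.1

jac = np.array([[0.9, 0.1, 0.0],
                [0.0, 0.8, 0.2],
                [0.1, 0.0, 0.7]])             # positional Jacobian (made up)
delta_xpos = np.array([0.0, 0.0, 0.05])       # desired Cartesian correction
qpos = np.array([0.2, -0.1, 0.3])             # current joint positions
joint_min = np.array([-0.5, -0.5, -0.5])
joint_max = np.array([0.5, 0.5, 0.35])        # third joint is near its limit

# Least-squares joint update that (approximately) realizes delta_xpos.
update = np.linalg.lstsq(jac, delta_xpos, rcond=None)[0]

# Largest step weight that keeps every joint inside its limits, capped at the
# maximum correction weight.
weights = np.full_like(update, _IK_MAX_CORRECTION_WEIGHT)
for i, du in enumerate(update):
  if du > 0:
    weights[i] = min((joint_max[i] - qpos[i]) / du, weights[i])
  elif du < 0:
    weights[i] = min((joint_min[i] - qpos[i]) / du, weights[i])
weight = min(weights)

new_qpos = np.clip(qpos + weight * update, joint_min, joint_max)
print(weight, new_qpos)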
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metadata for mocap clips that correspond to a walker carrying a prop."""
import collections
import enum
import os
from dm_control.locomotion.mocap import loader as mocap_loader
from catch_carry import trajectories
H5_DIR = os.path.dirname(__file__)
H5_PATH = os.path.join(H5_DIR, 'mocap_data.h5')
IDENTIFIER_PREFIX = 'DeepMindCatchCarry'
IDENTIFIER_TEMPLATE = IDENTIFIER_PREFIX + '-{:03d}'
ClipInfo = collections.namedtuple(
'ClipInfo', ('clip_identifier', 'num_steps', 'dt', 'flags'))
class Flag(enum.IntEnum):
BOX = 1 << 0
BALL = 1 << 1
LIGHT_PROP = 1 << 2
HEAVY_PROP = 1 << 3
SMALL_PROP = 1 << 4
LARGE_PROP = 1 << 5
FLOOR_LEVEL = 1 << 6
MEDIUM_PEDESTAL = 1 << 7
HIGH_PEDESTAL = 1 << 8
_ALL_CLIPS = None
def _get_clip_info(loader, clip_number, flags):
clip = loader.get_trajectory(IDENTIFIER_TEMPLATE.format(clip_number))
return ClipInfo(
clip_identifier=clip.identifier,
num_steps=clip.num_steps,
dt=clip.dt,
flags=flags)
def _get_all_clip_infos_if_necessary():
"""Creates the global _ALL_CLIPS list if it has not already been created."""
global _ALL_CLIPS
if _ALL_CLIPS is None:
loader = mocap_loader.HDF5TrajectoryLoader(
H5_PATH, trajectories.WarehouseTrajectory)
clip_numbers = (1, 2, 3, 4, 5, 6, 9, 10,
11, 12, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53)
clip_infos = []
for i, clip_number in enumerate(clip_numbers):
flags = 0
if i in _FLOOR_LEVEL:
flags |= Flag.FLOOR_LEVEL
elif i in _MEDIUM_PEDESTAL:
flags |= Flag.MEDIUM_PEDESTAL
elif i in _HIGH_PEDESTAL:
flags |= Flag.HIGH_PEDESTAL
if i in _LIGHT_PROP:
flags |= Flag.LIGHT_PROP
elif i in _HEAVY_PROP:
flags |= Flag.HEAVY_PROP
if i in _SMALL_BOX:
flags |= Flag.SMALL_PROP
flags |= Flag.BOX
elif i in _LARGE_BOX:
flags |= Flag.LARGE_PROP
flags |= Flag.BOX
elif i in _SMALL_BALL:
flags |= Flag.SMALL_PROP
flags |= Flag.BALL
elif i in _LARGE_BALL:
flags |= Flag.LARGE_PROP
flags |= Flag.BALL
clip_infos.append(_get_clip_info(loader, clip_number, flags))
_ALL_CLIPS = tuple(clip_infos)
def _assert_partitions_all_clips(*args):
"""Asserts that a given set of subcollections partitions ALL_CLIPS."""
sets = tuple(set(arg) for arg in args)
# Check that the union of all the sets is ALL_CLIPS.
union = set()
for subset in sets:
union = union | set(subset)
assert union == set(range(48))
# Check that the sets are pairwise disjoint.
for i in range(len(sets)):
for j in range(i + 1, len(sets)):
assert sets[i] & sets[j] == set()
_FLOOR_LEVEL = tuple(range(0, 16))
_MEDIUM_PEDESTAL = tuple(range(16, 32))
_HIGH_PEDESTAL = tuple(range(32, 48))
_assert_partitions_all_clips(_FLOOR_LEVEL, _MEDIUM_PEDESTAL, _HIGH_PEDESTAL)
_LIGHT_PROP = (0, 1, 2, 3, 8, 9, 12, 13, 16, 17, 18, 19, 24,
25, 26, 27, 34, 35, 38, 39, 42, 43, 46, 47)
_HEAVY_PROP = (4, 5, 6, 7, 10, 11, 14, 15, 20, 21, 22, 23, 28,
29, 30, 31, 32, 33, 36, 37, 40, 41, 44, 45)
_assert_partitions_all_clips(_LIGHT_PROP, _HEAVY_PROP)
_SMALL_BOX = (0, 1, 4, 5, 16, 17, 20, 21, 34, 35, 36, 37)
_LARGE_BOX = (2, 3, 6, 7, 18, 19, 22, 23, 32, 33, 38, 39)
_SMALL_BALL = (8, 9, 10, 11, 24, 25, 30, 31, 40, 41, 46, 47)
_LARGE_BALL = (12, 13, 14, 15, 26, 27, 28, 29, 42, 43, 44, 45)
_assert_partitions_all_clips(_SMALL_BOX, _LARGE_BOX, _SMALL_BALL, _LARGE_BALL)
def all_clips():
_get_all_clip_infos_if_necessary()
return _ALL_CLIPS
def floor_level():
clips = all_clips()
return tuple(clips[i] for i in _FLOOR_LEVEL)
def medium_pedestal():
clips = all_clips()
return tuple(clips[i] for i in _MEDIUM_PEDESTAL)
def high_pedestal():
clips = all_clips()
return tuple(clips[i] for i in _HIGH_PEDESTAL)
def light_prop():
clips = all_clips()
return tuple(clips[i] for i in _LIGHT_PROP)
def heavy_prop():
clips = all_clips()
return tuple(clips[i] for i in _HEAVY_PROP)
def small_box():
clips = all_clips()
return tuple(clips[i] for i in _SMALL_BOX)
def large_box():
clips = all_clips()
return tuple(clips[i] for i in _LARGE_BOX)
def small_ball():
clips = all_clips()
return tuple(clips[i] for i in _SMALL_BALL)
def large_ball():
clips = all_clips()
return tuple(clips[i] for i in _LARGE_BALL)
| deepmind-research-master | catch_carry/mocap_data.py |
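Because `ClipInfo.flags` is a bitmask of `Flag` values, clips can also be selected with plain bitwise tests instead of the convenience functions above. A hedged usage sketch, assuming the `catch_carry` package and its `mocap_data.h5` file are available locally; `wanted` is just an illustrative variable name:

# Sketch: select all small-box clips that start at floor level by testing the
# flag bitmask directly.
from catch_carry import mocap_data

wanted = (mocap_data.Flag.SMALL_PROP | mocap_data.Flag.BOX |
          mocap_data.Flag.FLOOR_LEVEL)
clips = [clip for clip in mocap_data.all_clips()
         if (clip.flags & wanted) == wanted]

for clip in clips:
  print(clip.clip_identifier, clip.num_steps, clip.dt)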
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | catch_carry/__init__.py |
# Copyright 2020 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['absl-py', 'dm_control', 'numpy']
setup(
name='catch_carry',
version='0.1',
description='Whole-body object manipulation tasks and motion capture data.',
url='https://github.com/deepmind/deepmind-research/catch_carry',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | catch_carry/setup.py |
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A ball-tossing task."""
import collections
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import variation
from dm_control.composer.observation import observable
from dm_control.locomotion.arenas import floors
from dm_control.locomotion.mocap import loader as mocap_loader
import numpy as np
from catch_carry import mocap_data
from catch_carry import props
from catch_carry import trajectories
_PHYSICS_TIMESTEP = 0.005
_BUCKET_SIZE = (0.2, 0.2, 0.02)
# Magnitude of the sparse reward.
_SPARSE_REWARD = 1.0
class BallToss(composer.Task):
"""A task involving catching and throwing a ball."""
def __init__(self, walker,
proto_modifier=None,
negative_reward_on_failure_termination=True,
priority_friction=False,
bucket_offset=1.,
y_range=0.5,
toss_delay=0.5,
randomize_init=False,
):
"""Initialize ball tossing task.
Args:
walker: the walker to be used in this task.
proto_modifier: function to modify trajectory proto.
      negative_reward_on_failure_termination: flag to provide a negative reward
        when the task fails.
      priority_friction: sets friction priority, thereby making prop objects
        have higher friction.
      bucket_offset: distance in meters to push the bucket away from the walker.
y_range: range (uniformly sampled) of distance in meters the ball is
thrown left/right of the walker.
toss_delay: time in seconds to delay after catching before changing reward
to encourage throwing the ball.
randomize_init: flag to randomize initial pose.
"""
self._proto_modifier = proto_modifier
self._negative_reward_on_failure_termination = (
negative_reward_on_failure_termination)
self._priority_friction = priority_friction
self._bucket_rewarded = False
self._bucket_offset = bucket_offset
self._y_range = y_range
self._toss_delay = toss_delay
self._randomize_init = randomize_init
# load a clip to grab a ball prop and initializations
loader = mocap_loader.HDF5TrajectoryLoader(
mocap_data.H5_PATH, trajectories.WarehouseTrajectory)
clip_number = 54
self._trajectory = loader.get_trajectory(
mocap_data.IDENTIFIER_TEMPLATE.format(clip_number))
# create the floor arena
self._arena = floors.Floor()
self._walker = walker
self._walker_geoms = tuple(self._walker.mjcf_model.find_all('geom'))
self._feet_geoms = (
walker.mjcf_model.find('body', 'lfoot').find_all('geom') +
walker.mjcf_model.find('body', 'rfoot').find_all('geom'))
self._lhand_geoms = (
walker.mjcf_model.find('body', 'lhand').find_all('geom'))
self._rhand_geoms = (
walker.mjcf_model.find('body', 'rhand').find_all('geom'))
# resize the humanoid based on the motion capture data subject
self._trajectory.configure_walkers([self._walker])
walker.create_root_joints(self._arena.attach(walker))
control_timestep = self._trajectory.dt
self.set_timesteps(control_timestep, _PHYSICS_TIMESTEP)
# build and attach the bucket to the arena
self._bucket = props.Bucket(_BUCKET_SIZE)
self._arena.attach(self._bucket)
self._prop = self._trajectory.create_props(
priority_friction=self._priority_friction)[0]
self._arena.add_free_entity(self._prop)
self._task_observables = collections.OrderedDict()
# define feature based observations (agent may or may not use these)
def ego_prop_xpos(physics):
prop_xpos, _ = self._prop.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, prop_xpos - walker_xpos)
self._task_observables['prop_{}/xpos'.format(0)] = (
observable.Generic(ego_prop_xpos))
def prop_zaxis(physics):
prop_xmat = physics.bind(
mjcf.get_attachment_frame(self._prop.mjcf_model)).xmat
return prop_xmat[[2, 5, 8]]
self._task_observables['prop_{}/zaxis'.format(0)] = (
observable.Generic(prop_zaxis))
def ego_bucket_xpos(physics):
bucket_xpos, _ = self._bucket.get_pose(physics)
walker_xpos = physics.bind(self._walker.root_body).xpos
return self._walker.transform_vec_to_egocentric_frame(
physics, bucket_xpos - walker_xpos)
self._task_observables['bucket_{}/xpos'.format(0)] = (
observable.Generic(ego_bucket_xpos))
for obs in (self._walker.observables.proprioception +
self._walker.observables.kinematic_sensors +
self._walker.observables.dynamic_sensors +
list(self._task_observables.values())):
obs.enabled = True
@property
def root_entity(self):
return self._arena
@property
def task_observables(self):
return self._task_observables
@property
def name(self):
return 'ball_toss'
def initialize_episode_mjcf(self, random_state):
self._reward = 0.0
self._discount = 1.0
self._should_terminate = False
self._prop.detach()
if self._proto_modifier:
trajectory = self._trajectory.get_modified_trajectory(
self._proto_modifier)
self._prop = trajectory.create_props(
priority_friction=self._priority_friction)[0]
self._arena.add_free_entity(self._prop)
# set the bucket position for this episode
bucket_distance = 1.*random_state.rand()+self._bucket_offset
mjcf.get_attachment_frame(self._bucket.mjcf_model).pos = [bucket_distance,
0, 0]
def initialize_episode(self, physics, random_state):
self._ground_geomid = physics.bind(
self._arena.mjcf_model.worldbody.geom[0]).element_id
self._feet_geomids = set(physics.bind(self._feet_geoms).element_id)
self._lhand_geomids = set(physics.bind(self._lhand_geoms).element_id)
self._rhand_geomids = set(physics.bind(self._rhand_geoms).element_id)
self._walker_geomids = set(physics.bind(self._walker_geoms).element_id)
self._bucket_rewarded = False
if self._randomize_init:
timestep_ind = random_state.randint(
len(self._trajectory._proto.timesteps)) # pylint: disable=protected-access
else:
timestep_ind = 0
walker_init_timestep = self._trajectory._proto.timesteps[timestep_ind] # pylint: disable=protected-access
prop_init_timestep = self._trajectory._proto.timesteps[0] # pylint: disable=protected-access
self._walker.set_pose(
physics,
position=walker_init_timestep.walkers[0].position,
quaternion=walker_init_timestep.walkers[0].quaternion)
self._walker.set_velocity(
physics, velocity=walker_init_timestep.walkers[0].velocity,
angular_velocity=walker_init_timestep.walkers[0].angular_velocity)
physics.bind(self._walker.mocap_joints).qpos = (
walker_init_timestep.walkers[0].joints)
physics.bind(self._walker.mocap_joints).qvel = (
walker_init_timestep.walkers[0].joints_velocity)
initial_prop_pos = np.copy(prop_init_timestep.props[0].position)
initial_prop_pos[0] += 1. # move ball (from mocap) relative to origin
initial_prop_pos[1] = 0 # align ball with walker along y-axis
self._prop.set_pose(
physics,
position=initial_prop_pos,
quaternion=prop_init_timestep.props[0].quaternion)
# specify the distributions of ball velocity componentwise
x_vel_mag = 4.5*random_state.rand()+1.5 # m/s
x_dist = 3 # approximate initial distance from walker to ball
self._t_dist = x_dist/x_vel_mag # target time at which to hit the humanoid
z_offset = .4*random_state.rand()+.1 # height at which to hit person
# compute velocity to satisfy desired projectile trajectory
z_vel_mag = (4.9*(self._t_dist**2) + z_offset)/self._t_dist
y_range = variation.evaluate(self._y_range, random_state=random_state)
y_vel_mag = y_range*random_state.rand()-y_range/2
trans_vel = [-x_vel_mag, y_vel_mag, z_vel_mag]
ang_vel = 1.5*random_state.rand(3)-0.75
self._prop.set_velocity(
physics,
velocity=trans_vel,
angular_velocity=ang_vel)
def after_step(self, physics, random_state):
# First we check for failure termination (walker or ball touches ground).
ground_failure = False
for contact in physics.data.contact:
if ((contact.geom1 == self._ground_geomid and
contact.geom2 not in self._feet_geomids) or
(contact.geom2 == self._ground_geomid and
contact.geom1 not in self._feet_geomids)):
ground_failure = True
break
contact_features = self._evaluate_contacts(physics)
prop_lhand, prop_rhand, bucket_prop, bucket_walker, walker_prop = contact_features
# or also fail if walker hits bucket
if ground_failure or bucket_walker:
if self._negative_reward_on_failure_termination:
self._reward = -_SPARSE_REWARD
else:
self._reward = 0.0
self._should_terminate = True
self._discount = 0.0
return
self._reward = 0.0
# give reward if prop is in bucket (prop touching bottom surface of bucket)
if bucket_prop:
self._reward += _SPARSE_REWARD/10
# shaping reward for being closer to bucket
if physics.data.time > (self._t_dist + self._toss_delay):
bucket_xy = physics.bind(self._bucket.geom).xpos[0][:2]
prop_xy = self._prop.get_pose(physics)[0][:2]
xy_dist = np.sum(np.array(np.abs(bucket_xy - prop_xy)))
self._reward += np.exp(-xy_dist/3.)*_SPARSE_REWARD/50
else:
# bonus for hands touching ball
if prop_lhand:
self._reward += _SPARSE_REWARD/100
if prop_rhand:
self._reward += _SPARSE_REWARD/100
# combined with penalty for other body parts touching the ball
if walker_prop:
self._reward -= _SPARSE_REWARD/100
def get_reward(self, physics):
return self._reward
def get_discount(self, physics):
return self._discount
def should_terminate_episode(self, physics):
return self._should_terminate
def _evaluate_contacts(self, physics):
prop_elem_id = physics.bind(self._prop.geom).element_id
bucket_bottom_elem_id = physics.bind(self._bucket.geom[0]).element_id
bucket_any_elem_id = set(physics.bind(self._bucket.geom).element_id)
prop_lhand_contact = False
prop_rhand_contact = False
bucket_prop_contact = False
bucket_walker_contact = False
walker_prop_contact = False
for contact in physics.data.contact:
has_prop = (contact.geom1 == prop_elem_id or
contact.geom2 == prop_elem_id)
has_bucket_bottom = (contact.geom1 == bucket_bottom_elem_id or
contact.geom2 == bucket_bottom_elem_id)
has_bucket_any = (contact.geom1 in bucket_any_elem_id or
contact.geom2 in bucket_any_elem_id)
has_lhand = (contact.geom1 in self._lhand_geomids or
contact.geom2 in self._lhand_geomids)
has_rhand = (contact.geom1 in self._rhand_geomids or
contact.geom2 in self._rhand_geomids)
has_walker = (contact.geom1 in self._walker_geomids or
contact.geom2 in self._walker_geomids)
if has_prop and has_bucket_bottom:
bucket_prop_contact = True
if has_walker and has_bucket_any:
bucket_walker_contact = True
if has_walker and has_prop:
walker_prop_contact = True
if has_prop and has_lhand:
prop_lhand_contact = True
if has_prop and has_rhand:
prop_rhand_contact = True
return (prop_lhand_contact, prop_rhand_contact, bucket_prop_contact,
bucket_walker_contact, walker_prop_contact)
| deepmind-research-master | catch_carry/ball_toss.py |
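The initial ball velocity set in `initialize_episode` follows standard projectile motion: with gravity g of roughly 9.8 m/s^2, choosing z_vel = (g/2 * t**2 + z_offset) / t makes the ball arrive z_offset above its launch height exactly at time t, when it has also covered the horizontal distance x_dist at speed x_vel. A small numpy check of that relation (the numbers are illustrative):

# Sketch verifying the projectile relation used to set the ball's velocity.
import numpy as np

x_vel_mag = 4.0                      # horizontal speed towards the walker, m/s
x_dist = 3.0                         # initial walker-to-ball distance, m
t_dist = x_dist / x_vel_mag          # time of flight to reach the walker
z_offset = 0.3                       # desired height gain at arrival, m

z_vel_mag = (4.9 * t_dist**2 + z_offset) / t_dist   # 4.9 = g / 2

# Height above the launch point as a function of time.
height = lambda t: z_vel_mag * t - 4.9 * t**2
np.testing.assert_allclose(height(t_dist), z_offset)
print('height at arrival:', height(t_dist))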
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to launch viewer with an example environment."""
from absl import app
from absl import flags
from dm_control import viewer
from catch_carry import task_examples
FLAGS = flags.FLAGS
flags.DEFINE_enum('task', 'warehouse', ['warehouse', 'toss'],
'The task to visualize.')
TASKS = {
'warehouse': task_examples.build_vision_warehouse,
'toss': task_examples.build_vision_toss,
}
def main(unused_argv):
viewer.launch(environment_loader=TASKS[FLAGS.task])
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | catch_carry/explore.py |
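`viewer.launch` also accepts a `policy` callable mapping a timestep to an action, which is convenient for watching a scripted or trained controller instead of driving the walker by hand. A hedged sketch; the zero-action policy is purely illustrative and assumes zeros are valid actions for the position-controlled walker:

# Sketch: launch the viewer with a fixed zero-action policy instead of manual
# control (assumes dm_control and catch_carry are installed).
import numpy as np
from dm_control import viewer
from catch_carry import task_examples

env = task_examples.build_vision_warehouse()
action_spec = env.action_spec()

def zero_policy(time_step):
  del time_step  # The policy ignores observations.
  return np.zeros(action_spec.shape, dtype=action_spec.dtype)

viewer.launch(environment_loader=lambda: env, policy=zero_policy)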
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A rectangular pedestal."""
from dm_control import composer
from dm_control import mjcf
class Pedestal(composer.Entity):
"""A rectangular pedestal."""
def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'):
self._mjcf_root = mjcf.RootElement(model=name)
self._geom = self._mjcf_root.worldbody.add(
'geom', type='box', size=size, name='geom', rgba=rgba)
@property
def mjcf_model(self):
return self._mjcf_root
@property
def geom(self):
return self._geom
def after_compile(self, physics, unused_random_state):
super(Pedestal, self).after_compile(physics, unused_random_state)
self._body_geom_ids = set(
physics.bind(geom).element_id
for geom in self.mjcf_model.find_all('geom'))
@property
def body_geom_ids(self):
return self._body_geom_ids
class Bucket(composer.Entity):
"""A rectangular bucket."""
def _build(self, size=(.2, .3, .05), rgba=(0, .5, 0, 1), name='pedestal'):
self._mjcf_root = mjcf.RootElement(model=name)
self._geoms = []
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=size, name='geom_bottom', rgba=rgba))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s1',
rgba=rgba, pos=[size[0], 0, size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[2], size[1], size[0]), name='geom_s2',
rgba=rgba, pos=[-size[0], 0, size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s3',
rgba=rgba, pos=[0, size[1], size[0]]))
self._geoms.append(self._mjcf_root.worldbody.add(
'geom', type='box', size=(size[0], size[2], size[0]), name='geom_s4',
rgba=rgba, pos=[0, -size[1], size[0]]))
@property
def mjcf_model(self):
return self._mjcf_root
@property
def geom(self):
return self._geoms
def after_compile(self, physics, unused_random_state):
super(Bucket, self).after_compile(physics, unused_random_state)
self._body_geom_ids = set(
physics.bind(geom).element_id
for geom in self.mjcf_model.find_all('geom'))
@property
def body_geom_ids(self):
return self._body_geom_ids
| deepmind-research-master | catch_carry/props.py |
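The `Bucket` above is just five box geoms: a bottom plate and four walls offset upwards around it. A hedged sketch that compiles a bucket on its own and prints its geom layout; it assumes the standard dm_control mjcf bindings (`Physics.from_mjcf_model`, plus `pos`/`size` attributes on bound geoms), and the size used is illustrative:

# Sketch: compile a Bucket by itself and inspect its five box geoms.
from dm_control import mjcf
from catch_carry import props

bucket = props.Bucket(size=(0.2, 0.2, 0.02))
physics = mjcf.Physics.from_mjcf_model(bucket.mjcf_model)

for geom in bucket.geom:
  binding = physics.bind(geom)
  print(geom.name, 'pos:', binding.pos, 'size:', binding.size)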
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mocap trajectory that assumes props start stationary on pedestals."""
import copy
import enum
import itertools
from dm_control.locomotion.mocap import mocap_pb2
from dm_control.locomotion.mocap import trajectory
from dm_control.utils import transformations
import numpy as np
_PEDESTAL_SIZE = (0.2, 0.2, 0.02)
_MAX_SETTLE_STEPS = 100
@enum.unique
class ClipSegment(enum.Enum):
"""Annotations for subsegments within a warehouse clips."""
# Clip segment corresponding to a walker approaching an object
APPROACH = 1
# Clip segment corresponding to a walker picking up an object.
PICKUP = 2
# Clip segment corresponding to the "first half" of the walker carrying an
# object, beginning from the walker backing away from a pedestal with
# object in hand.
CARRY1 = 3
  # Clip segment corresponding to the "second half" of the walker carrying an
  # object, ending with the walker approaching a pedestal with the object in
  # hand.
CARRY2 = 4
# Clip segment corresponding to a walker putting down an object on a pedestal.
PUTDOWN = 5
# Clip segment corresponding to a walker backing off after successfully
# placing an object on a pedestal.
BACKOFF = 6
def _get_rotated_bounding_box(size, quaternion):
"""Calculates the bounding box of a rotated 3D box.
Args:
size: An array of length 3 specifying the half-lengths of a box.
quaternion: A unit quaternion specifying the box's orientation.
Returns:
An array of length 3 specifying the half-lengths of the bounding box of
the rotated box.
"""
corners = ((size[0], size[1], size[2]),
(size[0], size[1], -size[2]),
(size[0], -size[1], size[2]),
(-size[0], size[1], size[2]))
rotated_corners = tuple(
transformations.quat_rotate(quaternion, corner) for corner in corners)
return np.amax(np.abs(rotated_corners), axis=0)
def _get_prop_z_extent(prop_proto, quaternion):
"""Calculates the "z-extent" of the prop in given orientation.
This is the distance from the centre of the prop to its lowest point in the
world frame, taking into account the prop's orientation.
Args:
prop_proto: A `mocap_pb2.Prop` protocol buffer defining a prop.
quaternion: A unit quaternion specifying the prop's orientation.
Returns:
the distance from the centre of the prop to its lowest point in the
world frame in the specified orientation.
"""
if prop_proto.shape == mocap_pb2.Prop.BOX:
return _get_rotated_bounding_box(prop_proto.size, quaternion)[2]
elif prop_proto.shape == mocap_pb2.Prop.SPHERE:
return prop_proto.size[0]
else:
raise NotImplementedError(
'Unsupported prop shape: {}'.format(prop_proto.shape))
class WarehouseTrajectory(trajectory.Trajectory):
"""Mocap trajectory that assumes props start stationary on pedestals."""
def infer_pedestal_positions(self, num_averaged_steps=30,
ground_height_tolerance=0.1,
proto_modifier=None):
proto = self._proto
if proto_modifier is not None:
proto = copy.copy(proto)
proto_modifier(proto)
if not proto.props:
return []
positions = []
for timestep in itertools.islice(proto.timesteps, num_averaged_steps):
positions_for_timestep = []
for prop_proto, prop_timestep in zip(proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
positions_for_timestep.append([prop_timestep.position[0],
prop_timestep.position[1],
prop_timestep.position[2] - z_extent])
positions.append(positions_for_timestep)
median_positions = np.median(positions, axis=0)
median_positions[:, 2][median_positions[:, 2] < ground_height_tolerance] = 0
return median_positions
def get_props_z_extent(self, physics):
timestep = self._proto.timesteps[self._get_step_id(physics.time())]
out = []
for prop_proto, prop_timestep in zip(self._proto.props, timestep.props):
z_extent = _get_prop_z_extent(prop_proto, prop_timestep.quaternion)
out.append(z_extent)
return out
class SinglePropCarrySegmentedTrajectory(WarehouseTrajectory):
"""A mocap trajectory class that automatically segments prop-carry clips.
The algorithm implemented in the class only works if the trajectory consists
  of exactly one walker and one prop. The value of `pedestal_zone_distance` and
  the exact nature of the zone crossings are determined empirically from the
  DeepMindCatchCarry dataset, and are unlikely to work well outside of this
  setting.
"""
def __init__(self,
proto,
start_time=None,
end_time=None,
pedestal_zone_distance=0.65,
start_step=None,
end_step=None,
zero_out_velocities=True):
super(SinglePropCarrySegmentedTrajectory, self).__init__(
proto, start_time, end_time, start_step=start_step, end_step=end_step,
zero_out_velocities=zero_out_velocities)
self._pedestal_zone_distance = pedestal_zone_distance
self._generate_segments()
def _generate_segments(self):
pedestal_position = self.infer_pedestal_positions()[0]
    # First we find the timesteps at which the walker crosses the pedestal's
# vicinity zone. This should happen exactly 4 times: enter it to pick up,
# leave it, enter it again to put down, and leave it again.
was_in_pedestal_zone = False
crossings = []
for i, timestep in enumerate(self._proto.timesteps):
pedestal_dist = np.linalg.norm(
timestep.walkers[0].position[:2] - pedestal_position[:2])
if pedestal_dist > self._pedestal_zone_distance and was_in_pedestal_zone:
crossings.append(i)
was_in_pedestal_zone = False
elif (pedestal_dist <= self._pedestal_zone_distance and
not was_in_pedestal_zone):
crossings.append(i)
was_in_pedestal_zone = True
if len(crossings) < 3:
raise RuntimeError(
'Failed to segment the given trajectory: '
'walker should cross the pedestal zone\'s boundary >= 3 times '
'but got {}'.format(len(crossings)))
elif len(crossings) == 3:
crossings.append(len(self._proto.timesteps) - 1)
elif len(crossings) > 4:
crossings = [crossings[0], crossings[1], crossings[-2], crossings[-1]]
# Identify the pick up event during the first in-zone interval.
start_position = np.array(self._proto.timesteps[0].props[0].position)
end_position = np.array(self._proto.timesteps[-1].props[0].position)
pick_up_step = crossings[1] - 1
while pick_up_step > crossings[0]:
prev_position = self._proto.timesteps[pick_up_step - 1].props[0].position
if np.linalg.norm(start_position[2] - prev_position[2]) < 0.001:
break
pick_up_step -= 1
# Identify the put down event during the second in-zone interval.
put_down_step = crossings[2]
while put_down_step <= crossings[3]:
next_position = self._proto.timesteps[put_down_step + 1].props[0].position
if np.linalg.norm(end_position[2] - next_position[2]) < 0.001:
break
put_down_step += 1
carry_halfway_step = int((crossings[1] + crossings[2]) / 2)
self._segment_intervals = {
ClipSegment.APPROACH: (0, crossings[0]),
ClipSegment.PICKUP: (crossings[0], pick_up_step),
ClipSegment.CARRY1: (pick_up_step, carry_halfway_step),
ClipSegment.CARRY2: (carry_halfway_step, crossings[2]),
ClipSegment.PUTDOWN: (crossings[2], put_down_step),
ClipSegment.BACKOFF: (put_down_step, len(self._proto.timesteps))
}
def segment_interval(self, segment):
start_step, end_step = self._segment_intervals[segment]
return (start_step * self._proto.dt, (end_step - 1) * self._proto.dt)
def get_random_timestep_in_segment(self, segment, random_step):
return self._proto.timesteps[
random_step.randint(*self._segment_intervals[segment])]
| deepmind-research-master | catch_carry/trajectories.py |
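The segmentation in `_generate_segments` hinges on detecting when the walker crosses the pedestal's vicinity zone: every transition between being inside and outside the `pedestal_zone_distance` radius is recorded, and a clean pickup-carry-putdown clip yields exactly four crossings. A standalone sketch of that crossing detector on a synthetic distance sequence (all data here is made up):

# Sketch of the zone-crossing detection used by _generate_segments, applied to
# a synthetic sequence of walker-to-pedestal distances.
pedestal_zone_distance = 0.65
distances = [1.2, 0.9, 0.6, 0.4, 0.5, 0.8, 1.1,   # enter, then leave (pickup)
             1.0, 0.7, 0.5, 0.3, 0.6, 0.9, 1.3]   # enter, then leave (putdown)

was_in_zone = False
crossings = []
for i, dist in enumerate(distances):
  if dist > pedestal_zone_distance and was_in_zone:
    crossings.append(i)
    was_in_zone = False
  elif dist <= pedestal_zone_distance and not was_in_zone:
    crossings.append(i)
    was_in_zone = True

# Four crossings: zone entry for pickup, exit, entry for putdown, final exit.
print(crossings)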
# Copyright 2020 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that build representative tasks."""
from dm_control import composer
from dm_control.composer.variation import distributions
from dm_control.locomotion.mocap import loader as mocap_loader
from dm_control.locomotion.walkers import cmu_humanoid
from catch_carry import ball_toss
from catch_carry import warehouse
def build_vision_warehouse(random_state=None):
"""Build canonical 4-pedestal, 2-prop task."""
# Build a position-controlled CMU humanoid walker.
walker = cmu_humanoid.CMUHumanoidPositionControlled(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build the task.
size_distribution = distributions.Uniform(low=0.75, high=1.25)
mass_distribution = distributions.Uniform(low=2, high=7)
prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution,
mass=mass_distribution)
task = warehouse.PhasedBoxCarry(
walker=walker,
num_props=2,
num_pedestals=4,
proto_modifier=prop_resizer,
negative_reward_on_failure_termination=True)
# return the environment
return composer.Environment(
time_limit=15,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True,
max_reset_attempts=float('inf'))
def build_vision_toss(random_state=None):
"""Build canonical ball tossing task."""
# Build a position-controlled CMU humanoid walker.
walker = cmu_humanoid.CMUHumanoidPositionControlled(
observable_options={'egocentric_camera': dict(enabled=True)})
# Build the task.
size_distribution = distributions.Uniform(low=0.95, high=1.5)
mass_distribution = distributions.Uniform(low=2, high=4)
prop_resizer = mocap_loader.PropResizer(size_factor=size_distribution,
mass=mass_distribution)
task = ball_toss.BallToss(
walker=walker,
proto_modifier=prop_resizer,
negative_reward_on_failure_termination=True,
priority_friction=True,
bucket_offset=3.,
y_range=0.5,
toss_delay=1.5,
randomize_init=True)
# return the environment
return composer.Environment(
time_limit=6,
task=task,
random_state=random_state,
strip_singleton_obs_buffer_dim=True,
max_reset_attempts=float('inf'))
| deepmind-research-master | catch_carry/task_examples.py |
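Both builders return a `composer.Environment` implementing the standard `dm_env` interface, so the tasks can also be stepped headlessly, e.g. to inspect observations and rewards during debugging. A hedged sketch rolling out the ball-toss task with uniform random actions (the rollout length and action sampling are illustrative):

# Sketch: headless rollout of the ball-toss task with uniform random actions.
import numpy as np
from catch_carry import task_examples

env = task_examples.build_vision_toss()
spec = env.action_spec()

timestep = env.reset()
total_reward = 0.0
for _ in range(100):
  action = np.random.uniform(spec.minimum, spec.maximum, size=spec.shape)
  timestep = env.step(action)
  if timestep.reward is not None:
    total_reward += timestep.reward
  if timestep.last():
    break
print('accumulated reward:', total_reward)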
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Hierarchical Probabilistic U-Net open-source version."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from model import HierarchicalProbUNet
import tensorflow.compat.v1 as tf
_NUM_CLASSES = 2
_BATCH_SIZE = 2
_SPATIAL_SHAPE = [32, 32]
_CHANNELS_PER_BLOCK = [5, 7, 9, 11, 13]
_IMAGE_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [1]
_BOTTLENECK_SIZE = _SPATIAL_SHAPE[0] // 2 ** (len(_CHANNELS_PER_BLOCK) - 1)
_SEGMENTATION_SHAPE = [_BATCH_SIZE] + _SPATIAL_SHAPE + [_NUM_CLASSES]
_LATENT_DIMS = [3, 2, 1]
_INITIALIZERS = {'w': tf.orthogonal_initializer(gain=1.0, seed=None),
'b': tf.truncated_normal_initializer(stddev=0.001)}
def _get_placeholders():
"""Returns placeholders for the image and segmentation."""
img = tf.placeholder(dtype=tf.float32, shape=_IMAGE_SHAPE)
seg = tf.placeholder(dtype=tf.float32, shape=_SEGMENTATION_SHAPE)
return img, seg
class HierarchicalProbUNetTest(tf.test.TestCase):
def test_shape_of_sample(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, _ = _get_placeholders()
sample = hpu_net.sample(img)
self.assertEqual(sample.shape.as_list(), _SEGMENTATION_SHAPE)
def test_shape_of_reconstruction(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, seg = _get_placeholders()
reconstruction = hpu_net.reconstruct(img, seg)
self.assertEqual(reconstruction.shape.as_list(), _SEGMENTATION_SHAPE)
def test_shapes_in_prior(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, _ = _get_placeholders()
prior_out = hpu_net._prior(img)
distributions = prior_out['distributions']
latents = prior_out['used_latents']
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
    # Test the number of latent distributions.
self.assertEqual(len(distributions), len(_LATENT_DIMS))
# Test shapes of latent scales.
for level in range(len(_LATENT_DIMS)):
latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** level
latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape,
_LATENT_DIMS[level]]
self.assertEqual(latents[level].shape.as_list(), latent_shape)
# Test encoder shapes.
for level in range(len(_CHANNELS_PER_BLOCK)):
spatial_shape = _SPATIAL_SHAPE[0] // 2 ** level
feature_shape = [_BATCH_SIZE, spatial_shape, spatial_shape,
_CHANNELS_PER_BLOCK[level]]
self.assertEqual(encoder_features[level].shape.as_list(), feature_shape)
# Test decoder shape.
start_level = len(_LATENT_DIMS)
latent_spatial_shape = _BOTTLENECK_SIZE * 2 ** start_level
latent_shape = [_BATCH_SIZE, latent_spatial_shape, latent_spatial_shape,
_CHANNELS_PER_BLOCK[::-1][start_level]]
self.assertEqual(decoder_features.shape.as_list(), latent_shape)
def test_shape_of_kl(self):
hpu_net = HierarchicalProbUNet(latent_dims=_LATENT_DIMS,
channels_per_block=_CHANNELS_PER_BLOCK,
num_classes=_NUM_CLASSES,
initializers=_INITIALIZERS)
img, seg = _get_placeholders()
kl_dict = hpu_net.kl(img, seg)
self.assertEqual(len(kl_dict), len(_LATENT_DIMS))
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | hierarchical_probabilistic_unet/model_test.py |
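The shape assertions in the test above follow directly from the U-Net geometry: each encoder level halves the spatial resolution, so with five levels the bottleneck of a 32x32 input is 2x2, and the latent at hierarchy level l lives at a resolution of bottleneck * 2**l. A quick numeric check of those shapes, using the same constants as the test:

# Quick check of the spatial shapes asserted in the test above.
_SPATIAL_SHAPE = [32, 32]
_CHANNELS_PER_BLOCK = [5, 7, 9, 11, 13]
_LATENT_DIMS = [3, 2, 1]

bottleneck = _SPATIAL_SHAPE[0] // 2 ** (len(_CHANNELS_PER_BLOCK) - 1)
print('bottleneck resolution:', bottleneck)                 # 2
for level, dim in enumerate(_LATENT_DIMS):
  print('latent level', level, 'resolution:', bottleneck * 2 ** level,
        'channels:', dim)
for level, channels in enumerate(_CHANNELS_PER_BLOCK):
  print('encoder level', level, 'resolution:', _SPATIAL_SHAPE[0] // 2 ** level,
        'channels:', channels)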
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility Functions for the GECO-objective.
(GECO is described in `Taming VAEs`, see https://arxiv.org/abs/1810.00597).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
class MovingAverage(snt.AbstractModule):
"""A thin wrapper around snt.MovingAverage.
The module adds the option not to differentiate through the last element that
is added to the moving average, specified by means of the kwarg
`differentiable`.
"""
def __init__(self, decay, local=True, differentiable=False,
name='snt_moving_average'):
super(MovingAverage, self).__init__(name=name)
self._differentiable = differentiable
self._moving_average = snt.MovingAverage(
decay=decay, local=local, name=name)
def _build(self, inputs):
if not self._differentiable:
inputs = tf.stop_gradient(inputs)
return self._moving_average(inputs)
class LagrangeMultiplier(snt.AbstractModule):
"""A lagrange multiplier sonnet module."""
def __init__(self,
rate=1e-2,
name='snt_lagrange_multiplier'):
"""Initializer for the sonnet module.
Args:
rate: Scalar used to scale the magnitude of gradients of the Lagrange
multipliers, defaulting to 1e-2.
name: Name of the Lagrange multiplier sonnet module.
"""
super(LagrangeMultiplier, self).__init__(name=name)
self._rate = rate
def _build(self, ma_constraint):
"""Connects the module to the graph.
Args:
      ma_constraint: A loss minus a target value, denoting a constraint that
        shall be less than or equal to zero.
Returns:
      An op which, when added to a loss on which `minimize` is then called,
      results in the optimizer minimizing w.r.t. the model's parameters and
      maximizing w.r.t. the Lagrange multipliers, hence enforcing the
      constraints.
"""
lagmul = snt.get_lagrange_multiplier(
shape=ma_constraint.shape, rate=self._rate,
initializer=np.ones(ma_constraint.shape))
return lagmul
def _sample_gumbel(shape, eps=1e-20):
"""Transforms a uniform random variable to be standard Gumbel distributed."""
return -tf.log(
-tf.log(tf.random_uniform(shape, minval=0, maxval=1) + eps) + eps)
def _topk_mask(score, k):
"""Returns a mask for the top-k elements in score."""
_, indices = tf.nn.top_k(score, k=k)
return tf.scatter_nd(tf.expand_dims(indices, -1), tf.ones(k),
tf.squeeze(score).shape.as_list())
def ce_loss(logits, labels, mask=None, top_k_percentage=None,
deterministic=False):
"""Computes the cross-entropy loss.
Optionally a mask and a top-k percentage for the used pixels can be specified.
The top-k mask can be produced deterministically or sampled.
Args:
logits: A tensor of shape (b,h,w,num_classes)
labels: A tensor of shape (b,h,w,num_classes)
mask: None or a tensor of shape (b,h,w).
top_k_percentage: None or a float in (0.,1.]. If None, a standard
cross-entropy loss is calculated.
deterministic: A Boolean indicating whether or not to produce the
prospective top-k mask deterministically.
Returns:
A dictionary holding the mean and the pixelwise sum of the loss for the
batch as well as the employed loss mask.
"""
num_classes = logits.shape.as_list()[-1]
y_flat = tf.reshape(logits, (-1, num_classes), name='reshape_y')
t_flat = tf.reshape(labels, (-1, num_classes), name='reshape_t')
if mask is None:
mask = tf.ones(shape=(t_flat.shape.as_list()[0],))
else:
assert mask.shape.as_list()[:3] == labels.shape.as_list()[:3],\
'The loss mask shape differs from the target shape: {} vs. {}.'.format(
mask.shape.as_list(), labels.shape.as_list()[:3])
mask = tf.reshape(mask, (-1,), name='reshape_mask')
n_pixels_in_batch = y_flat.shape.as_list()[0]
xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=t_flat, logits=y_flat)
if top_k_percentage is not None:
assert 0.0 < top_k_percentage <= 1.0
k_pixels = tf.cast(tf.floor(n_pixels_in_batch * top_k_percentage), tf.int32)
stopgrad_xe = tf.stop_gradient(xe)
norm_xe = stopgrad_xe / tf.reduce_sum(stopgrad_xe)
if deterministic:
score = tf.log(norm_xe)
else:
# Use the Gumbel trick to sample the top-k pixels, equivalent to sampling
# from a categorical distribution over pixels whose probabilities are
# given by the normalized cross-entropy loss values. This is done by
# adding Gumbel noise to the logarithmic normalized cross-entropy loss
# (followed by choosing the top-k pixels).
score = tf.log(norm_xe) + _sample_gumbel(norm_xe.shape.as_list())
score = score + tf.log(mask)
top_k_mask = _topk_mask(score, k_pixels)
mask = mask * top_k_mask
# Calculate batch-averages for the sum and mean of the loss
batch_size = labels.shape.as_list()[0]
xe = tf.reshape(xe, shape=(batch_size, -1))
mask = tf.reshape(mask, shape=(batch_size, -1))
ce_sum_per_instance = tf.reduce_sum(mask * xe, axis=1)
ce_sum = tf.reduce_mean(ce_sum_per_instance, axis=0)
ce_mean = tf.reduce_sum(mask * xe) / tf.reduce_sum(mask)
return {'mean': ce_mean, 'sum': ce_sum, 'mask': mask}
| deepmind-research-master | hierarchical_probabilistic_unet/geco_utils.py |
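The comment inside `ce_loss` refers to the Gumbel trick: adding independent standard Gumbel noise to the log of the normalized per-pixel losses and keeping the top-k scores samples k pixels without replacement, with probabilities proportional to their loss values. A small numpy illustration of that trick, independent of the TF1 code above:

# Numpy sketch of the Gumbel top-k trick used for stochastic top-k pixel
# selection in ce_loss.
import numpy as np

rng = np.random.RandomState(0)
losses = np.array([0.05, 0.10, 0.20, 0.25, 0.40])   # per-pixel loss values
norm = losses / losses.sum()                         # categorical weights

def sample_gumbel(shape, eps=1e-20):
  u = rng.uniform(low=0.0, high=1.0, size=shape)
  return -np.log(-np.log(u + eps) + eps)

k = 2
score = np.log(norm) + sample_gumbel(norm.shape)
top_k_indices = np.argsort(score)[-k:]

mask = np.zeros_like(norm)
mask[top_k_indices] = 1.0
print('selected pixels:', top_k_indices, 'mask:', mask)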
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Open Source Version of the Hierarchical Probabilistic U-Net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import geco_utils
import sonnet as snt
import tensorflow as tf
from tensorflow_probability import distributions as tfd
import unet_utils
class _HierarchicalCore(snt.AbstractModule):
"""A U-Net encoder-decoder with a full encoder and a truncated decoder.
The truncated decoder is interleaved with the hierarchical latent space and
has as many levels as there are levels in the hierarchy plus one additional
level.
"""
def __init__(self, latent_dims, channels_per_block,
down_channels_per_block=None, activation_fn=tf.nn.relu,
initializers=None, regularizers=None, convs_per_block=3,
blocks_per_level=3, name='HierarchicalDecoderDist'):
"""Initializes a HierarchicalCore.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
each scale. The length of the list indicates the number of U-Net decoder
scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block or None. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used when
the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the
bias is a zero initializer.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
name: A string specifying the name of the module.
"""
super(_HierarchicalCore, self).__init__(name=name)
self._latent_dims = latent_dims
self._channels_per_block = channels_per_block
self._activation_fn = activation_fn
self._initializers = initializers
self._regularizers = regularizers
self._convs_per_block = convs_per_block
self._blocks_per_level = blocks_per_level
if down_channels_per_block is None:
self._down_channels_per_block = channels_per_block
else:
self._down_channels_per_block = down_channels_per_block
self._name = name
def _build(self, inputs, mean=False, z_q=None):
"""A build-method allowing to sample from the module as specified.
Args:
inputs: A tensor of shape (b,h,w,c). When using the module as a prior the
`inputs` tensor should be a batch of images. When using it as a posterior
        the tensor should be a (batched) concatenation of images and
segmentations.
mean: A boolean or a list of booleans. If a boolean, it specifies whether
or not to use the distributions' means in ALL latent scales. If a list,
each bool therein specifies whether or not to use the scale's mean. If
False, the latents of the scale are sampled.
z_q: None or a list of tensors. If not None, z_q provides external latents
to be used instead of sampling them. This is used to employ posterior
latents in the prior during training. Therefore, if z_q is not None, the
        value of `mean` is ignored. If z_q is None, either the distribution's
        mean is used (in case `mean` for the respective scale is True) or else
a sample from the distribution is drawn.
Returns:
A Dictionary holding the output feature map of the truncated U-Net
decoder under key 'decoder_features', a list of the U-Net encoder features
produced at the end of each encoder scale under key 'encoder_outputs', a
list of the predicted distributions at each scale under key
'distributions', a list of the used latents at each scale under the key
'used_latents'.
"""
encoder_features = inputs
encoder_outputs = []
num_levels = len(self._channels_per_block)
num_latent_levels = len(self._latent_dims)
if isinstance(mean, bool):
mean = [mean] * num_latent_levels
distributions = []
used_latents = []
# Iterate the descending levels in the U-Net encoder.
for level in range(num_levels):
# Iterate the residual blocks in each level.
for _ in range(self._blocks_per_level):
encoder_features = unet_utils.res_block(
input_features=encoder_features,
n_channels=self._channels_per_block[level],
n_down_channels=self._down_channels_per_block[level],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
encoder_outputs.append(encoder_features)
if level != num_levels - 1:
encoder_features = unet_utils.resize_down(encoder_features, scale=2)
# Iterate the ascending levels in the (truncated) U-Net decoder.
decoder_features = encoder_outputs[-1]
for level in range(num_latent_levels):
# Predict a Gaussian distribution for each pixel in the feature map.
latent_dim = self._latent_dims[level]
mu_logsigma = snt.Conv2D(
2 * latent_dim,
(1, 1),
padding='SAME',
initializers=self._initializers,
regularizers=self._regularizers,
)(decoder_features)
mu = mu_logsigma[..., :latent_dim]
logsigma = mu_logsigma[..., latent_dim:]
dist = tfd.MultivariateNormalDiag(loc=mu, scale_diag=tf.exp(logsigma))
distributions.append(dist)
# Get the latents to condition on.
if z_q is not None:
z = z_q[level]
elif mean[level]:
z = dist.loc
else:
z = dist.sample()
used_latents.append(z)
# Concat and upsample the latents with the previous features.
decoder_output_lo = tf.concat([z, decoder_features], axis=-1)
decoder_output_hi = unet_utils.resize_up(decoder_output_lo, scale=2)
decoder_features = tf.concat(
[decoder_output_hi, encoder_outputs[::-1][level + 1]], axis=-1)
# Iterate the residual blocks in each level.
for _ in range(self._blocks_per_level):
decoder_features = unet_utils.res_block(
input_features=decoder_features,
n_channels=self._channels_per_block[::-1][level + 1],
n_down_channels=self._down_channels_per_block[::-1][level + 1],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
return {'decoder_features': decoder_features,
'encoder_features': encoder_outputs,
'distributions': distributions,
'used_latents': used_latents}
class _StitchingDecoder(snt.AbstractModule):
"""A module that completes the truncated U-Net decoder.
Using the output of the HierarchicalCore this module fills in the missing
decoder levels such that together the two form a symmetric U-Net.
"""
def __init__(self, latent_dims, channels_per_block, num_classes,
down_channels_per_block=None, activation_fn=tf.nn.relu,
initializers=None, regularizers=None, convs_per_block=3,
blocks_per_level=3, name='StitchingDecoder'):
"""Initializes a StichtingDecoder.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
each scale. The length of the list indicates the number of U-Net
decoder scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
num_classes: An integer specifying the number of segmentation classes.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used when
the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for the
bias is a zero initializer.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes a
single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
name: A string specifying the name of the module.
"""
super(_StitchingDecoder, self).__init__(name=name)
self._latent_dims = latent_dims
self._channels_per_block = channels_per_block
self._num_classes = num_classes
self._activation_fn = activation_fn
self._initializers = initializers
self._regularizers = regularizers
self._convs_per_block = convs_per_block
self._blocks_per_level = blocks_per_level
if down_channels_per_block is None:
down_channels_per_block = channels_per_block
self._down_channels_per_block = down_channels_per_block
def _build(self, encoder_features, decoder_features):
"""Build-method that returns the segmentation logits.
Args:
encoder_features: A list of tensors of shape (b,h_i,w_i,c_i).
decoder_features: A tensor of shape (b,h,w,c).
Returns:
Logits, i.e. a tensor of shape (b,h,w,num_classes).
"""
num_latents = len(self._latent_dims)
start_level = num_latents + 1
num_levels = len(self._channels_per_block)
for level in range(start_level, num_levels, 1):
decoder_features = unet_utils.resize_up(decoder_features, scale=2)
decoder_features = tf.concat([decoder_features,
encoder_features[::-1][level]], axis=-1)
for _ in range(self._blocks_per_level):
decoder_features = unet_utils.res_block(
input_features=decoder_features,
n_channels=self._channels_per_block[::-1][level],
n_down_channels=self._down_channels_per_block[::-1][level],
activation_fn=self._activation_fn,
initializers=self._initializers,
regularizers=self._regularizers,
convs_per_block=self._convs_per_block)
return snt.Conv2D(output_channels=self._num_classes,
kernel_shape=(1, 1),
padding='SAME',
initializers=self._initializers,
regularizers=self._regularizers,
name='logits')(decoder_features)
class HierarchicalProbUNet(snt.AbstractModule):
"""A Hierarchical Probabilistic U-Net."""
def __init__(self,
latent_dims=(1, 1, 1, 1),
channels_per_block=None,
num_classes=2,
down_channels_per_block=None,
activation_fn=tf.nn.relu,
initializers=None,
regularizers=None,
convs_per_block=3,
blocks_per_level=3,
loss_kwargs=None,
name='HPUNet'):
"""Initializes a HierarchicalProbUNet.
The default values are set as for the LIDC-IDRI experiments in
`A Hierarchical Probabilistic U-Net for Modeling Multi-Scale Ambiguities',
see https://arxiv.org/abs/1905.13077.
Args:
latent_dims: List of integers specifying the dimensions of the latents at
        each scale. The length of the list indicates the number of U-Net
decoder scales that have latents.
channels_per_block: A list of integers specifying the number of output
channels for each encoder block.
num_classes: An integer specifying the number of segmentation classes.
down_channels_per_block: A list of integers specifying the number of
intermediate channels for each encoder block. If None, the
intermediate channels are chosen equal to channels_per_block.
activation_fn: A callable activation function.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b').
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b').
convs_per_block: An integer specifying the number of convolutional layers.
blocks_per_level: An integer specifying the number of residual blocks per
level.
loss_kwargs: None or dictionary specifying the loss setup.
name: A string specifying the name of the module.
"""
super(HierarchicalProbUNet, self).__init__(name=name)
base_channels = 24
default_channels_per_block = (
base_channels, 2 * base_channels, 4 * base_channels, 8 * base_channels,
8 * base_channels, 8 * base_channels, 8 * base_channels,
8 * base_channels
)
if channels_per_block is None:
channels_per_block = default_channels_per_block
if down_channels_per_block is None:
      # Integer division keeps the intermediate channel counts integral.
      down_channels_per_block =\
          tuple([i // 2 for i in default_channels_per_block])
if initializers is None:
initializers = {
'w': tf.orthogonal_initializer(gain=1.0, seed=None),
'b': tf.truncated_normal_initializer(stddev=0.001)
}
if regularizers is None:
regularizers = {
'w': tf.keras.regularizers.l2(1e-5),
'b': tf.keras.regularizers.l2(1e-5)
}
if loss_kwargs is None:
self._loss_kwargs = {
'type': 'geco',
'top_k_percentage': 0.02,
'deterministic_top_k': False,
'kappa': 0.05,
'decay': 0.99,
'rate': 1e-2,
'beta': None
}
else:
self._loss_kwargs = loss_kwargs
if down_channels_per_block is None:
down_channels_per_block = channels_per_block
with self._enter_variable_scope():
self._prior = _HierarchicalCore(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='prior')
self._posterior = _HierarchicalCore(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='posterior')
self._f_comb = _StitchingDecoder(
latent_dims=latent_dims,
channels_per_block=channels_per_block,
num_classes=num_classes,
down_channels_per_block=down_channels_per_block,
activation_fn=activation_fn,
initializers=initializers,
regularizers=regularizers,
convs_per_block=convs_per_block,
blocks_per_level=blocks_per_level,
name='f_comb')
if self._loss_kwargs['type'] == 'geco':
self._moving_average = geco_utils.MovingAverage(
decay=self._loss_kwargs['decay'], differentiable=True,
name='ma_test')
self._lagmul = geco_utils.LagrangeMultiplier(
rate=self._loss_kwargs['rate'])
self._cache = ()
def _build(self, seg, img):
"""Inserts all ops used during training into the graph exactly once.
The first time this method is called given the input pair (seg, img) all
ops relevant for training are inserted into the graph. Calling this method
more than once does not re-insert the modules into the graph (memoization),
thus preventing multiple forward passes of submodules for the same inputs.
The method is private and called when setting up the loss.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c)
Returns: None
"""
inputs = (seg, img)
if self._cache == inputs:
return
else:
self._q_sample = self._posterior(
tf.concat([seg, img], axis=-1), mean=False)
self._q_sample_mean = self._posterior(
tf.concat([seg, img], axis=-1), mean=True)
self._p_sample = self._prior(
img, mean=False, z_q=None)
self._p_sample_z_q = self._prior(
img, z_q=self._q_sample['used_latents'])
self._p_sample_z_q_mean = self._prior(
img, z_q=self._q_sample_mean['used_latents'])
self._cache = inputs
return
def sample(self, img, mean=False, z_q=None):
"""Sample a segmentation from the prior, given an input image.
Args:
img: A tensor of shape (b, h, w, c).
mean: A boolean or a list of booleans. If a boolean, it specifies whether
or not to use the distributions' means in ALL latent scales. If a list,
each bool therein specifies whether or not to use the scale's mean. If
False, the latents of the scale are sampled.
z_q: None or a list of tensors. If not None, z_q provides external latents
to be used instead of sampling them. This is used to employ posterior
latents in the prior during training. Therefore, if z_q is not None, the
        value of `mean` is ignored. If z_q is None, either the distribution's
        mean is used (in case `mean` for the respective scale is True) or else
        a sample from the distribution is drawn.
Returns:
A segmentation tensor of shape (b, h, w, num_classes).
"""
prior_out = self._prior(img, mean, z_q)
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
return self._f_comb(encoder_features=encoder_features,
decoder_features=decoder_features)
def reconstruct(self, seg, img, mean=False):
"""Reconstruct a segmentation using the posterior.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mean: A boolean, specifying whether to sample from the full hierarchy of
the posterior or use the posterior means at each scale of the hierarchy.
Returns:
A segmentation tensor of shape (b,h,w,num_classes).
"""
self._build(seg, img)
if mean:
prior_out = self._p_sample_z_q_mean
else:
prior_out = self._p_sample_z_q
encoder_features = prior_out['encoder_features']
decoder_features = prior_out['decoder_features']
return self._f_comb(encoder_features=encoder_features,
decoder_features=decoder_features)
def rec_loss(self, seg, img, mask=None, top_k_percentage=None,
deterministic=True):
"""Cross-entropy reconstruction loss employed in the ELBO-/ GECO-objective.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mask: A mask of shape (b, h, w) or None. If None no pixels are masked in
the loss.
top_k_percentage: None or a float in (0.,1.]. If None, a standard
cross-entropy loss is calculated.
deterministic: A Boolean indicating whether or not to produce the
prospective top-k mask deterministically.
Returns:
A dictionary holding the mean and the pixelwise sum of the loss for the
batch as well as the employed loss mask.
"""
reconstruction = self.reconstruct(seg, img, mean=False)
return geco_utils.ce_loss(
reconstruction, seg, mask, top_k_percentage, deterministic)
def kl(self, seg, img):
"""Kullback-Leibler divergence between the posterior and the prior.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
Returns:
A dictionary with keys indexing the hierarchy's levels and corresponding
values holding the KL-term for each level (per batch).
"""
self._build(seg, img)
posterior_out = self._q_sample
prior_out = self._p_sample_z_q
q_dists = posterior_out['distributions']
p_dists = prior_out['distributions']
kl = {}
for level, (q, p) in enumerate(zip(q_dists, p_dists)):
# Shape (b, h, w).
kl_per_pixel = tfd.kl_divergence(q, p)
# Shape (b,).
kl_per_instance = tf.reduce_sum(kl_per_pixel, axis=[1, 2])
# Shape (1,).
kl[level] = tf.reduce_mean(kl_per_instance)
return kl
def loss(self, seg, img, mask):
"""The full training objective, either ELBO or GECO.
Args:
seg: A tensor of shape (b, h, w, num_classes).
img: A tensor of shape (b, h, w, c).
mask: A mask of shape (b, h, w) or None. If None no pixels are masked in
the loss.
Returns:
A dictionary holding the loss (with key 'loss') and the tensorboard
summaries (with key 'summaries').
"""
summaries = {}
top_k_percentage = self._loss_kwargs['top_k_percentage']
deterministic = self._loss_kwargs['deterministic_top_k']
rec_loss = self.rec_loss(seg, img, mask, top_k_percentage, deterministic)
kl_dict = self.kl(seg, img)
kl_sum = tf.reduce_sum(
        tf.stack([kl for _, kl in kl_dict.items()], axis=-1))
summaries['rec_loss_mean'] = rec_loss['mean']
summaries['rec_loss_sum'] = rec_loss['sum']
summaries['kl_sum'] = kl_sum
    for level, kl in kl_dict.items():
summaries['kl_{}'.format(level)] = kl
# Set up a regular ELBO objective.
if self._loss_kwargs['type'] == 'elbo':
loss = rec_loss['sum'] + self._loss_kwargs['beta'] * kl_sum
summaries['elbo_loss'] = loss
# Set up a GECO objective (ELBO with a reconstruction constraint).
elif self._loss_kwargs['type'] == 'geco':
ma_rec_loss = self._moving_average(rec_loss['sum'])
mask_sum_per_instance = tf.reduce_sum(rec_loss['mask'], axis=-1)
num_valid_pixels = tf.reduce_mean(mask_sum_per_instance)
reconstruction_threshold = self._loss_kwargs['kappa'] * num_valid_pixels
rec_constraint = ma_rec_loss - reconstruction_threshold
lagmul = self._lagmul(rec_constraint)
loss = lagmul * rec_constraint + kl_sum
summaries['geco_loss'] = loss
summaries['ma_rec_loss_mean'] = ma_rec_loss / num_valid_pixels
summaries['num_valid_pixels'] = num_valid_pixels
summaries['lagmul'] = lagmul
else:
      raise NotImplementedError('Loss type {} not implemented!'.format(
self._loss_kwargs['type']))
return dict(supervised_loss=loss, summaries=summaries)
if __name__ == '__main__':
hpu_net = HierarchicalProbUNet()
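  # Illustrative graph-construction sketch (not part of the original script),
  # assuming the TF1 graph-mode setup used throughout this file. The 128x128
  # grayscale images and 2-class one-hot segmentations are example shapes
  # chosen here for illustration; any spatial size divisible by 2**7 works
  # with the default 8-level encoder.
  img = tf.placeholder(tf.float32, shape=(None, 128, 128, 1))
  seg = tf.placeholder(tf.float32, shape=(None, 128, 128, 2))
  sampled_logits = hpu_net.sample(img)
  loss_dict = hpu_net.loss(seg, img, mask=None)
  print(sampled_logits, loss_dict['supervised_loss'])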
| deepmind-research-master | hierarchical_probabilistic_unet/model.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['numpy', 'dm-sonnet==1.35', 'tensorflow==1.14',
'tensorflow-probability==0.7.0']
setup(
name='hpu_net',
version='0.1',
description='A library for the Hierarchical Probabilistic U-Net model.',
url='https://github.com/deepmind/deepmind-research/hierarchical_probabilistic_unet',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| deepmind-research-master | hierarchical_probabilistic_unet/setup.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Architectural blocks and utility functions of the U-Net."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sonnet as snt
import tensorflow.compat.v1 as tf
def res_block(input_features, n_channels, n_down_channels=None,
activation_fn=tf.nn.relu, initializers=None, regularizers=None,
convs_per_block=3):
"""A pre-activated residual block.
Args:
input_features: A tensor of shape (b, h, w, c).
n_channels: An integer specifying the number of output channels.
n_down_channels: An integer specifying the number of intermediate channels.
activation_fn: A callable activation function.
initializers: Initializers for the weights and biases.
regularizers: Regularizers for the weights and biases.
convs_per_block: An Integer specifying the number of convolutional layers.
Returns:
A tensor of shape (b, h, w, c).
"""
# Pre-activate the inputs.
skip = input_features
residual = activation_fn(input_features)
# Set the number of intermediate channels that we compress to.
if n_down_channels is None:
n_down_channels = n_channels
for c in range(convs_per_block):
residual = snt.Conv2D(n_down_channels,
(3, 3),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(residual)
if c < convs_per_block - 1:
residual = activation_fn(residual)
incoming_channels = input_features.shape[-1]
if incoming_channels != n_channels:
skip = snt.Conv2D(n_channels,
(1, 1),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(skip)
if n_down_channels != n_channels:
residual = snt.Conv2D(n_channels,
(1, 1),
padding='SAME',
initializers=initializers,
regularizers=regularizers)(residual)
return skip + residual
def resize_up(input_features, scale=2):
"""Nearest neighbor rescaling-operation for the input features.
Args:
input_features: A tensor of shape (b, h, w, c).
scale: An integer specifying the scaling factor.
Returns: A tensor of shape (b, scale * h, scale * w, c).
"""
assert scale >= 1
_, size_x, size_y, _ = input_features.shape.as_list()
new_size_x = int(round(size_x * scale))
new_size_y = int(round(size_y * scale))
return tf.image.resize(
input_features,
[new_size_x, new_size_y],
align_corners=True,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
def resize_down(input_features, scale=2):
"""Average pooling rescaling-operation for the input features.
Args:
input_features: A tensor of shape (b, h, w, c).
scale: An integer specifying the scaling factor.
Returns: A tensor of shape (b, h / scale, w / scale, c).
"""
assert scale >= 1
return tf.nn.avg_pool2d(
input_features,
ksize=(1, scale, scale, 1),
strides=(1, scale, scale, 1),
padding='VALID')
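def _example_encoder_level():
  """Illustrative sketch (not part of the original module).
  Shows how one encoder level of the model composes the utilities above: a
  residual block followed by a 2x average-pool downsampling. The shapes are
  arbitrary example values.
  """
  features = tf.ones([1, 32, 32, 8])
  features = res_block(
      input_features=features,
      n_channels=16,
      n_down_channels=8,
      convs_per_block=3)  # shape (1, 32, 32, 16)
  return resize_down(features, scale=2)  # shape (1, 16, 16, 16)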
| deepmind-research-master | hierarchical_probabilistic_unet/unet_utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing code for computing various metrics for training and evaluation."""
from typing import Callable, Dict, Optional
import distrax
import haiku as hk
import jax
import jax.nn as nn
import jax.numpy as jnp
import numpy as np
import physics_inspired_models.utils as utils
_ReconstructFunc = Callable[[utils.Params, jnp.ndarray, jnp.ndarray, bool],
distrax.Distribution]
def calculate_small_latents(dist, threshold=0.5):
"""Calculates the number of active latents by thresholding the variance of their distribution."""
if not isinstance(dist, distrax.Normal):
raise NotImplementedError()
latent_means = dist.mean()
latent_stddevs = dist.variance()
small_latents = jnp.sum(
(latent_stddevs < threshold) & (jnp.abs(latent_means) > 0.1), axis=1)
return jnp.mean(small_latents)
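def _example_small_latents():
  """Illustrative sketch (not part of the original module).
  A latent dimension is counted when its inferred variance is below the
  threshold and its mean is away from zero; here all 6 dimensions of every
  batch element qualify, so the function returns 6.0.
  """
  q_z = distrax.Normal(loc=jnp.ones([4, 6]), scale=0.1 * jnp.ones([4, 6]))
  return calculate_small_latents(q_z)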
def compute_scale(
targets: jnp.ndarray,
rescale_by: str
) -> jnp.ndarray:
"""Compute a scaling factor based on targets shape and the rescale_by argument."""
if rescale_by == "pixels_and_time":
return jnp.asarray(np.prod(targets.shape[-4:]))
elif rescale_by is not None:
raise ValueError(f"Unrecognized rescale_by={rescale_by}.")
else:
return jnp.ones([])
def compute_data_domain_stats(
p_x: distrax.Distribution,
targets: jnp.ndarray
) -> Dict[str, jnp.ndarray]:
"""Compute several statistics in the data domain, such as L2 and negative log likelihood."""
axis = tuple(range(2, targets.ndim))
l2_over_time = jnp.sum((p_x.mean() - targets) ** 2, axis=axis)
l2 = jnp.sum(l2_over_time, axis=1)
# Calculate relative L2 normalised by image "length"
norm_factor = jnp.sum(targets**2, axis=(2, 3, 4))
l2_over_time_norm = l2_over_time / norm_factor
l2_norm = jnp.sum(l2_over_time_norm, axis=1)
# Compute negative log-likelihood under p(x)
  neg_log_p_x_over_time = - jnp.sum(p_x.log_prob(targets), axis=axis)
neg_log_p_x = jnp.sum(neg_log_p_x_over_time, axis=1)
return dict(
neg_log_p_x_over_time=neg_log_p_x_over_time,
neg_log_p_x=neg_log_p_x,
l2_over_time=l2_over_time,
l2=l2,
l2_over_time_norm=l2_over_time_norm,
l2_norm=l2_norm,
)
def compute_vae_stats(
neg_log_p_x: jnp.ndarray,
rng: jnp.ndarray,
q_z: distrax.Distribution,
prior: distrax.Distribution
) -> Dict[str, jnp.ndarray]:
"""Compute the KL(q(z|x)||p(z)) and the negative ELBO, which are used for VAE models."""
# Compute the KL
kl = distrax.estimate_kl_best_effort(q_z, prior, rng_key=rng, num_samples=1)
  kl = jnp.sum(kl, axis=tuple(range(1, kl.ndim)))
# Sanity check
assert kl.shape == neg_log_p_x.shape
return dict(
kl=kl,
neg_elbo=neg_log_p_x + kl,
)
def training_statistics(
p_x: distrax.Distribution,
targets: jnp.ndarray,
rescale_by: Optional[str],
rng: Optional[jnp.ndarray] = None,
q_z: Optional[distrax.Distribution] = None,
prior: Optional[distrax.Distribution] = None,
p_x_learned_sigma: bool = False
) -> Dict[str, jnp.ndarray]:
"""Computes various statistics we track during training."""
stats = compute_data_domain_stats(p_x, targets)
if rng is not None and q_z is not None and prior is not None:
stats.update(compute_vae_stats(stats["neg_log_p_x"], rng, q_z, prior))
else:
assert rng is None and q_z is None and prior is None
# Rescale these stats accordingly
scale = compute_scale(targets, rescale_by)
# Note that "_over_time" stats are getting normalised by time here
stats = jax.tree_map(lambda x: x / scale, stats)
if p_x_learned_sigma:
stats["p_x_sigma"] = p_x.variance().reshape([-1])[0]
if q_z is not None:
stats["small_latents"] = calculate_small_latents(q_z)
return stats
def evaluation_only_statistics(
reconstruct_func: _ReconstructFunc,
params: hk.Params,
inputs: jnp.ndarray,
rng: jnp.ndarray,
rescale_by: str,
can_run_backwards: bool,
train_sequence_length: int,
reconstruction_skip: int,
p_x_learned_sigma: bool = False,
) -> Dict[str, jnp.ndarray]:
"""Computes various statistics we track only during evaluation."""
full_trajectory = utils.extract_image(inputs)
prefixes = ("forward", "backward") if can_run_backwards else ("forward",)
full_forward_targets = jax.tree_map(
lambda x: x[:, reconstruction_skip:], full_trajectory)
full_backward_targets = jax.tree_map(
lambda x: x[:, :x.shape[1]-reconstruction_skip], full_trajectory)
train_targets_length = train_sequence_length - reconstruction_skip
full_targets_length = full_forward_targets.shape[1]
stats = dict()
keys = ()
for prefix in prefixes:
# Fully unroll the model and reconstruct the whole sequence
full_prediction = reconstruct_func(params, full_trajectory, rng,
prefix == "forward")
assert isinstance(full_prediction, distrax.Normal)
full_targets = (full_forward_targets if prefix == "forward" else
full_backward_targets)
# In cases where the model can run backwards it is possible to reconstruct
    # parts which were intended to be skipped, so here we take care of that.
if full_prediction.mean().shape[1] > full_targets_length:
if prefix == "forward":
full_prediction = jax.tree_map(lambda x: x[:, -full_targets_length:],
full_prediction)
else:
full_prediction = jax.tree_map(lambda x: x[:, :full_targets_length],
full_prediction)
# Based on the prefix and suffix fetch correct predictions and targets
for suffix in ("train", "extrapolation", "full"):
if prefix == "forward" and suffix == "train":
predict, targets = jax.tree_map(lambda x: x[:, :train_targets_length],
(full_prediction, full_targets))
elif prefix == "forward" and suffix == "extrapolation":
predict, targets = jax.tree_map(lambda x: x[:, train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "train":
predict, targets = jax.tree_map(lambda x: x[:, -train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "extrapolation":
predict, targets = jax.tree_map(lambda x: x[:, :-train_targets_length],
(full_prediction, full_targets))
else:
predict, targets = full_prediction, full_targets
# Compute train statistics
train_stats = training_statistics(predict, targets, rescale_by,
p_x_learned_sigma=p_x_learned_sigma)
for key, value in train_stats.items():
stats[prefix + "_" + suffix + "_" + key] = value
# Copy all stats keys
keys = tuple(train_stats.keys())
  # Make a combined metric averaging the forward and backward statistics.
  if can_run_backwards:
    # Average the forward and backward versions of every statistic.
for suffix in ("train", "extrapolation", "full"):
for key in keys:
forward = stats["forward_" + suffix + "_" + key]
backward = stats["backward_" + suffix + "_" + key]
combined = (forward + backward) / 2
stats["combined_" + suffix + "_" + key] = combined
return stats
def geco_objective(
l2_loss,
kl,
alpha,
kappa,
constraint_ema,
lambda_var,
is_training
) -> Dict[str, jnp.ndarray]:
"""Computes the objective for GECO and some of it statistics used ofr updates."""
# C_t
constraint_t = l2_loss - kappa
if is_training:
# We update C_ma only during training
constraint_ema = alpha * constraint_ema + (1 - alpha) * constraint_t
lagrange = nn.softplus(lambda_var)
lagrange = jnp.broadcast_to(lagrange, constraint_ema.shape)
# Add this special op for getting all gradients correct
loss = utils.geco_lagrange_product(lagrange, constraint_ema, constraint_t)
return dict(
loss=loss + kl,
geco_multiplier=lagrange,
geco_constraint=constraint_t,
geco_constraint_ema=constraint_ema
)
def elbo_objective(neg_log_p_x, kl, final_beta, beta_delay, step):
"""Computes objective for optimizing the Evidence Lower Bound (ELBO)."""
if beta_delay == 0:
beta = final_beta
else:
delayed_beta = jnp.minimum(float(step) / float(beta_delay), 1.0)
beta = delayed_beta * final_beta
return dict(
loss=neg_log_p_x + beta * kl,
elbo_beta=beta
)
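def _example_elbo_warmup():
  """Illustrative sketch (not part of the original module).
  With beta_delay > 0 the KL weight is warmed up linearly from 0 to
  final_beta; at step 500 out of beta_delay=1000 the effective beta is 0.5,
  so the returned loss is 10.0 + 0.5 * 2.0 = 11.0.
  """
  return elbo_objective(
      neg_log_p_x=jnp.asarray(10.0),
      kl=jnp.asarray(2.0),
      final_beta=1.0,
      beta_delay=1000,
      step=500)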
| deepmind-research-master | physics_inspired_models/metrics.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing model evaluation metric."""
import _thread as thread
import sys
import threading
import time
import warnings
from absl import logging
import distrax
import numpy as np
from sklearn import linear_model
from sklearn import model_selection
from sklearn import preprocessing
def quit_function(fn_name):
logging.error('%s took too long', fn_name)
sys.stderr.flush()
thread.interrupt_main()
def exit_after(s):
"""Use as decorator to exit function after s seconds."""
def outer(fn):
def inner(*args, **kwargs):
timer = threading.Timer(s, quit_function, args=[fn.__name__])
timer.start()
try:
result = fn(*args, **kwargs)
finally:
timer.cancel()
return result
return inner
return outer
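@exit_after(5)
def _example_bounded_call():
  """Illustrative usage sketch (not part of the original module).
  The decorator starts a timer that interrupts the main thread after the
  given number of seconds, raising KeyboardInterrupt there. Long-running
  calls are therefore wrapped in try/except KeyboardInterrupt, as done for
  the grid search in `find_best_polynomial` below.
  """
  time.sleep(1)  # Finishes well before the 5s limit; the timer is cancelled.
  return 'done'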
@exit_after(400)
def do_grid_search(data_x_exp, data_y, clf, parameters, cv):
scoring_choice = 'explained_variance'
regressor = model_selection.GridSearchCV(
clf, parameters, cv=cv, refit=True, scoring=scoring_choice)
regressor.fit(data_x_exp, data_y)
return regressor
def symplectic_matrix(dim):
"""Return anti-symmetric identity matrix of given dimensionality."""
half_dims = int(dim/2)
eye = np.eye(half_dims)
zeros = np.zeros([half_dims, half_dims])
top_rows = np.concatenate([zeros, - eye], axis=1)
bottom_rows = np.concatenate([eye, zeros], axis=1)
return np.concatenate([top_rows, bottom_rows], axis=0)
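def _example_symplectic_matrix():
  """Illustrative sketch (not part of the original module).
  For dim=4 the returned matrix is the block matrix J = [[0, -I], [I, 0]],
  which satisfies J^T = -J and J @ J = -I.
  """
  j = symplectic_matrix(4)
  assert np.allclose(j.T, -j)
  assert np.allclose(j @ j, -np.eye(4))
  return j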
def create_latent_mask(z0, dist_std_threshold=0.5):
"""Create mask based on informativeness of each latent dimension.
For stochastic models those latent dimensions that are too close to the prior
are likely to be uninformative and can be ignored.
Args:
z0: distribution or array of phase space
dist_std_threshold: informative latents have average inferred stds <
dist_std_threshold
Returns:
latent_mask_final: boolean mask of the same dimensionality as z0
"""
if isinstance(z0, distrax.Normal):
std_vals = np.mean(z0.variance(), axis=0)
elif isinstance(z0, distrax.Distribution):
raise NotImplementedError()
else:
# If the latent is deterministic, pass through all dimensions
return np.array([True]*z0.shape[-1])
tensor_shape = std_vals.shape
half_dims = int(tensor_shape[-1] / 2)
std_vals_q = std_vals[:half_dims]
std_vals_p = std_vals[half_dims:]
# Keep both q and corresponding p as either one is informative
informative_latents_inds = np.array([
x for x in range(len(std_vals_q)) if
std_vals_q[x] < dist_std_threshold or std_vals_p[x] < dist_std_threshold
])
if informative_latents_inds.shape[0] > 0:
latent_mask_final = np.zeros_like(std_vals_q)
latent_mask_final[informative_latents_inds] = 1
latent_mask_final = np.concatenate([latent_mask_final, latent_mask_final])
latent_mask_final = latent_mask_final == 1
return latent_mask_final
else:
return np.array([True]*tensor_shape[-1])
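def _example_latent_mask():
  """Illustrative sketch (not part of the original module).
  With a 4-dimensional phase space (q0, q1, p0, p1), a (q, p) pair is kept if
  either component has an average inferred variance below the threshold; here
  only the first pair qualifies, giving [True, False, True, False].
  """
  scales = np.ones([8, 4]) * np.array([0.1, 1.0, 1.0, 1.0])
  z0 = distrax.Normal(loc=np.zeros([8, 4]), scale=scales)
  return create_latent_mask(z0, dist_std_threshold=0.5)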
def standardize_data(data):
"""Applies the sklearn standardization to the data."""
scaler = preprocessing.StandardScaler()
scaler.fit(data)
return scaler.transform(data)
def find_best_polynomial(data_x, data_y, max_poly_order, rsq_threshold,
max_dim_n=32,
alpha_sweep=None,
max_iter=1000, cv=2):
"""Find minimal polynomial expansion that is sufficient to explain data using Lasso regression."""
rsq = 0
poly_order = 1
if not np.any(alpha_sweep):
alpha_sweep = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
# Avoid a large polynomial expansion for large latent sizes
if data_x.shape[-1] > max_dim_n:
print(f'>WARNING! Data is too high dimensional at {data_x.shape[-1]}')
print('>WARNING! Setting max_poly_order = 1')
max_poly_order = 1
while rsq < rsq_threshold and poly_order <= max_poly_order:
time_start = time.perf_counter()
poly = preprocessing.PolynomialFeatures(poly_order, include_bias=False)
data_x_exp = poly.fit_transform(data_x)
time_end = time.perf_counter()
print(
f'Took {time_end-time_start}s to create polynomial features of order '
f'{poly_order} and size {data_x_exp.shape[1]}.')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
time_start = time.perf_counter()
clf = linear_model.Lasso(
random_state=0, max_iter=max_iter, normalize=False, warm_start=False)
parameters = {'alpha': alpha_sweep}
try:
regressor = do_grid_search(data_x_exp, data_y, clf, parameters, cv)
time_end = time.perf_counter()
print(f'Took {time_end-time_start}s to do regression grid search.')
# Get rsq results
time_start = time.perf_counter()
clf = linear_model.Lasso(
random_state=0,
alpha=regressor.best_params_['alpha'],
max_iter=max_iter,
normalize=False,
warm_start=False)
clf.fit(data_x_exp, data_y)
rsq = clf.score(data_x_exp, data_y)
time_end = time.perf_counter()
print(f'Took {time_end-time_start}s to get rsq results.')
old_regressor = regressor
old_poly_order = poly_order
old_poly = poly
old_data_x_exp = data_x_exp
old_rsq = rsq
old_clf = clf
print(f'Polynomial of order {poly_order} with '
f' alpha={regressor.best_params_} RSQ: {rsq}')
poly_order += 1
except KeyboardInterrupt:
time_end = time.perf_counter()
print(f'Timed out after {time_end-time_start}s of doing grid search.')
# pytype: disable=name-error # py39-upgrade
print(f'Continuing with previous poly_order={old_poly_order}...')
regressor = old_regressor
poly_order = old_poly_order
poly = old_poly
data_x_exp = old_data_x_exp
rsq = old_rsq
clf = old_clf
# pytype: enable=name-error # py39-upgrade
print(f'Polynomial of order {poly_order} with '
f' alpha={regressor.best_params_} RSQ: {rsq}')
break
return clf, poly, data_x_exp, rsq
def eval_monomial_grad(feature, x, w, grad_acc):
"""Accumulates gradient from polynomial features and their weights."""
features = feature.split(' ')
variable_indices = []
grads = np.ones(len(features)) * w
for i, feature in enumerate(features):
name_and_power = feature.split('^')
if len(name_and_power) == 1:
name, power = name_and_power[0], 1
else:
name, power = name_and_power
power = int(power)
var_index = int(name[1:])
variable_indices.append(var_index)
new_prod = np.ones_like(grads) * (x[var_index] ** power)
# This needs a special case, for situation where x[index] = 0.0
if power == 1:
new_prod[i] = 1.0
else:
new_prod[i] = power * (x[var_index] ** (power - 1))
grads = grads * new_prod
grad_acc[variable_indices] += grads
return grad_acc
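def _example_monomial_grad():
  """Illustrative sketch (not part of the original module).
  Feature names follow the sklearn PolynomialFeatures convention, e.g.
  'x0^2 x1' denotes x[0]**2 * x[1]. For f(x) = w * x0^2 * x1 with w=1 the
  gradient at x=(3, 2) is (2*x0*x1, x0^2) = (12, 9).
  """
  grad_acc = np.zeros(2)
  eval_monomial_grad('x0^2 x1', x=np.array([3.0, 2.0]), w=1.0,
                     grad_acc=grad_acc)
  return grad_acc  # array([12., 9.])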
def compute_jacobian_manual(x, polynomial_features, weight_matrix, tolerance):
"""Computes the jacobian manually."""
# Put together the equation for each output var
# polynomial_features = np.array(polynomial_obj.get_feature_names())
weight_mask = np.abs(weight_matrix) > tolerance
weight_matrix = weight_mask * weight_matrix
jacobians = list()
for i in range(weight_matrix.shape[0]):
grad_accumulator = np.zeros_like(x)
for j, feature in enumerate(polynomial_features):
eval_monomial_grad(feature, x, weight_matrix[i, j], grad_accumulator)
jacobians.append(grad_accumulator)
return np.stack(jacobians)
def calculate_jacobian_prod(jacobian, noise_eps=1e-6):
"""Calculates AA*, where A=JEJ^T and A*=JE^TJ^T, which should be I."""
# Add noise as 0 in jacobian creates issues in calculations later
jacobian = jacobian + noise_eps
sym_matrix = symplectic_matrix(jacobian.shape[1])
pred = np.matmul(jacobian, sym_matrix)
pred = np.matmul(pred, np.transpose(jacobian))
pred_t = np.matmul(jacobian, np.transpose(sym_matrix))
pred_t = np.matmul(pred_t, np.transpose(jacobian))
pred_id = np.matmul(pred, pred_t)
return pred_id
def normalise_jacobian_prods(jacobian_preds):
"""Normalises Jacobians evaluated at various points by a constant."""
stacked_preds = np.stack(jacobian_preds)
# For each attempt at estimating E, get the max term, and take their average
normalisation_factor = np.mean(np.max(np.abs(stacked_preds), axis=(1, 2)))
if normalisation_factor != 0:
stacked_preds = stacked_preds/normalisation_factor
return stacked_preds
def calculate_symetric_score(
gt_data,
model_data,
max_poly_order,
max_sym_score,
rsq_threshold,
sym_threshold,
evaluation_point_n,
trajectory_n=1,
weight_tolerance=1e-5,
alpha_sweep=None,
max_iter=1000,
cv=2):
"""Finds minimal polynomial expansion to explain data using Lasso regression, gets the Jacobian of the mapping and calculates how symplectic the map is."""
model_data = model_data[..., :gt_data.shape[0], :]
  # Find polynomial expansion that explains enough variance in the gt data
print('Finding best polynomial expansion...')
time_start = time.perf_counter()
# Clean up model data to ensure it doesn't contain NaN, infinity
# or values too large for dtype('float32')
model_data = np.nan_to_num(model_data)
model_data = np.clip(model_data, -999999, 999999)
clf, poly, model_data_exp, best_rsq = find_best_polynomial(
model_data, gt_data, max_poly_order, rsq_threshold,
32, alpha_sweep, max_iter, cv)
time_end = time.perf_counter()
print(f'Took {time_end - time_start}s to find best polynomial.')
# Calculate Symplecticity score
all_raw_scores = []
features = np.array(poly.get_feature_names())
points_per_trajectory = int(len(gt_data) / trajectory_n)
for trajectory in range(trajectory_n):
random_data_inds = np.random.permutation(
range(points_per_trajectory))[:evaluation_point_n]
jacobian_preds = []
for point_ind in random_data_inds:
input_data_point = model_data[points_per_trajectory * trajectory +
point_ind]
time_start = time.perf_counter()
jacobian = compute_jacobian_manual(input_data_point, features,
clf.coef_, weight_tolerance)
pred = calculate_jacobian_prod(jacobian)
jacobian_preds.append(pred)
time_end = time.perf_counter()
print(f'Took {time_end - time_start}s to evaluate jacobian '
f'around point {point_ind}.')
# Normalise
normalised_jacobian_preds = normalise_jacobian_prods(jacobian_preds)
# The score is measured as the deviation from I
identity = np.eye(normalised_jacobian_preds.shape[-1])
scores = np.mean(np.power(normalised_jacobian_preds - identity, 2),
axis=(1, 2))
all_raw_scores.append(scores)
sym_score = np.min([np.mean(all_raw_scores), max_sym_score])
# Calculate final SyMetric score
if best_rsq > rsq_threshold and sym_score < sym_threshold:
sy_metric = 1.0
else:
sy_metric = 0.0
results = {
'poly_exp_order': poly.get_params()['degree'],
'rsq': best_rsq,
'sym': sym_score,
'SyMetric': sy_metric,
}
with np.printoptions(precision=4, suppress=True):
print(f'----------------FINAL RESULTS FOR {trajectory_n} '
'TRAJECTORIES------------------')
print(f'BEST POLYNOMIAL EXPANSION ORDER: {results["poly_exp_order"]}')
print(f'BEST RSQ (1-best): {results["rsq"]}')
print(f'SYMPLECTICITY SCORE AROUND ALL POINTS AND ALL '
f'TRAJECTORIES (0-best): {sym_score}')
print(f'SyMETRIC SCORE: {sy_metric}')
print(f'----------------FINAL RESULTS FOR {trajectory_n} '
f'TRAJECTORIES------------------')
return results, clf, poly, model_data_exp
| deepmind-research-master | physics_inspired_models/eval_metric.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the implementations of the various numerical integrators.
Higher order methods mostly taken from [1].
References:
[1] Leimkuhler, Benedict and Sebastian Reich. Simulating hamiltonian dynamics.
Vol. 14. Cambridge university press, 2004.
[2] Forest, Etienne and Ronald D. Ruth. Fourth-order symplectic integration.
Physica D: Nonlinear Phenomena 43.1 (1990): 105-117.
[3] Blanes, Sergio and Per Christian Moan. Practical symplectic partitioned
Runge–Kutta and Runge–Kutta–Nyström methods. Journal of Computational and
Applied Mathematics 142.2 (2002): 313-330.
[4] McLachlan, Robert I. On the numerical integration of ordinary differential
equations by symmetric composition methods. SIAM Journal on Scientific
Computing 16.1 (1995): 151-168.
[5] Yoshida, Haruo. Construction of higher order symplectic integrators.
Physics letters A 150.5-7 (1990): 262-268.
[6] Süli, Endre; Mayers, David (2003), An Introduction to Numerical Analysis,
Cambridge University Press, ISBN 0-521-00794-1.
[7] Hairer, Ernst; Nørsett, Syvert Paul; Wanner, Gerhard (1993), Solving
ordinary differential equations I: Nonstiff problems, Berlin, New York:
Springer-Verlag, ISBN 978-3-540-56670-0.
"""
from typing import Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import jax
from jax import lax
from jax.experimental import ode
import jax.numpy as jnp
import numpy as np
M = TypeVar("M")
TM = TypeVar("TM")
TimeInterval = Union[jnp.ndarray, Tuple[float, float]]
# _____ _
# / ____| | |
# | | __ ___ _ __ ___ _ __ __ _| |
# | | |_ |/ _ \ '_ \ / _ \ '__/ _` | |
# | |__| | __/ | | | __/ | | (_| | |
# \_____|\___|_| |_|\___|_| \__,_|_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
GeneralTangentFunction = Callable[
[
Optional[Union[float, jnp.ndarray]], # t
M # y
],
TM # dy_dt
]
GeneralIntegrator = Callable[
[
GeneralTangentFunction,
Optional[Union[float, jnp.ndarray]], # t
M, # y
jnp.ndarray, # dt
],
M # y_next
]
def solve_ivp_dt(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
All of the solvers supported here are explicit and non-adaptive. This makes
them easy to run with a fixed amount of computation and ensures solutions are
easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
y0: an instance of `M`
Initial state at `t_span[0]`.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
num_steps: Optional int.
If provided the `dt` will be treated as the same per step time interval,
applied for this many steps. In other words setting this argument is
equivalent to replicating `dt` num_steps times and stacking over axis=0.
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
if method == "adaptive":
ndim = y0.q.ndim if isinstance(y0, phase_space.PhaseSpace) else y0.ndim
signs = jnp.asarray(jnp.sign(dt))
signs = signs.reshape([-1] + [1] * (ndim - 1))
if isinstance(dt, float) or dt.ndim == 0:
true_t_eval = t0 + dt * np.arange(1, num_steps + 1)
else:
true_t_eval = t0 + dt[None] * np.arange(1, num_steps + 1)[:, None]
if isinstance(dt, float):
dt = np.asarray(dt)
if isinstance(dt, np.ndarray) and dt.ndim > 0:
      if np.any(np.abs(dt) != np.abs(dt[0])):
        raise ValueError("Not all values of `dt` were the same.")
elif isinstance(dt, jnp.ndarray) and dt.ndim > 0:
raise ValueError("The code here works only when `dy_dt` is time "
"independent and `np.abs(dt)` is the same. For this we "
"allow calling this only with numpy (not jax.numpy) "
"arrays.")
dt: jnp.ndarray = jnp.abs(jnp.asarray(dt))
dt = dt.reshape([-1])[0]
t_eval = t0 + dt * np.arange(num_steps + 1)
outputs = ode.odeint(
func=lambda y_, t_: fun(None, y_) * signs,
y0=y0,
t=jnp.abs(t_eval - t0),
**(ode_int_kwargs or dict())
)
# Note that we do not return the initial point
return true_t_eval, jax.tree_map(lambda x: x[1:], outputs)
method = get_integrator(method)
if num_steps is not None:
dt = jnp.repeat(jnp.asarray(dt)[None], repeats=num_steps, axis=0)
t_eval = t0 + jnp.cumsum(dt, axis=0)
t0 = jnp.ones_like(t_eval[..., :1]) * t0
t = jnp.concatenate([t0, t_eval[..., :-1]], axis=-1)
def loop_body(y_: M, t_dt: Tuple[jnp.ndarray, jnp.ndarray]) -> Tuple[M, M]:
t_, dt_ = t_dt
dt_: jnp.ndarray = dt_ / steps_per_dt
for _ in range(steps_per_dt):
y_ = method(fun, t_, y_, dt_)
t_ = t_ + dt_
return y_, y_
if use_scan:
return t_eval, lax.scan(loop_body, init=y0, xs=(t, dt))[1]
else:
y = [y0]
for t_and_dt_i in zip(t, dt):
y.append(loop_body(y[-1], t_and_dt_i)[0])
# Note that we do not return the initial point
return t_eval, jax.tree_map(lambda *args: jnp.stack(args, axis=0),
*y[1:])
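def _example_solve_ivp_dt():
  """Illustrative usage sketch (not part of the original module).
  Integrates the linear ODE dy/dt = -y with a fixed step size using the
  fourth-order Runge-Kutta method. The state is a plain array, which is a
  valid instance of the generic state type `M`.
  """
  t, yt = solve_ivp_dt(
      fun=lambda t, y: -y,
      y0=jnp.ones([2]),
      t0=0.0,
      dt=0.1,
      method="rk4",
      num_steps=10)
  # t has shape [10] and yt has shape [10, 2], with yt[i] ~= exp(-t[i]).
  return t, yt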
def solve_ivp_dt_two_directions(
fun: GeneralTangentFunction,
y0: M,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, GeneralIntegrator],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool = True,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> M:
"""Equivalent to `solve_ivp_dt` but you can specify unrolling the problem for a fixed number of steps in both time directions."""
yt = []
if num_steps_backward > 0:
yt_bck = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=- dt,
method=method,
num_steps=num_steps_backward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(jax.tree_map(lambda x: jnp.flip(x, axis=0), yt_bck))
if include_y0:
yt.append(jax.tree_map(lambda x: x[None], y0))
if num_steps_forward > 0:
yt_fwd = solve_ivp_dt(
fun=fun,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps_forward,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)[1]
yt.append(yt_fwd)
if len(yt) > 1:
return jax.tree_map(lambda *a: jnp.concatenate(a, axis=0), *yt)
else:
return yt[0]
def solve_ivp_t_eval(
fun: GeneralTangentFunction,
t_span: TimeInterval,
y0: M,
method: Union[str, GeneralIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, M]:
"""Solve an initial value problem for a system of ODEs using an explicit method.
This function numerically integrates a system of ordinary differential
equations given an initial value::
dy / dt = f(t, y)
y(t0) = y0
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function f(t, y) determines the differential equations.
The goal is to find y(t) approximately satisfying the differential
equations, given an initial value y(t0)=y0.
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and makes
  the solutions easily differentiable.
Args:
fun: callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here `t` is a scalar representing the time instance. `y` can be any
type `M`, including a flat array, that is registered as a
pytree. In addition, there is a type denoted as `TM` that represents
the tangent space to `M`. It is assumed that any element of `TM` can be
multiplied by arrays and scalars, can be added to other `TM` instances
as well as they can be right added to an element of `M`, that is
add(M, TM) exists. The function should return an element of `TM` that
defines the time derivative of `y`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
y0: an instance of `M`
Initial state at `t_span[0]`.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* general_euler - see `GeneralEuler`
* rk2 - see `RungaKutta2`
* rk4 - see `RungaKutta4`
* rk38 - see `RungaKutta38`
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
within `t_span`. If None then t_eval = [t_span[-1]]
steps_per_dt: int
This determines the overall step size. Between any two values of t_eval
the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt.
use_scan: bool
Whether for the loop to use `lax.scan` or a python loop
ode_int_kwargs: dict
Extra arguments to be passed to `ode.odeint` when method="adaptive"
Returns:
t: array
Time points at which the solution is evaluated.
y : an instance of M
Values of the solution at `t`.
"""
# Check for t_eval
if t_eval is None:
t_eval = np.asarray([t_span[-1]])
if isinstance(t_span[0], float) and isinstance(t_span[1], float):
t_span = np.asarray(t_span)
elif isinstance(t_span[0], float) and isinstance(t_span[1], jnp.ndarray):
t_span = (np.full_like(t_span[1], t_span[0]), t_span[1])
t_span = np.stack(t_span, axis=0)
elif isinstance(t_span[1], float) and isinstance(t_span[0], jnp.ndarray):
t_span = (t_span[0], jnp.full_like(t_span[0], t_span[1]))
t_span = np.stack(t_span, axis=0)
else:
t_span = np.stack(t_span, axis=0)
def check_span(span, ts):
# Verify t_span and t_eval
if span[0] < span[1]:
# Forward in time
if not np.all(np.logical_and(span[0] <= ts, ts <= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] < ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
else:
# Backward in time
if not np.all(np.logical_and(span[0] >= ts, ts >= span[1])):
raise ValueError("Values in `t_eval` are not within `t_span`.")
if not np.all(ts[:-1] > ts[1:]):
raise ValueError("Values in `t_eval` are not properly sorted.")
if t_span.ndim == 1:
check_span(t_span, t_eval)
elif t_span.ndim == 2:
if t_eval.ndim != 2:
raise ValueError("t_eval should have rank 2.")
for i in range(t_span.shape[1]):
check_span(t_span[:, i], t_eval[:, i])
t = np.concatenate([t_span[:1], t_eval[:-1]], axis=0)
return solve_ivp_dt(
fun=fun,
y0=y0,
t0=t_span[0],
dt=t_eval - t,
method=method,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
class RungaKutta(GeneralIntegrator):
"""A general Runga-Kutta integrator defined using a Butcher tableau."""
def __init__(
self,
a_tableau: Sequence[Sequence[float]],
b_tableau: Sequence[float],
c_tableau: Sequence[float],
order: int):
if len(b_tableau) != len(c_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of c_tableau.")
if len(b_tableau) != len(a_tableau) + 1:
raise ValueError("The length of b_tableau should be exactly one more than"
" the length of a_tableau.")
self.a_tableau = a_tableau
self.b_tableau = b_tableau
self.c_tableau = c_tableau
self.order = order
def __call__(
self,
tangent_func: GeneralTangentFunction,
t: jnp.ndarray,
y: M,
dt: jnp.ndarray
) -> M: # pytype: disable=invalid-annotation
k = [tangent_func(t, y)]
zero = jax.tree_map(jnp.zeros_like, k[0])
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (y.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (y.ndim - t.ndim))
for c_n, a_n_row in zip(self.c_tableau, self.a_tableau):
t_n = t + dt * c_n
products = [a_i * k_i for a_i, k_i in zip(a_n_row, k) if a_i != 0.0]
delta_n = sum(products, zero)
y_n = y + dt * delta_n
k.append(tangent_func(t_n, y_n))
products = [b_i * k_i for b_i, k_i in zip(self.b_tableau, k) if b_i != 0.0]
delta = sum(products, zero)
return y + dt * delta
class GeneralEuler(RungaKutta):
"""The standard Euler method (for general ODE problems)."""
def __init__(self):
super().__init__(
a_tableau=[],
b_tableau=[1.0],
c_tableau=[],
order=1
)
class RungaKutta2(RungaKutta):
"""The second order Runga-Kutta method corresponding to the mid-point rule."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0]],
b_tableau=[0.0, 1.0],
c_tableau=[1.0 / 2.0],
order=2
)
class RungaKutta4(RungaKutta):
"""The fourth order Runga-Kutta method from [6]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 2.0],
[0.0, 1.0 / 2.0],
[0.0, 0.0, 1.0]],
b_tableau=[1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0],
c_tableau=[1.0 / 2.0, 1.0 / 2.0, 1.0],
order=4
)
class RungaKutta38(RungaKutta):
"""The fourth order 3/8 rule Runga-Kutta method from [7]."""
def __init__(self):
super().__init__(
a_tableau=[[1.0 / 3.0],
[-1.0 / 3.0, 1.0],
[1.0, -1.0, 1.0]],
b_tableau=[1.0 / 8.0, 3.0 / 8.0, 3.0 / 8.0, 1.0 / 8.0],
c_tableau=[1.0 / 3.0, 2.0 / 3.0, 1.0],
order=4
)
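class _ExampleHeun(RungaKutta):
  """Illustrative sketch (not part of the original module).
  New explicit methods can be defined directly from their Butcher tableau;
  this is Heun's second-order method (the explicit trapezoidal rule).
  """
  def __init__(self):
    super().__init__(
        a_tableau=[[1.0]],
        b_tableau=[1.0 / 2.0, 1.0 / 2.0],
        c_tableau=[1.0],
        order=2
    )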
# _____ _ _ _
# / ____| | | | | (_)
# | (___ _ _ _ __ ___ _ __ | | ___ ___| |_ _ ___
# \___ \| | | | '_ ` _ \| '_ \| |/ _ \/ __| __| |/ __|
# ____) | |_| | | | | | | |_) | | __/ (__| |_| | (__
# |_____/ \__, |_| |_| |_| .__/|_|\___|\___|\__|_|\___|
# __/ | | |
# |___/ |_|
# _____ _ _ _
# |_ _| | | | | (_)
# | | _ __ | |_ ___ __ _ _ __ __ _| |_ _ ___ _ __
# | | | '_ \| __/ _ \/ _` | '__/ _` | __| |/ _ \| '_ \
# _| |_| | | | || __/ (_| | | | (_| | |_| | (_) | | | |
# |_____|_| |_|\__\___|\__, |_| \__,_|\__|_|\___/|_| |_|
# __/ |
# |___/
SymplecticIntegrator = Callable[
[
phase_space.SymplecticTangentFunction,
jnp.ndarray, # t
phase_space.PhaseSpace, # (q, p)
jnp.ndarray, # dt
],
phase_space.PhaseSpace # (q_next, p_next)
]
def solve_hamiltonian_ivp_dt(
hamiltonian: phase_space.HamiltonianFunction,
y0: phase_space.PhaseSpace,
t0: Union[float, jnp.ndarray],
dt: Union[float, jnp.ndarray],
method: Union[str, SymplecticIntegrator],
num_steps: Optional[int] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
Here t is a one-dimensional independent variable (time), y(t) is an
n-dimensional vector-valued function (state), and an n-dimensional
vector-valued function H(t, q, p) determines the value of the Hamiltonian.
The goal is to find q(t) and p(t) approximately satisfying the differential
equations, given an initial values q(t0), p(t0) = y0.q, y0.p
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation and makes
  the solutions easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
y0: an instance of `M`
Initial state at t=t0.
t0: float or array.
The initial time point of integration.
dt: array
Array containing all consecutive increments in time, at which the integral
to be evaluated. The size of this array along axis 0 defines the number of
steps that the integrator would do.
method: string or `GeneralIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
num_steps: Optional int.
      If provided, `dt` will be treated as a fixed per-step time interval
      applied for this many steps. In other words, setting this argument is
      equivalent to replicating `dt` num_steps times and stacking over axis=0.
    steps_per_dt: int
      This determines the overall step size. Each increment in `dt` is further
      subdivided into this many integrator steps.
    use_scan: bool
      Whether to use `lax.scan` for the loop or a python loop.
    ode_int_kwargs: dict
      Extra arguments to be passed to `ode.odeint` when method="adaptive".
Returns:
t: array
Time points at which the solution is evaluated.
    y: an instance of `PhaseSpace`
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
return solve_ivp_dt(
fun=dy_dt,
y0=y0,
t0=t0,
dt=dt,
method=method,
num_steps=num_steps,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
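# NOTE: The following example is an illustrative sketch added for exposition
# and is not part of the original module. It relies only on the signature of
# `solve_hamiltonian_ivp_dt` above; the Hamiltonian and all numbers are
# arbitrary choices.
def _example_solve_oscillator_dt():
  """Sketch: integrate a unit-mass harmonic oscillator with leap-frog."""
  def hamiltonian(t, s):
    del t  # The Hamiltonian is time-independent.
    return 0.5 * jnp.sum(s.q ** 2) + 0.5 * jnp.sum(s.p ** 2)
  y0 = phase_space.PhaseSpace(position=jnp.ones([1]), momentum=jnp.zeros([1]))
  # 100 steps of size dt=0.1 starting from t0=0.0.
  return solve_hamiltonian_ivp_dt(
      hamiltonian, y0=y0, t0=0.0, dt=0.1, method="leap_frog", num_steps=100)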
def solve_hamiltonian_ivp_t_eval(
hamiltonian: phase_space.HamiltonianFunction,
t_span: TimeInterval,
y0: phase_space.PhaseSpace,
method: Union[str, SymplecticIntegrator],
t_eval: Optional[jnp.ndarray] = None,
steps_per_dt: int = 1,
use_scan: bool = True,
ode_int_kwargs: Optional[Dict[str, Union[float, int]]] = None
) -> Tuple[jnp.ndarray, phase_space.PhaseSpace]:
"""Solve an initial value problem for a Hamiltonian system.
This function numerically integrates a Hamiltonian system given an
initial value::
dq / dt = dH / dp
dp / dt = - dH / dq
q(t0), p(t0) = y0.q, y0.p
  Here t is a one-dimensional independent variable (time), y(t) = (q(t), p(t))
  is the vector-valued phase-space state, and the scalar-valued function
  H(t, q, p) determines the value of the Hamiltonian.
  The goal is to find q(t) and p(t) approximately satisfying the differential
  equations, given the initial values q(t0), p(t0) = y0.q, y0.p.
  All of the solvers supported here are explicit and non-adaptive. This in
  turn makes them easy to run with a fixed amount of computation, and makes
  the solutions easily differentiable.
Args:
hamiltonian: callable
The Hamiltonian function. The calling signature is ``h(t, s)``, where
`s` is an instance of `PhaseSpace`.
t_span: 2-tuple of floats
Interval of integration (t0, tf). The solver starts with t=t0 and
integrates until it reaches t=tf.
    y0: an instance of `PhaseSpace`
Initial state at `t_span[0]`.
    method: string or `SymplecticIntegrator`
The integrator method to use. Possible values for string are:
* symp_euler - see `SymplecticEuler`
* symp_euler_q - a `SymplecticEuler` with position_first=True
* symp_euler_p - a `SymplecticEuler` with position_first=False
* leap_frog - see `LeapFrog`
* leap_frog_q - a `LeapFrog` with position_first=True
* leap_frog_p - a `LeapFrog` with position_first=False
* stormer_verlet - same as leap_frog
* stormer_verlet_q - same as leap_frog_q
* stormer_verlet_p - same as leap_frog_p
* ruth4 - see `Ruth4`,
* sym4 - see `Symmetric4`
* sym6 - see `Symmetric6`
* so4 - see `SymmetricSo4`
* so4_q - a `SymmetricSo4` with position_first=True
* so4_p - a `SymmetricSo4` with position_first=False
* so6 - see `SymmetricSo6`
* so6_q - a `SymmetricSo6` with position_first=True
* so6_p - a `SymmetricSo6` with position_first=False
* so8 - see `SymmetricSo8`
* so8_q - a `SymmetricSo8` with position_first=True
* so8_p - a `SymmetricSo8` with position_first=False
t_eval: array or None.
Times at which to store the computed solution. Must be sorted and lie
      within `t_span`. If None then t_eval = [t_span[-1]].
    steps_per_dt: int
      This determines the overall step size. Between any two values of t_eval
      the step size is `dt = (t_eval[i+1] - t_eval[i]) / steps_per_dt`.
    use_scan: bool
      Whether to use `lax.scan` for the loop or a python loop.
    ode_int_kwargs: dict
      Extra arguments to be passed to `ode.odeint` when method="adaptive".
Returns:
t: array
Time points at which the solution is evaluated.
    y: an instance of `PhaseSpace`
Values of the solution at `t`.
"""
if not isinstance(y0, phase_space.PhaseSpace):
raise ValueError("The initial state must be an instance of `PhaseSpace`.")
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
if method == "adaptive":
dy_dt = phase_space.transform_symplectic_tangent_function_using_array(dy_dt)
return solve_ivp_t_eval(
fun=dy_dt,
t_span=t_span,
y0=y0,
method=method,
t_eval=t_eval,
steps_per_dt=steps_per_dt,
use_scan=use_scan,
ode_int_kwargs=ode_int_kwargs
)
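# NOTE: Illustrative sketch added for exposition, not part of the original
# module. It shows the `t_eval`-based entry point on the same toy oscillator
# as the sketch above; the time grid and `steps_per_dt` are arbitrary.
def _example_solve_oscillator_t_eval():
  """Sketch: integrate over (0, 10) and store the solution at 100 points."""
  def hamiltonian(t, s):
    del t  # The Hamiltonian is time-independent.
    return 0.5 * jnp.sum(s.q ** 2) + 0.5 * jnp.sum(s.p ** 2)
  y0 = phase_space.PhaseSpace(position=jnp.ones([1]), momentum=jnp.zeros([1]))
  t_eval = jnp.linspace(0.1, 10.0, 100)
  return solve_hamiltonian_ivp_t_eval(
      hamiltonian, t_span=(0.0, 10.0), y0=y0, method="leap_frog",
      t_eval=t_eval, steps_per_dt=2)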
class CompositionSymplectic(SymplecticIntegrator):
"""A generalized symplectic integrator based on compositions.
Simulates Hamiltonian dynamics using a composition of symplectic steps:
q_{0} = q_init, p_{0} = p_init
    for i in [1, n]:
      p_{i} = p_{i-1} - c_{i} * dH/dq(q_{i-1}) * dt
      q_{i} = q_{i-1} + d_{i} * dH/dp(p_{i}) * dt
    q_next = q_{n}, p_next = p_{n}
This integrator always starts with updating the momentum.
The order argument is used mainly for testing to estimate the error when
integrating various systems.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
order: int):
if len(position_coefficients) != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients and "
"position_coefficients must be the same.")
if not np.allclose(sum(position_coefficients), 1.0):
raise ValueError("The sum of the position_coefficients "
"must be equal to 1.")
if not np.allclose(sum(momentum_coefficients), 1.0):
raise ValueError("The sum of the momentum_coefficients "
"must be equal to 1.")
self.momentum_coefficients = momentum_coefficients
self.position_coefficients = position_coefficients
self.order = order
def __call__(
self,
tangent_func: phase_space.SymplecticTangentFunction,
t: jnp.ndarray,
y: phase_space.PhaseSpace,
dt: jnp.ndarray
) -> phase_space.PhaseSpace:
q, p = y.q, y.p
# This is intentional to prevent a bug where one uses y later
del y
# We always broadcast opposite to numpy (e.g. leading dims (batch) count)
if dt.ndim > 0:
dt = dt.reshape(dt.shape + (1,) * (q.ndim - dt.ndim))
if t.ndim > 0:
t = t.reshape(t.shape + (1,) * (q.ndim - t.ndim))
t_q = t
t_p = t
for c, d in zip(self.momentum_coefficients, self.position_coefficients):
# Update momentum
if c != 0.0:
dp_dt = tangent_func(t_p, phase_space.PhaseSpace(q, p)).p
p = p + c * dt * dp_dt
t_p = t_p + c * dt
# Update position
if d != 0.0:
dq_dt = tangent_func(t_q, phase_space.PhaseSpace(q, p)).q
q = q + d * dt * dq_dt
t_q = t_q + d * dt
return phase_space.PhaseSpace(position=q, momentum=p)
class SymplecticEuler(CompositionSymplectic):
"""The symplectic Euler method (for Hamiltonian systems).
If position_first = True:
q_{t+1} = q_{t} + dH/dp(p_{t}) * dt
p_{t+1} = p_{t} - dH/dq(q_{t+1}) * dt
else:
p_{t+1} = p_{t} - dH/dq(q_{t}) * dt
q_{t+1} = q_{t} + dH/dp(p_{t+1}) * dt
"""
def __init__(self, position_first=True):
if position_first:
super().__init__(
momentum_coefficients=[0.0, 1.0],
position_coefficients=[1.0, 0.0],
order=1
)
else:
super().__init__(
momentum_coefficients=[1.0],
position_coefficients=[1.0],
order=1
)
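# NOTE: Illustrative sketch added for exposition, not part of the original
# module. It shows how a single integrator step is invoked directly: the
# integrator maps (tangent_func, t, y, dt) to the next `PhaseSpace`, where the
# tangent function comes from `phase_space.poisson_bracket_with_q_and_p`.
def _example_single_symplectic_euler_step():
  """Sketch: one symplectic Euler step of size 0.1 on a toy oscillator."""
  def hamiltonian(t, s):
    del t  # The Hamiltonian is time-independent.
    return 0.5 * jnp.sum(s.q ** 2) + 0.5 * jnp.sum(s.p ** 2)
  tangent_func = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
  y = phase_space.PhaseSpace(position=jnp.ones([1]), momentum=jnp.zeros([1]))
  integrator = SymplecticEuler(position_first=True)
  return integrator(tangent_func, jnp.asarray(0.0), y, jnp.asarray(0.1))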
class SymmetricCompositionSymplectic(CompositionSymplectic):
"""A generalized composition integrator that is symmetric.
The integrators produced are always of the form:
[update_q, update_p, ..., update_p, update_q]
or
[update_p, update_q, ..., update_q, update_p]
  based on the position_first argument. The method expects whichever variable
  is updated first to have one more coefficient.
"""
def __init__(
self,
momentum_coefficients: Sequence[float],
position_coefficients: Sequence[float],
position_first: bool,
order: int):
position_coefficients = list(position_coefficients)
momentum_coefficients = list(momentum_coefficients)
if position_first:
if len(position_coefficients) != len(momentum_coefficients) + 1:
raise ValueError("The number of position_coefficients must be one more "
"than momentum_coefficients when position_first=True.")
momentum_coefficients = [0.0] + momentum_coefficients
else:
if len(position_coefficients) + 1 != len(momentum_coefficients):
raise ValueError("The number of momentum_coefficients must be one more "
"than position_coefficients when position_first=True.")
position_coefficients = position_coefficients + [0.0]
super().__init__(
position_coefficients=position_coefficients,
momentum_coefficients=momentum_coefficients,
order=order
)
def symmetrize_coefficients(
coefficients: Sequence[float],
odd_number: bool
) -> Sequence[float]:
"""Symmetrizes the coefficients for an integrator."""
coefficients = list(coefficients)
if odd_number:
final = 1.0 - 2.0 * sum(coefficients)
return coefficients + [final] + coefficients[::-1]
else:
final = 0.5 - sum(coefficients)
return coefficients + [final, final] + coefficients[::-1]
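# Illustrative examples (added for exposition, not part of the original module)
# of how the coefficients are mirrored so that the result sums to one:
#   symmetrize_coefficients([0.25], odd_number=True)  -> [0.25, 0.5, 0.25]
#   symmetrize_coefficients([0.2], odd_number=False)  -> [0.2, 0.3, 0.3, 0.2]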
class LeapFrog(SymmetricCompositionSymplectic):
"""The standard Leap-Frog method (also known as Stormer-Verlet).
If position_first = True:
q_half = q_{t} + dH/dp(p_{t}) * dt / 2
p_{t+1} = p_{t} - dH/dq(q_half) * dt
q_{t+1} = q_half + dH/dp(p_{t+1}) * dt / 2
else:
p_half = p_{t} - dH/dq(q_{t}) * dt / 2
q_{t+1} = q_{t} + dH/dp(p_half) * dt
p_{t+1} = p_half - dH/dq(q_{t+1}) * dt / 2
"""
def __init__(self, position_first=False):
if position_first:
super().__init__(
position_coefficients=[0.5, 0.5],
momentum_coefficients=[1.0],
position_first=True,
order=2
)
else:
super().__init__(
position_coefficients=[1.0],
momentum_coefficients=[0.5, 0.5],
position_first=False,
order=2
)
class Ruth4(SymmetricCompositionSymplectic):
"""The Fourth order method from [2]."""
def __init__(self):
cbrt_2 = float(np.cbrt(2.0))
c = [1.0 / (2.0 - cbrt_2)]
# 3: [c1, 1.0 - 2*c1, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [1.0 / (4.0 - 2.0 * cbrt_2)]
# 4: [d1, 0.5 - d1, 0.5 - d1, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=True,
order=4
)
class Symmetric4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0792036964311957, 0.353172906049774, -0.0420650803577195]
# 7 : [c1, c2, c3, 1.0 - c1 - c2 - c3, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.209515106613362, -0.143851773179818]
# 6: [d1, d2, 0.5 - d1, 0.5 - d1, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
order=4
)
class Symmetric6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.1 in [1] (originally from [3])."""
def __init__(self):
c = [0.0502627644003922, 0.413514300428344, 0.0450798897943977,
-0.188054853819569, 0.541960678450780]
# 11 : [c1, c2, c3, c4, c5, 1.0 - sum(ci), c5, c4, c3, c2, c1]
c = symmetrize_coefficients(c, odd_number=True)
d = [0.148816447901042, -0.132385865767784, 0.067307604692185,
0.432666402578175]
# 10: [d1, d2, d3, d4, 0.5 - sum(di), 0.5 - sum(di), d4, d3, d2, d1]
d = symmetrize_coefficients(d, odd_number=False)
super().__init__(
position_coefficients=d,
momentum_coefficients=c,
position_first=False,
        order=6
)
def coefficients_based_on_composing_second_order(
weights: Sequence[float]
) -> Tuple[Sequence[float], Sequence[float]]:
"""Constructs the coefficients for methods based on second-order schemes."""
coefficients_0 = []
coefficients_1 = []
coefficients_0.append(weights[0] / 2.0)
for i in range(len(weights) - 1):
coefficients_1.append(weights[i])
coefficients_0.append((weights[i] + weights[i + 1]) / 2.0)
coefficients_1.append(weights[-1])
coefficients_0.append(weights[-1] / 2.0)
return coefficients_0, coefficients_1
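# Illustrative example (added for exposition, not part of the original module):
# composing a single second-order step with weight 1.0 recovers the leap-frog
# scheme -
#   coefficients_based_on_composing_second_order([1.0]) == ([0.5, 0.5], [1.0])
# i.e. two half-steps for one variable around a single full step for the other,
# matching the coefficients used by `LeapFrog` above.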
class SymmetricSo4(SymmetricCompositionSymplectic):
"""The fourth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.28, 0.62546642846767004501]
# 5
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=4
)
class SymmetricSo6(SymmetricCompositionSymplectic):
"""The sixth order method from Table 6.2 in [1] (originally from [5])."""
def __init__(self, position_first: bool = False):
w = [0.78451361047755726382, 0.23557321335935813368,
-1.17767998417887100695]
# 7
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=6
)
class SymmetricSo8(SymmetricCompositionSymplectic):
"""The eighth order method from Table 6.2 in [1] (originally from [4])."""
def __init__(self, position_first: bool = False):
w = [0.74167036435061295345, -0.40910082580003159400,
0.19075471029623837995, -0.57386247111608226666,
0.29906418130365592384, 0.33462491824529818378,
0.31529309239676659663]
# 15
w = symmetrize_coefficients(w, odd_number=True)
c0, c1 = coefficients_based_on_composing_second_order(w)
c_q, c_p = (c0, c1) if position_first else (c1, c0)
super().__init__(
position_coefficients=c_q,
momentum_coefficients=c_p,
position_first=position_first,
order=8
)
general_integrators = dict(
general_euler=GeneralEuler(),
rk2=RungaKutta2(),
rk4=RungaKutta4(),
rk38=RungaKutta38()
)
symplectic_integrators = dict(
symp_euler=SymplecticEuler(position_first=True),
symp_euler_q=SymplecticEuler(position_first=True),
symp_euler_p=SymplecticEuler(position_first=False),
leap_frog=LeapFrog(position_first=False),
leap_frog_q=LeapFrog(position_first=True),
leap_frog_p=LeapFrog(position_first=False),
stormer_verlet=LeapFrog(position_first=False),
stormer_verlet_q=LeapFrog(position_first=True),
stormer_verlet_p=LeapFrog(position_first=False),
ruth4=Ruth4(),
sym4=Symmetric4(),
sym6=Symmetric6(),
so4=SymmetricSo4(position_first=False),
so4_q=SymmetricSo4(position_first=True),
so4_p=SymmetricSo4(position_first=False),
so6=SymmetricSo6(position_first=False),
so6_q=SymmetricSo6(position_first=True),
so6_p=SymmetricSo6(position_first=False),
so8=SymmetricSo8(position_first=False),
so8_q=SymmetricSo8(position_first=True),
so8_p=SymmetricSo8(position_first=False),
)
def get_integrator(
name_or_callable: Union[str, GeneralIntegrator]
) -> GeneralIntegrator:
"""Returns any integrator with the provided name or the argument."""
if isinstance(name_or_callable, str):
if name_or_callable in general_integrators:
return general_integrators[name_or_callable]
elif name_or_callable in symplectic_integrators:
return symplectic_integrators[name_or_callable]
else:
raise ValueError(f"Unrecognized integrator with name {name_or_callable}.")
if not callable(name_or_callable):
raise ValueError(f"Expected a callable, but got {type(name_or_callable)}.")
return name_or_callable
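# Illustrative usage (added for exposition, not part of the original module):
#   get_integrator("leap_frog")  # look up a registered integrator by name
#   get_integrator(Ruth4())      # or pass any integrator instance through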
| deepmind-research-master | physics_inspired_models/integrators.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | physics_inspired_models/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
from setuptools import setup
REQUIRED_PACKAGES = (
"dm_hamiltonian_dynamics_suite@git+https://github.com/deepmind/dm_hamiltonian_dynamics_suite", # pylint: disable=line-too-long.
"absl-py>=0.12.0",
"numpy>=1.16.4",
"scikit-learn>=1.0",
"typing>=3.7.4.3",
"jax==0.2.20",
"jaxline==0.0.3",
"distrax==0.0.2",
"optax==0.0.6",
"dm-haiku==0.0.3",
)
LONG_DESCRIPTION = "\n".join([
"A codebase containing the implementation of the following models:",
"Hamiltonian Generative Network (HGN)",
"Lagrangian Generative Network (LGN)",
"Neural ODE",
"Recurrent Generative Network (RGN)",
"and RNN, LSTM and GRU.",
"This is code accompanying the publication of:"
])
setup(
name="physics_inspired_models",
version="0.0.1",
description="Implementation of multiple physically inspired models.",
long_description=LONG_DESCRIPTION,
url="https://github.com/deepmind/deepmind-research/physics_inspired_models",
author="DeepMind",
package_dir={"physics_inspired_models": "."},
packages=["physics_inspired_models", "physics_inspired_models.models"],
install_requires=REQUIRED_PACKAGES,
platforms=["any"],
license="Apache License, Version 2.0",
)
| deepmind-research-master | physics_inspired_models/setup.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities functions for Jax."""
from collections import abc
import functools
from typing import Any, Callable, Dict, Mapping, Union
import distrax
import jax
from jax import core
from jax import lax
from jax import nn
import jax.numpy as jnp
from jax.tree_util import register_pytree_node
from jaxline import utils
import numpy as np
HaikuParams = Mapping[str, Mapping[str, jnp.ndarray]]
Params = Union[Mapping[str, jnp.ndarray], HaikuParams, jnp.ndarray]
_Activation = Callable[[jnp.ndarray], jnp.ndarray]
tf_leaky_relu = functools.partial(nn.leaky_relu, negative_slope=0.2)
def filter_only_scalar_stats(stats):
return {k: v for k, v in stats.items() if v.size == 1}
def to_numpy(obj):
return jax.tree_map(np.array, obj)
@jax.custom_gradient
def geco_lagrange_product(lagrange_multiplier, constraint_ema, constraint_t):
"""Modifies the gradients so that they work as described in GECO.
The evaluation gives:
lagrange * C_ema
The gradient w.r.t lagrange:
- g * C_t
The gradient w.r.t constraint_ema:
0.0
The gradient w.r.t constraint_t:
g * lagrange
Note that if you pass the same value for `constraint_ema` and `constraint_t`
this would only flip the gradient for the lagrange multiplier.
Args:
lagrange_multiplier: The lagrange multiplier
constraint_ema: The moving average of the constraint
constraint_t: The current constraint
  Returns:
    The product `lagrange_multiplier * constraint_ema`, with the modified
    gradients described above.
  """
def grad(gradient):
return (- gradient * constraint_t,
jnp.zeros_like(constraint_ema),
gradient * lagrange_multiplier)
return lagrange_multiplier * constraint_ema, grad
def bcast_if(x, t, n):
return [x] * n if isinstance(x, t) else x
def stack_time_into_channels(
images: jnp.ndarray,
data_format: str
) -> jnp.ndarray:
axis = data_format.index("C")
list_of_time = [jnp.squeeze(v, axis=1) for v in
jnp.split(images, images.shape[1], axis=1)]
return jnp.concatenate(list_of_time, axis)
def stack_device_dim_into_batch(obj):
return jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]), obj)
def nearest_neighbour_upsampling(x, scale, data_format="NHWC"):
"""Performs nearest-neighbour upsampling."""
if data_format == "NCHW":
b, c, h, w = x.shape
x = jnp.reshape(x, [b, c, h, 1, w, 1])
ones = jnp.ones([1, 1, 1, scale, 1, scale], dtype=x.dtype)
return jnp.reshape(x * ones, [b, c, scale * h, scale * w])
elif data_format == "NHWC":
b, h, w, c = x.shape
x = jnp.reshape(x, [b, h, 1, w, 1, c])
ones = jnp.ones([1, 1, scale, 1, scale, 1], dtype=x.dtype)
return jnp.reshape(x * ones, [b, scale * h, scale * w, c])
else:
raise ValueError(f"Unrecognized data_format={data_format}.")
def get_activation(arg: Union[_Activation, str]) -> _Activation:
"""Returns an activation from provided string."""
if isinstance(arg, str):
# Try fetch in order - [this module, jax.nn, jax.numpy]
if arg in globals():
return globals()[arg]
if hasattr(nn, arg):
return getattr(nn, arg)
elif hasattr(jnp, arg):
return getattr(jnp, arg)
else:
raise ValueError(f"Unrecognized activation with name {arg}.")
if not callable(arg):
raise ValueError(f"Expected a callable, but got {type(arg)}")
return arg
def merge_first_dims(x: jnp.ndarray, num_dims_to_merge: int = 2) -> jnp.ndarray:
return x.reshape((-1,) + x.shape[num_dims_to_merge:])
def extract_image(
inputs: Union[jnp.ndarray, Mapping[str, jnp.ndarray]]
) -> jnp.ndarray:
"""Extracts a tensor with key `image` or `x_image` if it is a dict, otherwise returns the inputs."""
if isinstance(inputs, dict):
if "image" in inputs:
return inputs["image"]
else:
return inputs["x_image"]
elif isinstance(inputs, jnp.ndarray):
return inputs
raise NotImplementedError(f"Not implemented of inputs of type"
f" {type(inputs)}.")
def extract_gt_state(inputs: Any) -> jnp.ndarray:
if isinstance(inputs, dict):
return inputs["x"]
elif not isinstance(inputs, jnp.ndarray):
raise NotImplementedError(f"Not implemented of inputs of type"
f" {type(inputs)}.")
return inputs
def reshape_latents_conv_to_flat(conv_latents, axis_n_to_keep=1):
q, p = jnp.split(conv_latents, 2, axis=-1)
q = jax.tree_map(lambda x: x.reshape(x.shape[:axis_n_to_keep] + (-1,)), q)
p = jax.tree_map(lambda x: x.reshape(x.shape[:axis_n_to_keep] + (-1,)), p)
flat_latents = jnp.concatenate([q, p], axis=-1)
return flat_latents
def triu_matrix_from_v(x, ndim):
assert x.shape[-1] == (ndim * (ndim + 1)) // 2
matrix = jnp.zeros(x.shape[:-1] + (ndim, ndim))
idx = jnp.triu_indices(ndim)
index_update = lambda x, idx, y: x.at[idx].set(y)
for _ in range(x.ndim - 1):
index_update = jax.vmap(index_update, in_axes=(0, None, 0))
return index_update(matrix, idx, x)
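# Illustrative example (added for exposition, not part of the original module):
# for ndim=2 the last axis of `x` must hold 3 values [a, b, c], which fill the
# upper triangle as
#   [[a, b],
#    [0, c]]
# with any leading axes of `x` treated as batch dimensions via vmap.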
def flatten_dict(d, parent_key: str = "", sep: str = "_") -> Dict[str, Any]:
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
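# Illustrative example (added for exposition, not part of the original module):
#   flatten_dict({"a": {"b": 1, "c": 2}, "d": 3}) == {"a_b": 1, "a_c": 2, "d": 3}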
def convert_to_pytype(target, reference):
"""Makes target the same pytype as reference, by jax.tree_flatten."""
_, pytree = jax.tree_flatten(reference)
leaves, _ = jax.tree_flatten(target)
return jax.tree_unflatten(pytree, leaves)
def func_if_not_scalar(func):
"""Makes a function that uses func only on non-scalar values."""
@functools.wraps(func)
def wrapped(array, axis=0):
if array.ndim == 0:
return array
return func(array, axis=axis)
return wrapped
mean_if_not_scalar = func_if_not_scalar(jnp.mean)
class MultiBatchAccumulator(object):
"""Class for abstracting statistics accumulation over multiple batches."""
def __init__(self):
self._obj = None
self._obj_max = None
self._obj_min = None
self._num_samples = None
def add(self, averaged_values, num_samples):
"""Adds an element to the moving average and the max."""
if self._obj is None:
self._obj_max = jax.tree_map(lambda y: y * 1.0, averaged_values)
self._obj_min = jax.tree_map(lambda y: y * 1.0, averaged_values)
self._obj = jax.tree_map(lambda y: y * num_samples, averaged_values)
self._num_samples = num_samples
else:
self._obj_max = jax.tree_map(jnp.maximum, self._obj_max,
averaged_values)
self._obj_min = jax.tree_map(jnp.minimum, self._obj_min,
averaged_values)
self._obj = jax.tree_map(lambda x, y: x + y * num_samples, self._obj,
averaged_values)
self._num_samples += num_samples
def value(self):
return jax.tree_map(lambda x: x / self._num_samples, self._obj)
def max(self):
return jax.tree_map(float, self._obj_max)
def min(self):
return jax.tree_map(float, self._obj_min)
def sum(self):
return self._obj
register_pytree_node(
distrax.Normal,
lambda instance: ([instance.loc, instance.scale], None),
lambda _, args: distrax.Normal(*args)
)
def inner_product(x: Any, y: Any) -> jnp.ndarray:
products = jax.tree_map(lambda x_, y_: jnp.sum(x_ * y_), x, y)
return sum(jax.tree_leaves(products))
get_first = utils.get_first
bcast_local_devices = utils.bcast_local_devices
py_prefetch = utils.py_prefetch
p_split = jax.pmap(lambda x, num: list(jax.random.split(x, num)),
static_broadcasted_argnums=1)
def wrap_if_pmap(p_func):
def p_func_if_pmap(obj, axis_name):
try:
core.axis_frame(axis_name)
return p_func(obj, axis_name)
except NameError:
return obj
return p_func_if_pmap
pmean_if_pmap = wrap_if_pmap(lax.pmean)
psum_if_pmap = wrap_if_pmap(lax.psum)
| deepmind-research-master | physics_inspired_models/utils.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the configurations for various models."""
import copy
import os
from jaxline import base_config
import ml_collections as collections
_DATASETS_PATH_VAR_NAME = "DM_HAMILTONIAN_DYNAMICS_SUITE_DATASETS"
def get_config(arg_string):
"""Return config object for training."""
args = arg_string.split(",")
if len(args) != 3:
raise ValueError("You must provide exactly three arguments separated by a "
"comma - model_config_name,sweep_index,dataset_name.")
model_config_name, sweep_index, dataset_name = args
sweep_index = int(sweep_index)
config = base_config.get_base_config()
config.random_seed = 123109801
config.eval_modes = ("eval", "eval_metric")
# Get the model config and the sweeps
if model_config_name not in globals():
raise ValueError(f"The config name {model_config_name} does not exist in "
f"jaxline_configs.py")
config_and_sweep_fn = globals()[model_config_name]
model_config, sweeps = config_and_sweep_fn()
if not os.environ.get(_DATASETS_PATH_VAR_NAME, None):
raise ValueError(f"You need to set the {_DATASETS_PATH_VAR_NAME}")
dm_hamiltonian_suite_path = os.environ[_DATASETS_PATH_VAR_NAME]
dataset_folder = os.path.join(dm_hamiltonian_suite_path, dataset_name)
# Experiment config. Note that batch_size is per device.
# In the experiments we run on 4 GPUs, so the effective batch size was 128.
config.experiment_kwargs = collections.ConfigDict(
dict(
config=dict(
dataset_folder=dataset_folder,
model_kwargs=model_config,
num_extrapolation_steps=60,
drop_stats_containing=("neg_log_p_x", "l2_over_time", "neg_elbo"),
optimizer=dict(
name="adam",
kwargs=dict(
learning_rate=1.5e-4,
b1=0.9,
b2=0.999,
)
),
training=dict(
batch_size=32,
burnin_steps=5,
num_epochs=None,
lagging_vae=False
),
evaluation=dict(
batch_size=64,
),
evaluation_metric=dict(
batch_size=5,
batch_n=20,
num_eval_metric_steps=60,
max_poly_order=5,
max_jacobian_score=1000,
rsq_threshold=0.9,
sym_threshold=0.05,
evaluation_point_n=10,
weight_tolerance=1e-03,
max_iter=1000,
cv=2,
alpha_min_logspace=-4,
alpha_max_logspace=-0.5,
alpha_step_n=10,
calculate_fully_after_steps=40000,
),
evaluation_metric_mlp=dict(
batch_size=64,
batch_n=10000,
datapoint_param_multiplier=1000,
num_eval_metric_steps=60,
evaluation_point_n=10,
evaluation_trajectory_n=50,
rsq_threshold=0.9,
sym_threshold=0.05,
ridge_lambda=0.01,
model=dict(
num_units=4,
num_layers=4,
activation="tanh",
),
optimizer=dict(
name="adam",
kwargs=dict(
learning_rate=1.5e-3,
)
),
),
evaluation_vpt=dict(
batch_size=5,
batch_n=2,
vpt_threshold=0.025,
)
)
)
)
# Training loop config.
config.training_steps = int(500000)
config.interval_type = "steps"
config.log_tensors_interval = 50
config.log_train_data_interval = 50
config.log_all_train_data = False
config.save_checkpoint_interval = 100
config.checkpoint_dir = "/tmp/physics_inspired_models/"
config.train_checkpoint_all_hosts = False
config.eval_specific_checkpoint_dir = ""
config.update_from_flattened_dict(sweeps[sweep_index])
return config
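# Illustrative usage (added for exposition, not part of the original module).
# The argument string must contain exactly three comma-separated values,
# model_config_name,sweep_index,dataset_name, for example:
#   get_config("benchmark_hgn_overlap_sweep,0,<dataset_name>")
# where <dataset_name> is a folder under the directory pointed to by the
# DM_HAMILTONIAN_DYNAMICS_SUITE_DATASETS environment variable.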
config_prefix = "experiment_kwargs.config."
model_prefix = config_prefix + "model_kwargs."
default_encoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="leaky_relu",
))
default_decoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="leaky_relu",
))
default_latent_system_net_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_units=250,
num_layers=5,
activation="swish",
))
default_latent_system_kwargs = collections.ConfigDict(dict(
# Physics model arguments
input_space=collections.config_dict.placeholder(str),
simulation_space=collections.config_dict.placeholder(str),
potential_func_form="separable_net",
kinetic_func_form=collections.config_dict.placeholder(str),
hgn_kinetic_func_form="separable_net",
lgn_kinetic_func_form="matrix_dep_quad",
parametrize_mass_matrix=collections.config_dict.placeholder(bool),
hgn_parametrize_mass_matrix=False,
lgn_parametrize_mass_matrix=True,
mass_eps=1.0,
# ODE model arguments
integrator_method=collections.config_dict.placeholder(str),
# RGN model arguments
residual=collections.config_dict.placeholder(bool),
# General arguments
net_kwargs=default_latent_system_net_kwargs
))
default_config_dict = collections.ConfigDict(dict(
name=collections.config_dict.placeholder(str),
latent_system_dim=32,
latent_system_net_type="mlp",
latent_system_kwargs=default_latent_system_kwargs,
encoder_aggregation_type="linear_projection",
decoder_de_aggregation_type=collections.config_dict.placeholder(str),
encoder_kwargs=default_encoder_kwargs,
decoder_kwargs=default_decoder_kwargs,
has_latent_transform=False,
num_inference_steps=5,
num_target_steps=60,
latent_training_type="forward",
# Choices: overlap_by_one, no_overlap, include_inference
training_data_split="overlap_by_one",
objective_type="ELBO",
elbo_beta_delay=0,
elbo_beta_final=1.0,
geco_kappa=0.001,
geco_alpha=0.0,
dt=0.125,
))
hgn_paper_encoder_kwargs = collections.ConfigDict(dict(
conv_channels=[[32, 64], [64, 64], [64]],
num_blocks=3,
blocks_depth=2,
activation="relu",
kernel_shapes=[2, 4],
padding=["VALID", "SAME"],
))
hgn_paper_decoder_kwargs = collections.ConfigDict(dict(
conv_channels=64,
num_blocks=3,
blocks_depth=2,
activation="tf_leaky_relu",
))
hgn_paper_latent_net_kwargs = collections.ConfigDict(dict(
conv_channels=[32, 64, 64, 64],
num_units=250,
num_layers=5,
activation="softplus",
kernel_shapes=[3, 2, 2, 2, 2],
strides=[1, 2, 1, 2, 1],
padding=["SAME", "VALID", "SAME", "VALID", "SAME"]
))
hgn_paper_latent_system_kwargs = collections.ConfigDict(dict(
potential_func_form="separable_net",
kinetic_func_form="separable_net",
parametrize_mass_matrix=False,
net_kwargs=hgn_paper_latent_net_kwargs
))
hgn_paper_latent_transform_kwargs = collections.ConfigDict(dict(
num_layers=5,
conv_channels=64,
num_units=64,
activation="relu",
))
hgn_paper_config = copy.deepcopy(default_config_dict)
hgn_paper_config.training_data_split = "include_inference"
hgn_paper_config.latent_system_net_type = "conv"
hgn_paper_config.encoder_aggregation_type = (collections.config_dict.
placeholder(str))
hgn_paper_config.decoder_de_aggregation_type = (collections.config_dict.
placeholder(str))
hgn_paper_config.latent_system_kwargs = hgn_paper_latent_system_kwargs
hgn_paper_config.encoder_kwargs = hgn_paper_encoder_kwargs
hgn_paper_config.decoder_kwargs = hgn_paper_decoder_kwargs
hgn_paper_config.has_latent_transform = True
hgn_paper_config.latent_transform_kwargs = hgn_paper_latent_transform_kwargs
hgn_paper_config.num_inference_steps = 31
hgn_paper_config.num_target_steps = 0
hgn_paper_config.objective_type = "GECO"
forward_overlap_by_one = {
model_prefix + "latent_training_type": "forward",
model_prefix + "training_data_split": "overlap_by_one",
}
forward_backward_include_inference = {
model_prefix + "latent_training_type": "forward_backward",
model_prefix + "training_data_split": "include_inference",
}
latent_training_sweep = [
forward_overlap_by_one,
forward_backward_include_inference,
]
def sym_metric_hgn_plus_plus_sweep():
"""HGN++ experimental sweep for the SyMetric paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "HGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_training_type": "forward",
model_prefix + "training_data_split": "overlap_by_one",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_training_type": "forward_backward",
model_prefix + "training_data_split": "include_inference",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
def sym_metric_hgn_sweep():
"""HGN experimental sweep for the SyMetric paper."""
model_config = copy.deepcopy(hgn_paper_config)
model_config.name = "HGN"
  return model_config, [dict()]
def benchmark_hgn_overlap_sweep():
"""HGN++ sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "HGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_lgn_sweep():
"""LGN sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "LGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_system_kwargs.kinetic_func_form":
"matrix_dep_pure_quad",
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_ode_sweep():
"""Neural ODE sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "ODE"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for integrator in ("adaptive", "rk2"):
for train_dict in latent_training_sweep:
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "integrator_method": integrator,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
sweeps[-1].update(train_dict)
return model_config, sweeps
def benchmark_rgn_sweep():
"""RGN sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "RGN"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for residual in (True, False):
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_system_kwargs.residual": residual,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
def benchmark_ar_sweep():
"""AR sweep for the benchmark paper."""
model_config = copy.deepcopy(default_config_dict)
model_config.name = "AR"
model_config.latent_dynamics_type = "vanilla"
sweeps = list()
for elbo_beta_final in [0.001, 0.1, 1.0, 2.0]:
for ar_type in ("vanilla", "lstm", "gru"):
sweeps.append({
config_prefix + "optimizer.kwargs.learning_rate": 1.5e-4,
model_prefix + "latent_dynamics_type": ar_type,
model_prefix + "elbo_beta_final": elbo_beta_final,
})
return model_config, sweeps
| deepmind-research-master | physics_inspired_models/jaxline_configs.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The training script for the HGN models."""
import functools
from absl import app
from absl import flags
from absl import logging
from dm_hamiltonian_dynamics_suite import load_datasets
import haiku as hk
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
import numpy as np
import optax
from physics_inspired_models import eval_metric
from physics_inspired_models import utils
from physics_inspired_models.models import common
AutoregressiveModel = common.autoregressive.TeacherForcingAutoregressiveModel
class HGNExperiment(experiment.AbstractExperiment):
"""HGN experiment."""
CHECKPOINT_ATTRS = {
"_params": "params",
"_state": "state",
"_opt_state": "opt_state",
}
NON_BROADCAST_CHECKPOINT_ATTRS = {
"_python_step": "python_step"
}
def __init__(self, mode, init_rng, config):
super().__init__(mode=mode)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Checkpointed experiment state.
self._python_step = None
self._params = None
self._state = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._step_fn = None
self._burnin_fn = None
self._eval_input = None
self._eval_batch = None
self._eval_input_metric = None
self._eval_input_vpt = None
self._compute_gt_state_and_latents = None
self._get_reconstructions = None
self._get_samples = None
# Construct the model
model_kwargs = dict(**self.config.model_kwargs)
self.model = common.construct_model(**model_kwargs)
# Construct the optimizer
optimizer_ctor = getattr(optax, self.config.optimizer.name)
self.optimizer = optimizer_ctor(**self.config.optimizer.kwargs)
self.model_init = jax.pmap(self.model.init)
self.opt_init = jax.pmap(self.optimizer.init)
logging.info("Number of hosts: %d/%d",
jax.process_index(), jax.process_count())
logging.info("Number of local devices: %d/%d", jax.local_device_count(),
jax.device_count())
def _process_stats(self, stats, axis_name=None):
keys_to_remove = list()
for key in stats.keys():
for dropped_keys in self.config.drop_stats_containing:
if dropped_keys in key:
keys_to_remove.append(key)
break
for key in keys_to_remove:
stats.pop(key)
# Take average statistics
stats = jax.tree_map(utils.mean_if_not_scalar, stats)
stats = utils.filter_only_scalar_stats(stats)
if axis_name is not None:
stats = utils.pmean_if_pmap(stats, axis_name="i")
return stats
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train()
# Do a small burnin to accumulate any persistent network state
if self._python_step == 0 and self._state:
for _ in range(self.config.training.burnin_steps):
rng, key = utils.p_split(rng, 2)
batch = next(self._train_input)
self._state = self._burnin_fn(self._params, self._state, key, batch)
self._state = jax.tree_map(
lambda x: x / self.config.training.burnin_steps, self._state)
batch = next(self._train_input)
self._params, self._state, self._opt_state, stats = self._step_fn(
self._params, self._state, self._opt_state, rng, batch, global_step)
self._python_step += 1
stats = utils.get_first(stats)
logging.info("global_step: %d, %s", self._python_step,
jax.tree_map(float, stats))
return stats
def _initialize_train(self):
self._train_input = utils.py_prefetch(
load_datasets.dataset_as_iter(self._build_train_input))
self._burnin_fn = jax.pmap(
self._jax_burnin_fn, axis_name="i", donate_argnums=list(range(1, 4)))
self._step_fn = jax.pmap(
self._jax_train_step_fn, axis_name="i", donate_argnums=list(range(5)))
if self._params is not None:
logging.info("Not running initialization - loaded from checkpoint.")
assert self._opt_state is not None
return
logging.info("Initializing parameters - NOT loading from checkpoint.")
# Use the same rng on all devices, so that the initialization is identical
init_rng = utils.bcast_local_devices(self.init_rng)
# Initialize the parameters and the optimizer
batch = next(self._train_input)
self._params, self._state = self.model_init(init_rng, batch)
self._python_step = 0
self._opt_state = self.opt_init(self._params)
def _build_train_input(self):
batch_size = self.config.training.batch_size
return load_datasets.load_dataset(
path=self.config.dataset_folder,
tfrecord_prefix="train",
sub_sample_length=self.model.train_sequence_length,
per_device_batch_size=batch_size,
num_epochs=self.config.training.num_epochs,
drop_remainder=True,
multi_device=True,
shuffle=True,
shuffle_buffer=100 * batch_size,
cache=False,
keys_to_preserve=["image"],
)
def _jax_train_step_fn(self, params, state, opt_state, rng_key, batch, step):
# The loss and the stats are averaged over the batch
def loss_func(*args):
outs = self.model.training_objectives(*args, is_training=True)
# Average everything over the batch
return jax.tree_map(utils.mean_if_not_scalar, outs)
# Compute gradients
grad_fn = jax.grad(loss_func, has_aux=True)
grads, (state, stats, _) = grad_fn(params, state, rng_key, batch, step)
# Average everything over the devices (e.g. average and sync)
grads, state = utils.pmean_if_pmap((grads, state), axis_name="i")
# Apply updates
updates, opt_state = self.optimizer.update(grads, opt_state)
params = optax.apply_updates(params, updates)
return params, state, opt_state, self._process_stats(stats, axis_name="i")
def _jax_burnin_fn(self, params, state, rng_key, batch):
_, (new_state, _, _) = self.model.training_objectives(
params, state, rng_key, batch, jnp.zeros([]), is_training=True)
new_state = jax.tree_map(utils.mean_if_not_scalar, new_state)
new_state = utils.pmean_if_pmap(new_state, axis_name="i")
new_state = hk.data_structures.to_mutable_dict(new_state)
new_state = hk.data_structures.to_immutable_dict(new_state)
return jax.tree_map(jnp.add, new_state, state)
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, writer):
"""See base class."""
logging.info("Starting evaluation.")
if self.mode == "eval":
if self._eval_input is None:
self._initialize_eval()
self._initialize_eval_vpt()
key1, _ = utils.p_split(rng, 2)
stats = utils.to_numpy(self._eval_epoch(global_step, key1))
stats.update(utils.to_numpy(self._eval_epoch_vpt(global_step, rng)))
elif self.mode == "eval_metric":
if self._eval_input_metric is None:
self._initialize_eval_metric()
stats = utils.to_numpy(self._eval_epoch_metric(global_step, rng))
else:
raise NotImplementedError()
logging.info("Finished evaluation.")
return stats
def _eval_epoch(self, step, rng):
"""Evaluates an epoch."""
accumulator = utils.MultiBatchAccumulator()
for batch in self._eval_input():
rng, key = utils.p_split(rng, 2)
stats, num_samples = utils.get_first(
self._eval_batch(self._params, self._state, key, batch, step)
)
accumulator.add(stats, num_samples)
return accumulator.value()
def _eval_epoch_metric(self, step, rng):
"""Evaluates an epoch."""
    # To avoid calculating SyMetric early on in training, where a large
    # polynomial expansion is likely to be required and the score is likely
    # to be bad anyway, we compute it using only a single batch to save
    # compute.
if step[0] > self.config.evaluation_metric.calculate_fully_after_steps:
batch_n = self.config.evaluation_metric.batch_n
else:
batch_n = 1
logging.info("Step: %d, batch_n: %d", step[0], batch_n)
accumulator = utils.MultiBatchAccumulator()
    for _ in range(batch_n):
batch = next(self._eval_input_metric)
rng, key = utils.p_split(rng, 2)
stats = self._eval_batch_metric(
self._params, key, batch,
eval_seq_len=self.config.evaluation_metric.num_eval_metric_steps,
)
accumulator.add(stats, 1)
stats = utils.flatten_dict(accumulator.value())
max_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.max()).items():
if any(m in k for m in max_keys):
stats[k + "_max"] = v
min_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.min()).items():
if any(m in k for m in min_keys):
stats[k + "_min"] = v
sum_keys = ("sym", "SyMetric")
for k, v in utils.flatten_dict(accumulator.sum()).items():
if any(m in k for m in sum_keys):
stats[k + "_sum"] = v
return stats
def _eval_epoch_vpt(self, step, rng):
"""Evaluates an epoch."""
accumulator = utils.MultiBatchAccumulator()
for _ in range(self.config.evaluation_vpt.batch_n):
batch = next(self._eval_input_vpt)
rng, key = utils.p_split(rng, 2)
stats = self._eval_batch_vpt(self._params, self._state, key, batch)
accumulator.add(stats, 1)
stats = utils.flatten_dict(accumulator.value())
return stats
def _reconstruct_and_align(self, rng_key, full_trajectory, prefix, suffix):
if hasattr(self.model, "training_data_split"):
if self.model.training_data_split == "overlap_by_one":
reconstruction_skip = self.model.num_inference_steps - 1
elif self.model.training_data_split == "no_overlap":
reconstruction_skip = self.model.num_inference_steps
elif self.model.training_data_split == "include_inference":
reconstruction_skip = 0
else:
raise NotImplementedError()
else:
reconstruction_skip = 1
full_forward_targets = jax.tree_map(
lambda x: x[:, :, reconstruction_skip:], full_trajectory)
full_backward_targets = jax.tree_map(
lambda x: x[:, :, :x.shape[2] - reconstruction_skip], full_trajectory)
train_targets_length = (self.model.train_sequence_length -
reconstruction_skip)
full_targets_length = full_forward_targets.shape[2]
# Fully unroll the model and reconstruct the whole sequence, take the mean
full_prediction = self._get_reconstructions(self._params, full_trajectory,
rng_key, prefix == "forward",
True).mean()
full_targets = (full_forward_targets if prefix == "forward" else
full_backward_targets)
# In cases where the model can run backwards it is possible to reconstruct
    # parts which were intended to be skipped, so here we take care of that.
if full_prediction.mean().shape[2] > full_targets_length:
if prefix == "forward":
full_prediction = jax.tree_map(
lambda x: x[:, :, -full_targets_length:], full_prediction)
else:
full_prediction = jax.tree_map(
lambda x: x[:, :, :full_targets_length], full_prediction)
# Based on the prefix and suffix fetch correct predictions and targets
if prefix == "forward" and suffix == "train":
predict, targets = jax.tree_map(
lambda x: x[:, :, :train_targets_length],
(full_prediction, full_targets))
elif prefix == "forward" and suffix == "extrapolation":
predict, targets = jax.tree_map(
lambda x: x[:, :, train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "train":
predict, targets = jax.tree_map(
lambda x: x[:, :, -train_targets_length:],
(full_prediction, full_targets))
elif prefix == "backward" and suffix == "extrapolation":
predict, targets = jax.tree_map(
lambda x: x[:, :, :-train_targets_length],
(full_prediction, full_targets))
else:
predict, targets = full_prediction, full_targets
return predict, targets
def _initialize_eval(self):
length = (self.model.train_sequence_length +
self.config.num_extrapolation_steps)
batch_size = self.config.evaluation.batch_size
self._eval_input = load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=self.config.dataset_folder,
tfrecord_prefix="test",
sub_sample_length=length,
per_device_batch_size=batch_size,
num_epochs=1,
drop_remainder=False,
shuffle=False,
cache=False,
keys_to_preserve=["image"]
)
self._eval_batch = jax.pmap(
self._jax_eval_step_fn, axis_name="i")
self._get_reconstructions = jax.pmap(
self.model.reconstruct, axis_name="i",
static_broadcasted_argnums=(3, 4))
if isinstance(self.model,
common.deterministic_vae.DeterministicLatentsGenerativeModel):
self._get_samples = jax.pmap(
self.model.sample_trajectories_from_prior,
static_broadcasted_argnums=(1, 3, 4))
def _initialize_eval_metric(self):
self._eval_input_metric = utils.py_prefetch(
load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=self.config.dataset_folder,
tfrecord_prefix="test",
sub_sample_length=None,
per_device_batch_size=self.config.evaluation_metric.batch_size,
num_epochs=None,
drop_remainder=False,
cache=False,
shuffle=False,
keys_to_preserve=["image", "x"]
)
)
def compute_gt_state_and_latents(*args):
# Note that the `dt` has to be passed as a kwargs argument
if len(args) == 4:
return self.model.gt_state_and_latents(*args[:4])
elif len(args) == 5:
return self.model.gt_state_and_latents(*args[:4], dt=args[4])
else:
raise NotImplementedError()
self._compute_gt_state_and_latents = jax.pmap(
compute_gt_state_and_latents, static_broadcasted_argnums=3)
def _initialize_eval_vpt(self):
dataset_name = self.config.dataset_folder.split("/")[-1]
dataset_folder = self.config.dataset_folder
if dataset_name in ("hnn_mass_spring_dt_0_05",
"mass_spring_colors_v1_dt_0_05",
"hnn_pendulum_dt_0_05",
"pendulum_colors_v1_dt_0_05",
"matrix_rps_dt_0_1",
"matrix_mp_dt_0_1"):
dataset_folder += "_long_trajectory"
self._eval_input_vpt = utils.py_prefetch(
load_datasets.dataset_as_iter(
load_datasets.load_dataset,
path=dataset_folder,
tfrecord_prefix="test",
sub_sample_length=None,
per_device_batch_size=self.config.evaluation_vpt.batch_size,
num_epochs=None,
drop_remainder=False,
cache=False,
shuffle=False,
keys_to_preserve=["image", "x"]
)
)
self._get_reconstructions = jax.pmap(
self.model.reconstruct, axis_name="i",
static_broadcasted_argnums=(3, 4))
def _jax_eval_step_fn(self, params, state, rng_key, batch, step):
# We care only about the statistics
_, (_, stats, _) = self.model.training_objectives(params, state, rng_key,
batch, step,
is_training=False)
# Compute the full batch size
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
batch_size = utils.psum_if_pmap(batch_size, axis_name="i")
return self._process_stats(stats, axis_name="i"), batch_size
def _eval_batch_vpt(self, params, state, rng_key, batch):
full_trajectory = utils.extract_image(batch)
prefixes = ("forward",
"backward") if self.model.can_run_backwards else ("forward",)
stats = dict()
vpt_abs_scores = []
vpt_rel_scores = []
seq_length = None
for prefix in prefixes:
reconstruction, gt_images = self._reconstruct_and_align(
rng_key, full_trajectory, prefix, "extrapolation")
seq_length = gt_images.shape[2]
mse_norm = np.mean(
(gt_images - reconstruction)**2, axis=(3, 4, 5)) / np.mean(
gt_images**2, axis=(3, 4, 5))
vpt_scores = []
for i in range(mse_norm.shape[1]):
vpt_ind = np.argwhere(
mse_norm[:, i:i + 1, :] > self.config.evaluation_vpt.vpt_threshold)
if vpt_ind.shape[0] > 0:
vpt_ind = vpt_ind[0][2]
else:
vpt_ind = mse_norm.shape[-1]
vpt_scores.append(vpt_ind)
vpt_abs_scores.append(np.median(vpt_scores))
vpt_rel_scores.append(np.median(vpt_scores) / seq_length)
scores = {"vpt_abs": vpt_abs_scores[-1], "vpt_rel": vpt_rel_scores[-1]}
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[prefix] = scores
stats["vpt_abs"] = utils.to_numpy(np.mean(vpt_abs_scores))
stats["vpt_rel"] = utils.to_numpy(np.mean(vpt_rel_scores))
logging.info("vpt_abs: %s, seq_length: %d}",
str(vpt_abs_scores), seq_length)
return stats
def _eval_batch_metric(self, params, rng, batch, eval_seq_len=200):
# Initialise alpha values for Lasso regression
alpha_sweep = np.logspace(self.config.evaluation_metric.alpha_min_logspace,
self.config.evaluation_metric.alpha_max_logspace,
self.config.evaluation_metric.alpha_step_n)
trajectory_n = self.config.evaluation_metric.batch_size
subsection = f"{trajectory_n}tr"
stats = dict()
# Get data
(gt_trajectory,
model_trajectory,
informative_dim_n) = self._get_gt_and_model_phase_space_for_eval(
params, rng, batch, eval_seq_len)
# Calculate SyMetric scores
if informative_dim_n > 1:
scores, *_ = eval_metric.calculate_symetric_score(
gt_trajectory,
model_trajectory,
self.config.evaluation_metric.max_poly_order,
self.config.evaluation_metric.max_jacobian_score,
self.config.evaluation_metric.rsq_threshold,
self.config.evaluation_metric.sym_threshold,
self.config.evaluation_metric.evaluation_point_n,
trajectory_n=trajectory_n,
weight_tolerance=self.config.evaluation_metric.weight_tolerance,
alpha_sweep=alpha_sweep,
max_iter=self.config.evaluation_metric.max_iter,
cv=self.config.evaluation_metric.cv)
scores["unmasked_latents"] = informative_dim_n
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[subsection] = scores
else:
scores = {
"poly_exp_order":
self.config.evaluation_metric.max_poly_order,
"rsq":
0,
"sym":
self.config.evaluation_metric.max_jacobian_score,
"SyMetric": 0.0,
"unmasked_latents":
informative_dim_n
}
scores = utils.to_numpy(scores)
scores = utils.filter_only_scalar_stats(scores)
stats[subsection] = scores
return stats
def _get_gt_and_model_phase_space_for_eval(self, params, rng, batch,
eval_seq_len):
# Get data
gt_data, model_data, z0 = utils.stack_device_dim_into_batch(
self._compute_gt_state_and_latents(params, rng, batch, eval_seq_len)
)
if isinstance(self.model, AutoregressiveModel):
# These models return the `z` for the whole sequence
z0 = z0[:, 0]
# If latent space is image like, reshape it down to vector
if self.model.latent_system_net_type == "conv":
z0 = jax.tree_map(utils.reshape_latents_conv_to_flat, z0)
model_data = jax.tree_map(
lambda x: utils.reshape_latents_conv_to_flat(x, axis_n_to_keep=2),
model_data)
# Create mask to get rid of uninformative latents
latent_mask = eval_metric.create_latent_mask(z0)
informative_dim_n = np.sum(latent_mask)
model_data = model_data[:, :, latent_mask]
logging.info("Masking out model data, leaving dim_n=%d dimensions.",
model_data.shape[-1])
gt_trajectory = np.reshape(
gt_data,
[np.product(gt_data.shape[:-1]), gt_data.shape[-1]]
)
model_trajectory = np.reshape(model_data, [
np.product(model_data.shape[:-1]), model_data.shape[-1]
])
# Standardize data
gt_trajectory = eval_metric.standardize_data(gt_trajectory)
model_trajectory = eval_metric.standardize_data(model_trajectory)
return gt_trajectory, model_trajectory, informative_dim_n
if __name__ == "__main__":
flags.mark_flag_as_required("config")
logging.set_stderrthreshold(logging.INFO)
app.run(functools.partial(platform.main, HGNExperiment))
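# Illustrative launch command (an assumption added for exposition, not part of
# the original file). Jaxline reads its configuration through the
# ml_collections `--config` flag, so a run would look roughly like:
#   python -m physics_inspired_models.jaxline_train \
#     --config=physics_inspired_models/jaxline_configs.py:benchmark_hgn_overlap_sweep,0,<dataset_name>
# with DM_HAMILTONIAN_DYNAMICS_SUITE_DATASETS pointing at the downloaded
# datasets. The exact flag syntax depends on the installed jaxline version.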
| deepmind-research-master | physics_inspired_models/jaxline_train.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all autoregressive models."""
import functools
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import distrax
import haiku as hk
from jax import lax
import jax.numpy as jnp
import jax.random as jnr
import physics_inspired_models.metrics as metrics
import physics_inspired_models.models.base as base
import physics_inspired_models.models.networks as nets
import physics_inspired_models.utils as utils
class TeacherForcingAutoregressiveModel(base.SequenceModel):
"""A standard autoregressive model trained via teacher forcing."""
def __init__(
self,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
latent_dynamics_type: str,
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
name: Optional[str] = None,
**kwargs
):
# Remove any parameters from vae models
encoder_kwargs = dict(**encoder_kwargs)
encoder_kwargs["distribution_name"] = None
if kwargs.get("has_latent_transform", False):
raise ValueError("We do not support AR models with latent transform.")
super().__init__(
can_run_backwards=False,
latent_system_dim=latent_system_dim,
latent_system_net_type=latent_system_net_type,
latent_system_kwargs=latent_system_kwargs,
encoder_aggregation_type=encoder_aggregation_type,
decoder_de_aggregation_type=decoder_de_aggregation_type,
encoder_kwargs=encoder_kwargs,
decoder_kwargs=decoder_kwargs,
num_inference_steps=num_inference_steps,
num_target_steps=num_target_steps,
name=name,
**kwargs
)
self.latent_dynamics_type = latent_dynamics_type
# Arguments checks
if self.latent_system_net_type != "mlp":
raise ValueError("Currently we do not support non-mlp AR models.")
def recurrence_function(sequence, initial_state=None):
core = nets.make_flexible_recurrent_net(
core_type=latent_dynamics_type,
net_type=latent_system_net_type,
output_dims=self.latent_system_dim,
**self.latent_system_kwargs["net_kwargs"])
initial_state = initial_state or core.initial_state(sequence.shape[1])
core(sequence[0], initial_state)
return hk.dynamic_unroll(core, sequence, initial_state)
self.recurrence = hk.transform(recurrence_function)
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
return x
def process_latents_for_dynamics(self, z: jnp.ndarray) -> jnp.ndarray:
return z
def process_latents_for_decoder(self, z: jnp.ndarray) -> jnp.ndarray:
return z
@property
def inferred_index(self) -> int:
return self.num_inference_steps - 1
@property
def train_sequence_length(self) -> int:
return self.num_target_steps
def train_data_split(
self,
images: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
images = images[:, :self.train_sequence_length]
inference_data = images[:, :-1]
target_data = images[:, 1:]
return inference_data, target_data, dict(
num_steps_forward=1,
num_steps_backward=0,
include_z0=False)
def unroll_without_inputs(
self,
params: utils.Params,
rng: jnp.ndarray,
x_init: jnp.ndarray,
h_init: jnp.ndarray,
num_steps: int,
is_training: bool
) -> Tuple[Tuple[distrax.Distribution, jnp.ndarray], Any]:
if num_steps < 1:
raise ValueError("`num_steps` must be at least 1.")
def step_fn(carry, key):
x_last, h_last = carry
enc_key, dec_key = jnr.split(key)
z_in_next = self.encoder.apply(params, enc_key, x_last,
is_training=is_training)
z_next, h_next = self.recurrence.apply(params, None, z_in_next[None],
h_last)
p_x_next = self.decode_latents(params, dec_key, z_next[0],
is_training=is_training)
return (p_x_next.mean(), h_next), (p_x_next, z_next[0])
return lax.scan(
step_fn,
init=(x_init, h_init),
xs=jnr.split(rng, num_steps)
)
def unroll_latent_dynamics(
self,
z: jnp.ndarray,
params: utils.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
init_key, unroll_key, dec_key = jnr.split(key, 3)
if num_steps_backward != 0:
raise ValueError("This model can not run backwards.")
# Change 'z' time dimension to be first
z = jnp.swapaxes(z, 0, 1)
# Run recurrent model on inputs
z_0, h_0 = self.recurrence.apply(params, init_key, z)
if num_steps_forward == 1:
z_t = z_0
elif num_steps_forward > 1:
p_x_0 = self.decode_latents(params, dec_key, z_0[-1], is_training=False)
_, (_, z_t) = self.unroll_without_inputs(
params=params,
rng=unroll_key,
x_init=p_x_0.mean(),
h_init=h_0,
num_steps=num_steps_forward-1,
is_training=is_training
)
z_t = jnp.concatenate([z_0, z_t], axis=0)
else:
raise ValueError("num_steps_forward should be at least 1.")
# Make time dimension second
return jnp.swapaxes(z_t, 0, 1), dict()
def _models_core(
self,
params: utils.Params,
keys: jnp.ndarray,
image_data: jnp.ndarray,
is_training: bool,
**unroll_kwargs: Any
) -> Tuple[distrax.Distribution, jnp.ndarray, jnp.ndarray]:
enc_key, _, transform_key, unroll_key, dec_key, _ = keys
# Calculate latent input representation
inference_data = self.process_inputs_for_encoder(image_data)
z_raw = self.encoder.apply(params, enc_key, inference_data,
is_training=is_training)
# Apply latent transformation (should be identity)
z0 = self.apply_latent_transform(params, transform_key, z_raw,
is_training=is_training)
z0 = self.process_latents_for_dynamics(z0)
# Calculate latent output representation
decoder_z, _ = self.unroll_latent_dynamics(
z=z0,
params=params,
key=unroll_key,
is_training=is_training,
**unroll_kwargs
)
decoder_z = self.process_latents_for_decoder(decoder_z)
# Compute p(x|z)
p_x = self.decode_latents(params, dec_key, decoder_z,
is_training=is_training)
return p_x, z0, decoder_z
def training_objectives(
self,
params: hk.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: jnp.ndarray,
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
"""Computes the training objective and any supporting stats."""
# Split all rng keys
keys = jnr.split(rng, 6)
# Process training data
images = utils.extract_image(inputs)
image_data, target_data, unroll_kwargs = self.train_data_split(images)
p_x, _, _ = self._models_core(
params=params,
keys=keys,
image_data=image_data,
is_training=is_training,
**unroll_kwargs
)
# Compute training statistics
stats = metrics.training_statistics(
p_x=p_x,
targets=target_data,
rescale_by=self.rescale_by,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
)
    # The loss is just the negative log-likelihood (i.e. a scaled L2 loss)
stats["loss"] = stats["neg_log_p_x"]
if not is_training:
      # Add the evaluation-only statistics when not training
# We need to be able to set `use_mean = False` for some of the tests
stats.update(metrics.evaluation_only_statistics(
reconstruct_func=functools.partial(
self.reconstruct, use_mean=use_mean_for_eval_stats),
params=params,
inputs=inputs,
rng=rng,
rescale_by=self.rescale_by,
can_run_backwards=self.can_run_backwards,
train_sequence_length=self.train_sequence_length,
reconstruction_skip=1,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
))
return stats["loss"], (dict(), stats, dict())
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng: jnp.ndarray,
forward: bool,
use_mean: bool = True,
) -> distrax.Distribution:
"""Reconstructs the input sequence."""
if not forward:
raise ValueError("This model can not run backwards.")
images = utils.extract_image(inputs)
image_data = images[:, :self.num_inference_steps]
return self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
is_training=False,
num_steps_forward=images.shape[1] - self.num_inference_steps,
num_steps_backward=0,
include_z0=False,
)[0]
def gt_state_and_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_length: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray,
Union[distrax.Distribution, jnp.ndarray]]:
"""Computes the ground state and matching latents."""
assert unroll_direction == "forward"
images = utils.extract_image(inputs)
gt_state = utils.extract_gt_state(inputs)
image_data = images[:, :self.num_inference_steps]
gt_state = gt_state[:, 1:seq_length + 1]
_, z_in, z_out = self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
is_training=False,
num_steps_forward=images.shape[1] - self.num_inference_steps,
num_steps_backward=0,
include_z0=False,
)
return gt_state, z_out, z_in
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[Dict[str, jnp.ndarray], Dict[str, jnp.ndarray]]:
return dict(), dict()
def _init_latent_system(
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> utils.Params:
return self.recurrence.init(rng, z)
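# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the recurrence built in
# `__init__` above is a stack of RNN cores unrolled with `hk.dynamic_unroll`
# over a time-major latent sequence. The single-GRU core, hidden size and
# shapes below are illustrative assumptions only.
def _example_teacher_forcing_unroll():
  def unroll(sequence):
    core = hk.GRU(hidden_size=16)
    initial_state = core.initial_state(sequence.shape[1])
    return hk.dynamic_unroll(core, sequence, initial_state)
  unroll_fn = hk.transform(unroll)
  sequence = jnp.zeros([5, 2, 8])  # [time, batch, latent_dim]
  params = unroll_fn.init(jnr.PRNGKey(0), sequence)
  outputs, _ = unroll_fn.apply(params, None, sequence)
  return outputs.shape  # (5, 2, 16): one GRU output per time step.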
| deepmind-research-master | physics_inspired_models/models/autoregressive.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| deepmind-research-master | physics_inspired_models/models/__init__.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the networks as Haiku modules."""
from typing import Any, Callable, Mapping, Optional, Sequence, Union
from absl import logging
import distrax
import haiku as hk
import jax.numpy as jnp
from physics_inspired_models import utils
Activation = Union[str, Callable[[jnp.ndarray], jnp.ndarray]]
class DenseNet(hk.Module):
"""A feed forward network (MLP)."""
def __init__(
self,
num_units: Sequence[int],
activate_final: bool = False,
activation: Activation = "leaky_relu",
name: Optional[str] = None):
super().__init__(name=name)
self.num_units = num_units
self.num_layers = len(self.num_units)
self.activate_final = activate_final
self.activation = utils.get_activation(activation)
self.linear_modules = []
for i in range(self.num_layers):
self.linear_modules.append(
hk.Linear(
output_size=self.num_units[i],
name=f"ff_{i}"
)
)
def __call__(self, inputs: jnp.ndarray, is_training: bool):
net = inputs
for i, linear in enumerate(self.linear_modules):
net = linear(net)
if i < self.num_layers - 1 or self.activate_final:
net = self.activation(net)
return net
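# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): `DenseNet` is a plain
# Haiku MLP, so it is used through `hk.transform`. The layer sizes and input
# shape below are illustrative assumptions.
def _example_dense_net_usage():
  import jax  # Local import: the sketch only needs a PRNG key.
  def forward(x):
    return DenseNet(num_units=[32, 32, 8])(x, is_training=True)
  net_fn = hk.transform(forward)
  x = jnp.zeros([4, 16])  # [batch, features]
  params = net_fn.init(jax.random.PRNGKey(0), x)
  return net_fn.apply(params, None, x).shape  # (4, 8)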
class Conv2DNet(hk.Module):
"""Convolutional Network."""
def __init__(
self,
output_channels: Sequence[int],
kernel_shapes: Union[int, Sequence[int]] = 3,
strides: Union[int, Sequence[int]] = 1,
padding: Union[str, Sequence[str]] = "SAME",
data_format: str = "NHWC",
with_batch_norm: bool = False,
activate_final: bool = False,
activation: Activation = "leaky_relu",
name: Optional[str] = None):
super().__init__(name=name)
self.output_channels = tuple(output_channels)
self.num_layers = len(self.output_channels)
self.kernel_shapes = utils.bcast_if(kernel_shapes, int, self.num_layers)
self.strides = utils.bcast_if(strides, int, self.num_layers)
self.padding = utils.bcast_if(padding, str, self.num_layers)
self.data_format = data_format
self.with_batch_norm = with_batch_norm
self.activate_final = activate_final
self.activation = utils.get_activation(activation)
    if len(self.kernel_shapes) != self.num_layers:
      raise ValueError(f"Kernel shapes is of size {len(self.kernel_shapes)}, "
                       f"while output_channels is of size {self.num_layers}.")
    if len(self.strides) != self.num_layers:
      raise ValueError(f"Strides is of size {len(self.strides)}, while "
                       f"output_channels is of size {self.num_layers}.")
    if len(self.padding) != self.num_layers:
      raise ValueError(f"Padding is of size {len(self.padding)}, while "
                       f"output_channels is of size {self.num_layers}.")
self.conv_modules = []
self.bn_modules = []
for i in range(self.num_layers):
self.conv_modules.append(
hk.Conv2D(
output_channels=self.output_channels[i],
kernel_shape=self.kernel_shapes[i],
stride=self.strides[i],
padding=self.padding[i],
data_format=data_format,
name=f"conv_2d_{i}")
)
if with_batch_norm:
self.bn_modules.append(
hk.BatchNorm(
create_offset=True,
create_scale=False,
decay_rate=0.999,
name=f"batch_norm_{i}")
)
else:
self.bn_modules.append(None)
def __call__(self, inputs: jnp.ndarray, is_training: bool):
assert inputs.ndim == 4
net = inputs
for i, (conv, bn) in enumerate(zip(self.conv_modules, self.bn_modules)):
net = conv(net)
# Batch norm
if bn is not None:
net = bn(net, is_training=is_training)
if i < self.num_layers - 1 or self.activate_final:
net = self.activation(net)
return net
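# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): `Conv2DNet` broadcasts
# scalar `kernel_shapes`, `strides` and `padding` to every layer, so a small
# stack of stride-2 convolutions needs only scalar arguments. Shapes below are
# illustrative assumptions.
def _example_conv_2d_net_usage():
  import jax  # Local import: the sketch only needs a PRNG key.
  def forward(x):
    net = Conv2DNet(output_channels=[16, 32, 64], kernel_shapes=3, strides=2)
    return net(x, is_training=True)
  net_fn = hk.transform(forward)
  x = jnp.zeros([2, 32, 32, 3])  # NHWC input.
  params = net_fn.init(jax.random.PRNGKey(0), x)
  return net_fn.apply(params, None, x).shape  # (2, 4, 4, 64)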
class SpatialConvEncoder(hk.Module):
"""Spatial Convolutional Encoder for learning the Hamiltonian."""
def __init__(
self,
latent_dim: int,
conv_channels: Union[Sequence[int], int],
num_blocks: int,
blocks_depth: int = 2,
distribution_name: str = "diagonal_normal",
aggregation_type: Optional[str] = None,
data_format: str = "NHWC",
activation: Activation = "leaky_relu",
scale_factor: int = 2,
kernel_shapes: Union[Sequence[int], int] = 3,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None):
super().__init__(name=name)
if aggregation_type not in (None, "max", "mean", "linear_projection"):
raise ValueError(f"Unrecognized aggregation_type={aggregation_type}.")
self.latent_dim = latent_dim
self.conv_channels = conv_channels
self.num_blocks = num_blocks
self.scale_factor = scale_factor
self.data_format = data_format
self.distribution_name = distribution_name
self.aggregation_type = aggregation_type
# Compute the required size of the output
if distribution_name is None:
self.output_dim = latent_dim
elif distribution_name == "diagonal_normal":
self.output_dim = 2 * latent_dim
else:
raise ValueError(f"Unrecognized distribution_name={distribution_name}.")
if isinstance(conv_channels, int):
conv_channels = [[conv_channels] * blocks_depth
for _ in range(num_blocks)]
conv_channels[-1] += [self.output_dim]
else:
assert isinstance(conv_channels, (list, tuple))
assert len(conv_channels) == num_blocks
conv_channels = list(list(c) for c in conv_channels)
conv_channels[-1].append(self.output_dim)
if isinstance(kernel_shapes, tuple):
kernel_shapes = list(kernel_shapes)
# Convolutional blocks
self.blocks = []
for i, channels in enumerate(conv_channels):
if isinstance(kernel_shapes, int):
extra_kernel_shapes = 0
else:
extra_kernel_shapes = [3] * (len(channels) - len(kernel_shapes))
self.blocks.append(Conv2DNet(
output_channels=channels,
kernel_shapes=kernel_shapes + extra_kernel_shapes,
strides=[self.scale_factor] + [1] * (len(channels) - 1),
padding=padding,
data_format=data_format,
with_batch_norm=False,
activate_final=i < num_blocks - 1,
activation=activation,
name=f"block_{i}"
))
def spatial_aggregation(self, x: jnp.ndarray) -> jnp.ndarray:
if self.aggregation_type is None:
return x
axis = (1, 2) if self.data_format == "NHWC" else (2, 3)
if self.aggregation_type == "max":
return jnp.max(x, axis=axis)
if self.aggregation_type == "mean":
return jnp.mean(x, axis=axis)
if self.aggregation_type == "linear_projection":
x = x.reshape(x.shape[:-3] + (-1,))
return hk.Linear(self.output_dim, name="LinearProjection")(x)
raise NotImplementedError()
def make_distribution(self, net_output: jnp.ndarray) -> distrax.Distribution:
if self.distribution_name is None:
return net_output
elif self.distribution_name == "diagonal_normal":
if self.aggregation_type is None:
split_axis, num_axes = self.data_format.index("C"), 3
else:
split_axis, num_axes = 1, 1
      # Shift the split axis if the input has more than one batch dimension
split_axis += net_output.ndim - num_axes - 1
loc, log_scale = jnp.split(net_output, 2, axis=split_axis)
return distrax.Normal(loc, jnp.exp(log_scale))
else:
raise NotImplementedError()
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool
) -> Union[jnp.ndarray, distrax.Distribution]:
# Treat any extra dimensions (like time) as the batch
batched_shape = inputs.shape[:-3]
net = jnp.reshape(inputs, (-1,) + inputs.shape[-3:])
# Apply all blocks in sequence
for block in self.blocks:
net = block(net, is_training=is_training)
    # Spatially aggregate the features (if requested)
net = self.spatial_aggregation(net)
# Reshape back to correct dimensions (like batch + time)
net = jnp.reshape(net, batched_shape + net.shape[1:])
# Return a distribution over the observations
return self.make_distribution(net)
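# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): with the default
# `distribution_name="diagonal_normal"` the encoder produces `2 * latent_dim`
# features, splits them into mean and log-scale and returns a `distrax.Normal`
# per frame. All hyper-parameters and shapes below are illustrative.
def _example_spatial_conv_encoder_usage():
  import jax  # Local import: the sketch only needs a PRNG key.
  def forward(x):
    encoder = SpatialConvEncoder(
        latent_dim=16,
        conv_channels=8,
        num_blocks=3,
        aggregation_type="mean")
    return encoder(x, is_training=True)
  encoder_fn = hk.transform(forward)
  x = jnp.zeros([2, 5, 32, 32, 3])  # [batch, time, H, W, C]
  params = encoder_fn.init(jax.random.PRNGKey(0), x)
  q_z = encoder_fn.apply(params, None, x)
  return q_z.mean().shape  # (2, 5, 16)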
class SpatialConvDecoder(hk.Module):
"""Spatial Convolutional Decoder for learning the Hamiltonian."""
def __init__(
self,
initial_spatial_shape: Sequence[int],
conv_channels: Union[Sequence[int], int],
num_blocks: int,
max_de_aggregation_dims: int,
blocks_depth: int = 2,
scale_factor: int = 2,
output_channels: int = 3,
h_const_channels: int = 2,
data_format: str = "NHWC",
activation: Activation = "leaky_relu",
learned_sigma: bool = False,
de_aggregation_type: Optional[str] = None,
final_activation: Activation = "sigmoid",
discard_half_de_aggregated: bool = False,
kernel_shapes: Union[Sequence[int], int] = 3,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None):
super().__init__(name=name)
if de_aggregation_type not in (None, "tile", "linear_projection"):
raise ValueError(f"Unrecognized de_aggregation_type="
f"{de_aggregation_type}.")
self.num_blocks = num_blocks
self.scale_factor = scale_factor
self.h_const_channels = h_const_channels
self.data_format = data_format
self.learned_sigma = learned_sigma
self.initial_spatial_shape = tuple(initial_spatial_shape)
self.final_activation = utils.get_activation(final_activation)
self.de_aggregation_type = de_aggregation_type
self.max_de_aggregation_dims = max_de_aggregation_dims
self.discard_half_de_aggregated = discard_half_de_aggregated
if isinstance(conv_channels, int):
conv_channels = [[conv_channels] * blocks_depth
for _ in range(num_blocks)]
conv_channels[-1] += [output_channels]
else:
assert isinstance(conv_channels, (list, tuple))
assert len(conv_channels) == num_blocks
conv_channels = list(list(c) for c in conv_channels)
conv_channels[-1].append(output_channels)
# Convolutional blocks
self.blocks = []
for i, channels in enumerate(conv_channels):
is_final_block = i == num_blocks - 1
self.blocks.append(
Conv2DNet( # pylint: disable=g-complex-comprehension
output_channels=channels,
kernel_shapes=kernel_shapes,
strides=1,
padding=padding,
data_format=data_format,
with_batch_norm=False,
activate_final=not is_final_block,
activation=activation,
name=f"block_{i}"
))
def spatial_de_aggregation(self, x: jnp.ndarray) -> jnp.ndarray:
if self.de_aggregation_type is None:
assert x.ndim >= 4
if self.data_format == "NHWC":
assert x.shape[1:3] == self.initial_spatial_shape
elif self.data_format == "NCHW":
assert x.shape[2:4] == self.initial_spatial_shape
return x
elif self.de_aggregation_type == "linear_projection":
assert x.ndim == 2
n, d = x.shape
d = min(d, self.max_de_aggregation_dims or d)
out_d = d * self.initial_spatial_shape[0] * self.initial_spatial_shape[1]
x = hk.Linear(out_d, name="LinearProjection")(x)
if self.data_format == "NHWC":
shape = (n,) + self.initial_spatial_shape + (d,)
else:
shape = (n, d) + self.initial_spatial_shape
return x.reshape(shape)
elif self.de_aggregation_type == "tile":
assert x.ndim == 2
if self.data_format == "NHWC":
repeats = (1,) + self.initial_spatial_shape + (1,)
x = x[:, None, None, :]
else:
repeats = (1, 1) + self.initial_spatial_shape
x = x[:, :, None, None]
return jnp.tile(x, repeats)
else:
raise NotImplementedError()
def add_constant_channels(self, inputs: jnp.ndarray) -> jnp.ndarray:
# --------------------------------------------
# This is purely for TF compatibility purposes
if self.discard_half_de_aggregated:
axis = self.data_format.index("C")
inputs, _ = jnp.split(inputs, 2, axis=axis)
# --------------------------------------------
    # Extra constant channels appended to the input
if self.data_format == "NHWC":
h_shape = self.initial_spatial_shape + (self.h_const_channels,)
else:
h_shape = (self.h_const_channels,) + self.initial_spatial_shape
h_const = hk.get_parameter("h", h_shape, dtype=inputs.dtype,
init=hk.initializers.Constant(1))
h_const = jnp.tile(h_const, reps=[inputs.shape[0], 1, 1, 1])
return jnp.concatenate([h_const, inputs], axis=self.data_format.index("C"))
def make_distribution(self, net_output: jnp.ndarray) -> distrax.Distribution:
if self.learned_sigma:
init = hk.initializers.Constant(- jnp.log(2.0) / 2.0)
log_scale = hk.get_parameter("log_scale", shape=(),
dtype=net_output.dtype, init=init)
scale = jnp.full_like(net_output, jnp.exp(log_scale))
else:
scale = jnp.full_like(net_output, 1 / jnp.sqrt(2.0))
return distrax.Normal(net_output, scale)
def __call__(
self,
inputs: jnp.ndarray,
is_training: bool
) -> distrax.Distribution:
# Apply the spatial de-aggregation
inputs = self.spatial_de_aggregation(inputs)
# Add the parameterized constant channels
net = self.add_constant_channels(inputs)
# Apply all the blocks
for block in self.blocks:
# Up-sample the image
net = utils.nearest_neighbour_upsampling(net, self.scale_factor)
# Apply the convolutional block
net = block(net, is_training=is_training)
# Apply any specific output nonlinearity
net = self.final_activation(net)
# Construct the distribution over the observations
return self.make_distribution(net)
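# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the decoder mirrors the
# encoder, upsampling the `initial_spatial_shape` by `scale_factor` once per
# block before returning a Gaussian over pixels. Hyper-parameters and shapes
# below are illustrative assumptions.
def _example_spatial_conv_decoder_usage():
  import jax  # Local import: the sketch only needs a PRNG key.
  def forward(z):
    decoder = SpatialConvDecoder(
        initial_spatial_shape=(4, 4),
        conv_channels=8,
        num_blocks=3,
        max_de_aggregation_dims=8,
        de_aggregation_type="linear_projection")
    return decoder(z, is_training=True)
  decoder_fn = hk.transform(forward)
  z = jnp.zeros([2, 16])  # [batch, latent_dim]
  params = decoder_fn.init(jax.random.PRNGKey(0), z)
  p_x = decoder_fn.apply(params, None, z)
  return p_x.mean().shape  # (2, 32, 32, 3): 4x4 doubled by each of 3 blocks.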
def make_flexible_net(
net_type: str,
output_dims: int,
conv_channels: Union[Sequence[int], int],
num_units: Union[Sequence[int], int],
num_layers: Optional[int],
activation: Activation,
activate_final: bool = False,
kernel_shapes: Union[Sequence[int], int] = 3,
strides: Union[Sequence[int], int] = 1,
padding: Union[Sequence[str], str] = "SAME",
name: Optional[str] = None,
**unused_kwargs: Mapping[str, Any]
):
"""Commonly used for creating a flexible network."""
if unused_kwargs:
logging.warning("Unused kwargs of `make_flexible_net`: %s",
str(unused_kwargs))
if net_type == "mlp":
if isinstance(num_units, int):
assert num_layers is not None
num_units = [num_units] * (num_layers - 1) + [output_dims]
else:
num_units = list(num_units) + [output_dims]
return DenseNet(
num_units=num_units,
activation=activation,
activate_final=activate_final,
name=name
)
elif net_type == "conv":
if isinstance(conv_channels, int):
assert num_layers is not None
conv_channels = [conv_channels] * (num_layers - 1) + [output_dims]
else:
conv_channels = list(conv_channels) + [output_dims]
return Conv2DNet(
output_channels=conv_channels,
kernel_shapes=kernel_shapes,
strides=strides,
padding=padding,
activation=activation,
activate_final=activate_final,
name=name
)
elif net_type == "transformer":
raise NotImplementedError()
else:
raise ValueError(f"Unrecognized net_type={net_type}.")
def make_flexible_recurrent_net(
core_type: str,
net_type: str,
output_dims: int,
num_units: Union[Sequence[int], int],
num_layers: Optional[int],
activation: Activation,
activate_final: bool = False,
name: Optional[str] = None,
**unused_kwargs
):
"""Commonly used for creating a flexible recurrences."""
if net_type != "mlp":
raise ValueError("We do not support convolutional recurrent nets atm.")
if unused_kwargs:
logging.warning("Unused kwargs of `make_flexible_recurrent_net`: %s",
str(unused_kwargs))
if isinstance(num_units, (list, tuple)):
num_units = list(num_units) + [output_dims]
num_layers = len(num_units)
else:
assert num_layers is not None
num_units = [num_units] * (num_layers - 1) + [output_dims]
name = name or f"{core_type.upper()}"
activation = utils.get_activation(activation)
core_list = []
for i, n in enumerate(num_units):
if core_type.lower() == "vanilla":
core_list.append(hk.VanillaRNN(hidden_size=n, name=f"{name}_{i}"))
elif core_type.lower() == "lstm":
core_list.append(hk.LSTM(hidden_size=n, name=f"{name}_{i}"))
elif core_type.lower() == "gru":
core_list.append(hk.GRU(hidden_size=n, name=f"{name}_{i}"))
else:
raise ValueError(f"Unrecognized core_type={core_type}.")
if i != num_layers - 1:
core_list.append(activation)
if activate_final:
core_list.append(activation)
return hk.DeepRNN(core_list, name="RNN")
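# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the returned
# `hk.DeepRNN` stacks the requested cores (here a GRU of size 32 followed by a
# GRU of size `output_dims`) and can be unrolled with `hk.dynamic_unroll`.
# Core type, sizes and shapes below are illustrative assumptions.
def _example_make_flexible_recurrent_net_usage():
  import jax  # Local import: the sketch only needs a PRNG key.
  def unroll(sequence):
    core = make_flexible_recurrent_net(
        core_type="gru",
        net_type="mlp",
        output_dims=8,
        num_units=32,
        num_layers=2,
        activation="leaky_relu")
    state = core.initial_state(sequence.shape[1])
    return hk.dynamic_unroll(core, sequence, state)
  unroll_fn = hk.transform(unroll)
  sequence = jnp.zeros([6, 3, 8])  # [time, batch, features]
  params = unroll_fn.init(jax.random.PRNGKey(0), sequence)
  outputs, _ = unroll_fn.apply(params, None, sequence)
  return outputs.shape  # (6, 3, 8)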
| deepmind-research-master | physics_inspired_models/models/networks.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all models."""
from typing import Any, Dict, Optional
import physics_inspired_models.models.autoregressive as autoregressive
import physics_inspired_models.models.deterministic_vae as deterministic_vae
_physics_arguments = (
"input_space", "simulation_space", "potential_func_form",
"kinetic_func_form", "hgn_kinetic_func_form", "lgn_kinetic_func_form",
"parametrize_mass_matrix", "hgn_parametrize_mass_matrix",
"lgn_parametrize_mass_matrix", "mass_eps"
)
def construct_model(
name: str,
*args,
**kwargs: Dict[str, Any]
):
"""Constructs the correct instance of a model given the short name."""
latent_dynamics_type: Optional[str] = kwargs.pop("latent_dynamics_type", None) # pytype: disable=annotation-type-mismatch
latent_system_kwargs = dict(**kwargs.pop("latent_system_kwargs", dict()))
if name == "AR":
assert latent_dynamics_type in ("vanilla", "lstm", "gru")
    # These arguments are not part of the AR models
for k in _physics_arguments + ("integrator_method", "residual"):
latent_system_kwargs.pop(k, None)
return autoregressive.TeacherForcingAutoregressiveModel(
*args,
latent_dynamics_type=latent_dynamics_type,
latent_system_kwargs=latent_system_kwargs,
**kwargs
)
elif name == "RGN":
assert latent_dynamics_type in ("Discrete", None)
latent_dynamics_type = "Discrete"
    # These arguments are not part of the RGN models
for k in _physics_arguments + ("integrator_method",):
latent_system_kwargs.pop(k, None)
elif name == "ODE":
assert latent_dynamics_type in ("ODE", None)
latent_dynamics_type = "ODE"
    # These arguments are not part of the ODE models
for k in _physics_arguments + ("residual",):
latent_system_kwargs.pop(k, None)
elif name == "HGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
assert latent_system_kwargs.get("input_space", None) in ("momentum", None)
latent_system_kwargs["input_space"] = "momentum"
assert (latent_system_kwargs.get("simulation_space", None)
in ("momentum", None))
latent_system_kwargs["simulation_space"] = "momentum"
# Kinetic func form
hgn_specific = latent_system_kwargs.pop("hgn_kinetic_func_form", None)
if hgn_specific is not None:
latent_system_kwargs["kinetic_func_form"] = hgn_specific
# Mass matrix
hgn_specific = latent_system_kwargs.pop("hgn_parametrize_mass_matrix",
None)
if hgn_specific is not None:
latent_system_kwargs["parametrize_mass_matrix"] = hgn_specific
    # These arguments are not part of the HGN models
latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("lgn_kinetic_func_form", None)
latent_system_kwargs.pop("lgn_parametrize_mass_matrix", None)
elif name == "LGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
assert latent_system_kwargs.get("input_space", None) in ("velocity", None)
latent_system_kwargs["input_space"] = "velocity"
assert (latent_system_kwargs.get("simulation_space", None) in
("velocity", None))
latent_system_kwargs["simulation_space"] = "velocity"
# Kinetic func form
lgn_specific = latent_system_kwargs.pop("lgn_kinetic_func_form", None)
if lgn_specific is not None:
latent_system_kwargs["kinetic_func_form"] = lgn_specific
# Mass matrix
lgn_specific = latent_system_kwargs.pop("lgn_parametrize_mass_matrix",
None)
if lgn_specific is not None:
latent_system_kwargs["parametrize_mass_matrix"] = lgn_specific
    # These arguments are not part of the LGN models
latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("hgn_kinetic_func_form", None)
latent_system_kwargs.pop("hgn_parametrize_mass_matrix", None)
elif name == "PGN":
assert latent_dynamics_type in ("Physics", None)
latent_dynamics_type = "Physics"
    # These arguments are not part of the PGN models
    latent_system_kwargs.pop("residual", None)
latent_system_kwargs.pop("hgn_kinetic_func_form", None)
latent_system_kwargs.pop("hgn_parametrize_mass_matrix", None)
latent_system_kwargs.pop("lgn_kinetic_func_form", None)
latent_system_kwargs.pop("lgn_parametrize_mass_matrix", None)
else:
raise NotImplementedError()
return deterministic_vae.DeterministicLatentsGenerativeModel(
*args,
latent_dynamics_type=latent_dynamics_type,
latent_system_kwargs=latent_system_kwargs,
**kwargs)
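# -----------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the HGN/LGN/PGN branches
# above only remap model-specific keys onto the generic physics arguments, so a
# single config can carry both `hgn_*` and `lgn_*` variants and each model
# picks out its own. Toy dict with made-up values:
def _example_hgn_kwargs_remapping():
  latent_system_kwargs = dict(
      hgn_kinetic_func_form="separable_net",  # Hypothetical value.
      hgn_parametrize_mass_matrix=True,
      lgn_kinetic_func_form="pure_quad",  # Hypothetical value.
  )
  hgn_specific = latent_system_kwargs.pop("hgn_kinetic_func_form", None)
  if hgn_specific is not None:
    latent_system_kwargs["kinetic_func_form"] = hgn_specific
  hgn_specific = latent_system_kwargs.pop("hgn_parametrize_mass_matrix", None)
  if hgn_specific is not None:
    latent_system_kwargs["parametrize_mass_matrix"] = hgn_specific
  latent_system_kwargs.pop("lgn_kinetic_func_form", None)
  # -> {"kinetic_func_form": "separable_net", "parametrize_mass_matrix": True}
  return latent_system_kwargs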
| deepmind-research-master | physics_inspired_models/models/common.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the main models code."""
import functools
from typing import Any, Dict, Mapping, Optional, Sequence, Tuple, Union
import distrax
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import haiku as hk
import jax.numpy as jnp
import jax.random as jnr
import numpy as np
from physics_inspired_models import metrics
from physics_inspired_models import utils
from physics_inspired_models.models import base
from physics_inspired_models.models import dynamics
_ArrayOrPhase = Union[jnp.ndarray, phase_space.PhaseSpace]
class DeterministicLatentsGenerativeModel(base.SequenceModel[_ArrayOrPhase]):
"""Common class for generative models with deterministic latent dynamics."""
def __init__(
self,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
latent_dynamics_type: str,
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
latent_training_type: str,
training_data_split: str,
objective_type: str,
dt: float = 0.125,
render_from_q_only: bool = True,
prior_type: str = "standard_normal",
use_analytical_kl: bool = True,
geco_kappa: float = 0.001,
geco_alpha: Optional[float] = 0.0,
elbo_beta_delay: int = 0,
elbo_beta_final: float = 1.0,
name: Optional[str] = None,
**kwargs
):
can_run_backwards = latent_dynamics_type in ("ODE", "Physics")
# Verify arguments
if objective_type not in ("GECO", "ELBO", "NON-PROB"):
raise ValueError(f"Unrecognized training type - {objective_type}")
if geco_alpha is None:
geco_alpha = 0
if geco_alpha < 0 or geco_alpha >= 1:
raise ValueError("GECO alpha parameter must be in [0, 1).")
if prior_type not in ("standard_normal", "made", "made_gated"):
raise ValueError(f"Unrecognized prior_type='{prior_type}.")
if (latent_training_type == "forward_backward" and
training_data_split != "include_inference"):
raise ValueError("Training forward_backward works only when "
"training_data_split=include_inference.")
if (latent_training_type == "forward_backward" and
num_inference_steps % 2 == 0):
raise ValueError("Training forward_backward works only when "
"num_inference_steps are odd.")
if latent_training_type == "forward_backward" and not can_run_backwards:
raise ValueError("Training forward_backward works only when the model can"
" be run backwards.")
if prior_type != "standard_normal":
raise ValueError("For now we support only `standard_normal`.")
super().__init__(
can_run_backwards=can_run_backwards,
latent_system_dim=latent_system_dim,
latent_system_net_type=latent_system_net_type,
latent_system_kwargs=latent_system_kwargs,
encoder_aggregation_type=encoder_aggregation_type,
decoder_de_aggregation_type=decoder_de_aggregation_type,
encoder_kwargs=encoder_kwargs,
decoder_kwargs=decoder_kwargs,
num_inference_steps=num_inference_steps,
num_target_steps=num_target_steps,
name=name,
**kwargs
)
# VAE specific arguments
self.prior_type = prior_type
self.objective_type = objective_type
self.use_analytical_kl = use_analytical_kl
self.geco_kappa = geco_kappa
self.geco_alpha = geco_alpha
self.elbo_beta_delay = elbo_beta_delay
self.elbo_beta_final = jnp.asarray(elbo_beta_final)
# The dynamics module and arguments
self.latent_dynamics_type = latent_dynamics_type
self.latent_training_type = latent_training_type
self.training_data_split = training_data_split
self.dt = dt
self.render_from_q_only = render_from_q_only
latent_system_kwargs["net_kwargs"] = dict(
latent_system_kwargs["net_kwargs"])
latent_system_kwargs["net_kwargs"]["net_type"] = self.latent_system_net_type
if self.latent_dynamics_type == "Physics":
# Note that here system_dim means the dimensionality of `q` and `p`.
model_constructor = functools.partial(
dynamics.PhysicsSimulationNetwork,
system_dim=self.latent_system_dim // 2,
name="Physics",
**latent_system_kwargs
)
elif self.latent_dynamics_type == "ODE":
model_constructor = functools.partial(
dynamics.OdeNetwork,
system_dim=self.latent_system_dim,
name="ODE",
**latent_system_kwargs
)
elif self.latent_dynamics_type == "Discrete":
model_constructor = functools.partial(
dynamics.DiscreteDynamicsNetwork,
system_dim=self.latent_system_dim,
name="Discrete",
**latent_system_kwargs
)
else:
raise NotImplementedError()
self.dynamics = hk.transform(
lambda *args, **kwargs_: model_constructor()(*args, **kwargs_)) # pylint: disable=unnecessary-lambda
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
return utils.stack_time_into_channels(x, self.data_format)
def process_latents_for_dynamics(self, z: jnp.ndarray) -> _ArrayOrPhase:
if self.latent_dynamics_type == "Physics":
return phase_space.PhaseSpace.from_state(z)
return z
def process_latents_for_decoder(self, z: _ArrayOrPhase) -> jnp.ndarray:
if self.latent_dynamics_type == "Physics":
return z.q if self.render_from_q_only else z.single_state
return z
@property
def inferred_index(self) -> int:
if self.latent_training_type == "forward":
return self.num_inference_steps - 1
elif self.latent_training_type == "forward_backward":
assert self.num_inference_steps % 2 == 1
return self.num_inference_steps // 2
else:
raise NotImplementedError()
@property
def targets_index_offset(self) -> int:
if self.training_data_split == "overlap_by_one":
return -1
elif self.training_data_split == "no_overlap":
return 0
elif self.training_data_split == "include_inference":
return - self.num_inference_steps
else:
raise NotImplementedError()
@property
def targets_length(self) -> int:
if self.training_data_split == "include_inference":
return self.num_inference_steps + self.num_target_steps
return self.num_target_steps
@property
def train_sequence_length(self) -> int:
"""Computes the total length of a sequence needed for training."""
if self.training_data_split == "overlap_by_one":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [---------------------------------]
return self.num_inference_steps + self.num_target_steps - 1
elif self.training_data_split == "no_overlap":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [--------------------------------]
return self.num_inference_steps + self.num_target_steps
elif self.training_data_split == "include_inference":
# Input - [-------------------------------------------------]
# Inference - [---------------]
# Targets - [-------------------------------------------------]
return self.num_inference_steps + self.num_target_steps
else:
raise NotImplementedError()
def train_data_split(
self,
images: jnp.ndarray
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
images = images[:, :self.train_sequence_length]
inf_idx = self.num_inference_steps
t_idx = self.num_inference_steps + self.targets_index_offset
if self.latent_training_type == "forward":
inference_data = images[:, :inf_idx]
target_data = images[:, t_idx:]
if self.training_data_split == "include_inference":
num_steps_backward = self.inferred_index
else:
num_steps_backward = 0
num_steps_forward = self.num_target_steps
if self.training_data_split == "overlap_by_one":
num_steps_forward -= 1
unroll_kwargs = dict(
num_steps_backward=num_steps_backward,
include_z0=self.training_data_split != "no_overlap",
num_steps_forward=num_steps_forward,
dt=self.dt
)
elif self.latent_training_type == "forward_backward":
assert self.training_data_split == "include_inference"
n_fwd = images.shape[0] // 2
inference_fwd = images[:n_fwd, :inf_idx]
targets_fwd = images[:n_fwd, t_idx:]
inference_bckwd = images[n_fwd:, -inf_idx:]
targets_bckwd = jnp.flip(images[n_fwd:, :images.shape[1] - t_idx], axis=1)
inference_data = jnp.concatenate([inference_fwd, inference_bckwd], axis=0)
target_data = jnp.concatenate([targets_fwd, targets_bckwd], axis=0)
      # This needs to be numpy rather than jax.numpy, because we make some
# verification checks in `integrators.py:149-161`.
dt_fwd = np.full([n_fwd], self.dt)
dt_bckwd = np.full([images.shape[0] - n_fwd], self.dt)
dt = np.concatenate([dt_fwd, -dt_bckwd], axis=0)
unroll_kwargs = dict(
num_steps_backward=self.inferred_index,
include_z0=True,
num_steps_forward=self.targets_length - self.inferred_index - 1,
dt=dt
)
else:
raise NotImplementedError()
return inference_data, target_data, unroll_kwargs
def prior(self) -> distrax.Distribution:
"""Given the parameters returns the prior distribution of the model."""
    # Allow running with both the full parameters and only the prior's
if self.prior_type == "standard_normal":
# assert self.prior_nets is None and self.gated_made is None
if self.latent_system_net_type == "mlp":
event_shape = (self.latent_system_dim,)
elif self.latent_system_net_type == "conv":
if self.data_format == "NHWC":
event_shape = self.latent_spatial_shape + (self.latent_system_dim,)
else:
event_shape = (self.latent_system_dim,) + self.latent_spatial_shape
else:
raise NotImplementedError()
return distrax.Normal(jnp.zeros(event_shape), jnp.ones(event_shape))
else:
raise ValueError(f"Unrecognized prior_type='{self.prior_type}'.")
def sample_latent_from_prior(
self,
params: utils.Params,
rng: jnp.ndarray,
num_samples: int = 1,
**kwargs: Any) -> jnp.ndarray:
"""Takes sample from the prior (and optionally puts them through the latent transform function."""
_, sample_key, transf_key = jnr.split(rng, 3)
prior = self.prior()
z_raw = prior.sample(seed=sample_key, sample_shape=[num_samples])
return self.apply_latent_transform(params, transf_key, z_raw, **kwargs)
def sample_trajectories_from_prior(
self,
params: utils.Params,
num_steps: int,
rng: jnp.ndarray,
num_samples: int = 1,
is_training: bool = False,
**kwargs
) -> distrax.Distribution:
"""Generates samples from the prior (unconditional generation)."""
sample_key, unroll_key, dec_key = jnr.split(rng, 3)
z0 = self.sample_latent_from_prior(params, sample_key, num_samples,
is_training=is_training)
z, _ = self.unroll_latent_dynamics(
z=self.process_latents_for_dynamics(z0),
params=params,
key=unroll_key,
num_steps_forward=num_steps,
num_steps_backward=0,
include_z0=True,
is_training=is_training,
**kwargs
)
z = self.process_latents_for_decoder(z)
return self.decode_latents(params, dec_key, z, is_training=is_training)
def verify_unroll_args(
self,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool
) -> None:
if num_steps_forward < 0 or num_steps_backward < 0:
raise ValueError("num_steps_forward and num_steps_backward can not be "
"negative.")
if num_steps_forward == 0 and num_steps_backward == 0:
raise ValueError("You need one of num_steps_forward or "
"num_of_steps_backward to be positive.")
if num_steps_forward > 0 and num_steps_backward > 0 and not include_z0:
raise ValueError("When both num_steps_forward and num_steps_backward are "
"positive include_t0 should be True.")
if num_steps_backward > 0 and not self.can_run_backwards:
raise ValueError("This model can not be unrolled backward in time.")
def unroll_latent_dynamics(
self,
z: phase_space.PhaseSpace,
params: hk.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[_ArrayOrPhase, Mapping[str, jnp.ndarray]]:
self.verify_unroll_args(num_steps_forward, num_steps_backward, include_z0)
return self.dynamics.apply(
params,
key,
y0=z,
dt=kwargs.pop("dt", self.dt),
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_z0,
return_stats=True,
is_training=is_training
)
def _models_core(
self,
params: utils.Params,
keys: jnp.ndarray,
image_data: jnp.ndarray,
use_mean: bool,
is_training: bool,
**unroll_kwargs: Any
) -> Tuple[distrax.Distribution, distrax.Distribution, distrax.Distribution,
jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray]]:
enc_key, sample_key, transform_key, unroll_key, dec_key, _ = keys
# Calculate the approximate posterior q(z|x)
inference_data = self.process_inputs_for_encoder(image_data)
q_z: distrax.Distribution = self.encoder.apply(params, enc_key,
inference_data,
is_training=is_training)
# Sample latent variables or take the mean
z_raw = q_z.mean() if use_mean else q_z.sample(seed=sample_key)
# Apply latent transformation
z0 = self.apply_latent_transform(params, transform_key, z_raw,
is_training=is_training)
# Unroll the latent variable
z, dyn_stats = self.unroll_latent_dynamics(
z=self.process_latents_for_dynamics(z0),
params=params,
key=unroll_key,
is_training=is_training,
**unroll_kwargs
)
decoder_z = self.process_latents_for_decoder(z)
# Compute p(x|z)
p_x = self.decode_latents(params, dec_key, decoder_z,
is_training=is_training)
z = z.single_state if isinstance(z, phase_space.PhaseSpace) else z
return p_x, q_z, self.prior(), z0, z, dyn_stats
def training_objectives(
self,
params: utils.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: jnp.ndarray,
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
# Split all rng keys
keys = jnr.split(rng, 6)
# Process training data
images = utils.extract_image(inputs)
image_data, target_data, unroll_kwargs = self.train_data_split(images)
p_x, q_z, prior, _, _, dyn_stats = self._models_core(
params=params,
keys=keys,
image_data=image_data,
use_mean=False,
is_training=is_training,
**unroll_kwargs
)
# Note: we reuse the rng key used to sample the latent variable here
# so that it can be reused to evaluate a (non-analytical) KL at that sample.
stats = metrics.training_statistics(
p_x=p_x,
targets=target_data,
rescale_by=self.rescale_by,
rng=keys[1],
q_z=q_z,
prior=prior,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
)
stats.update(dyn_stats)
    # Compute other (non-reported) statistics
z_stats = dict()
other_stats = dict(x_reconstruct=p_x.mean(), z_stats=z_stats)
# The loss computation and GECO state update
new_state = dict()
if self.objective_type == "GECO":
geco_stats = metrics.geco_objective(
l2_loss=stats["l2"],
kl=stats["kl"],
alpha=self.geco_alpha,
kappa=self.geco_kappa,
constraint_ema=state["GECO"]["geco_constraint_ema"],
lambda_var=params["GECO"]["geco_lambda_var"],
is_training=is_training
)
new_state["GECO"] = dict(
geco_constraint_ema=geco_stats["geco_constraint_ema"])
stats.update(geco_stats)
elif self.objective_type == "ELBO":
elbo_stats = metrics.elbo_objective(
neg_log_p_x=stats["neg_log_p_x"],
kl=stats["kl"],
final_beta=self.elbo_beta_final,
beta_delay=self.elbo_beta_delay,
step=step
)
stats.update(elbo_stats)
elif self.objective_type == "NON-PROB":
stats["loss"] = stats["neg_log_p_x"]
else:
raise ValueError()
if not is_training:
if self.training_data_split == "overlap_by_one":
reconstruction_skip = self.num_inference_steps - 1
elif self.training_data_split == "no_overlap":
reconstruction_skip = self.num_inference_steps
elif self.training_data_split == "include_inference":
reconstruction_skip = 0
else:
raise NotImplementedError()
# We intentionally reuse the same rng as the training, in order to be able
# to run tests and verify that the evaluation and reconstruction work
# correctly.
# We need to be able to set `use_mean = False` for some of the tests
stats.update(metrics.evaluation_only_statistics(
reconstruct_func=functools.partial(
self.reconstruct, use_mean=use_mean_for_eval_stats),
params=params,
inputs=inputs,
rng=rng,
rescale_by=self.rescale_by,
can_run_backwards=self.can_run_backwards,
train_sequence_length=self.train_sequence_length,
reconstruction_skip=reconstruction_skip,
p_x_learned_sigma=self.decoder_kwargs.get("learned_sigma", False)
))
# Make new state the same type as state
new_state = utils.convert_to_pytype(new_state, state)
return stats["loss"], (new_state, stats, other_stats)
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng: Optional[jnp.ndarray],
forward: bool,
use_mean: bool = True,
) -> distrax.Distribution:
if not self.can_run_backwards and not forward:
raise ValueError("This model can not be run backwards.")
images = utils.extract_image(inputs)
# This is intentionally matching the split for the training stats
if forward:
num_steps_backward = self.inferred_index
num_steps_forward = images.shape[1] - num_steps_backward - 1
else:
num_steps_forward = self.num_inference_steps - self.inferred_index - 1
num_steps_backward = images.shape[1] - num_steps_forward - 1
if not self.can_run_backwards:
num_steps_backward = 0
if forward:
image_data = images[:, :self.num_inference_steps]
else:
image_data = images[:, -self.num_inference_steps:]
return self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
use_mean=use_mean,
is_training=False,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_z0=True,
)[0]
def gt_state_and_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_length: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray,
Union[distrax.Distribution, jnp.ndarray]]:
"""Computes the ground state and matching latents."""
assert unroll_direction in ("forward", "backward")
if unroll_direction == "backward" and not self.can_run_backwards:
raise ValueError("This model can not be unrolled backwards.")
images = utils.extract_image(inputs)
gt_state = utils.extract_gt_state(inputs)
if unroll_direction == "forward":
image_data = images[:, :self.num_inference_steps]
if self.can_run_backwards:
num_steps_backward = self.inferred_index
gt_start_idx = 0
else:
num_steps_backward = 0
gt_start_idx = self.inferred_index
num_steps_forward = seq_length - num_steps_backward - 1
gt_state = gt_state[:, gt_start_idx: seq_length + gt_start_idx]
elif unroll_direction == "backward":
inference_start_idx = seq_length - self.num_inference_steps
image_data = images[:, inference_start_idx: seq_length]
num_steps_forward = self.num_inference_steps - self.inferred_index - 1
num_steps_backward = seq_length - num_steps_forward - 1
gt_state = gt_state[:, :seq_length]
else:
raise NotImplementedError()
_, q_z, _, z0, z, _ = self._models_core(
params=params,
keys=jnr.split(rng, 6),
image_data=image_data,
use_mean=True,
is_training=False,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_z0=True,
)
if self.has_latent_transform:
return gt_state, z, z0
else:
return gt_state, z, q_z
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[utils.Params, utils.Params]:
if self.objective_type == "GECO":
# Initialize such that softplus(lambda_var) = 1
geco_lambda_var = jnp.asarray(jnp.log(jnp.e - 1.0))
geco_constraint_ema = jnp.asarray(0.0)
return (dict(GECO=dict(geco_lambda_var=geco_lambda_var)),
dict(GECO=dict(geco_constraint_ema=geco_constraint_ema)))
else:
return dict(), dict()
def _init_latent_system(
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Mapping[str, Any]
) -> hk.Params:
"""Initializes the parameters of the latent system."""
return self.dynamics.init(
rng,
y0=z,
dt=self.dt,
num_steps_forward=1,
num_steps_backward=0,
include_y0=True,
**kwargs
)
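# -----------------------------------------------------------------------------
# Hedged sanity check (not part of the original file): the GECO Lagrange
# multiplier is stored pre-softplus, and `log(e - 1)` is chosen in
# `_init_non_model_params_and_state` precisely so that
# `softplus(geco_lambda_var) == 1` at initialization.
def _example_geco_lambda_init():
  geco_lambda_var = jnp.log(jnp.e - 1.0)
  # softplus(x) = log(1 + exp(x)), so softplus(log(e - 1)) = log(e) = 1.
  return jnp.log1p(jnp.exp(geco_lambda_var))  # == 1.0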
| deepmind-research-master | physics_inspired_models/models/deterministic_vae.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the base abstract classes for sequence models."""
import abc
from typing import Any, Dict, Generic, Mapping, Optional, Sequence, Tuple, TypeVar, Union
from absl import logging
import distrax
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jnr
from physics_inspired_models import utils
from physics_inspired_models.models import networks
T = TypeVar("T")
class SequenceModel(abc.ABC, Generic[T]):
"""An abstract class for sequence models."""
def __init__(
self,
can_run_backwards: bool,
latent_system_dim: int,
latent_system_net_type: str,
latent_system_kwargs: Dict[str, Any],
encoder_aggregation_type: Optional[str],
decoder_de_aggregation_type: Optional[str],
encoder_kwargs: Dict[str, Any],
decoder_kwargs: Dict[str, Any],
num_inference_steps: int,
num_target_steps: int,
name: str,
latent_spatial_shape: Optional[Tuple[int, int]] = (4, 4),
has_latent_transform: bool = False,
latent_transform_kwargs: Optional[Dict[str, Any]] = None,
rescale_by: Optional[str] = "pixels_and_time",
data_format: str = "NHWC",
**unused_kwargs
):
    # Argument checks
encoder_kwargs = encoder_kwargs or dict()
decoder_kwargs = decoder_kwargs or dict()
    # Set the decoder de-aggregation type to the "same" type as the encoder if
    # not provided
if (decoder_de_aggregation_type is None and
encoder_aggregation_type is not None):
if encoder_aggregation_type == "linear_projection":
decoder_de_aggregation_type = "linear_projection"
elif encoder_aggregation_type in ("mean", "max"):
decoder_de_aggregation_type = "tile"
else:
raise ValueError(f"Unrecognized encoder_aggregation_type="
f"{encoder_aggregation_type}")
if latent_system_net_type == "conv":
if encoder_aggregation_type is not None:
raise ValueError("When the latent system is convolutional, the encoder "
"aggregation type should be None.")
if decoder_de_aggregation_type is not None:
raise ValueError("When the latent system is convolutional, the decoder "
"aggregation type should be None.")
else:
if encoder_aggregation_type is None:
raise ValueError("When the latent system is not convolutional, the "
"you must provide an encoder aggregation type.")
if decoder_de_aggregation_type is None:
raise ValueError("When the latent system is not convolutional, the "
"you must provide an decoder aggregation type.")
if has_latent_transform and latent_transform_kwargs is None:
raise ValueError("When using latent transformation you have to provide "
"the latent_transform_kwargs argument.")
if unused_kwargs:
logging.warning("Unused kwargs: %s", str(unused_kwargs))
super().__init__(**unused_kwargs)
self.can_run_backwards = can_run_backwards
self.latent_system_dim = latent_system_dim
self.latent_system_kwargs = latent_system_kwargs
self.latent_system_net_type = latent_system_net_type
self.latent_spatial_shape = latent_spatial_shape
self.num_inference_steps = num_inference_steps
self.num_target_steps = num_target_steps
self.rescale_by = rescale_by
self.data_format = data_format
self.name = name
# Encoder
self.encoder_kwargs = encoder_kwargs
self.encoder = hk.transform(
lambda *args, **kwargs: networks.SpatialConvEncoder( # pylint: disable=unnecessary-lambda,g-long-lambda
latent_dim=latent_system_dim,
aggregation_type=encoder_aggregation_type,
data_format=data_format,
name="Encoder",
**encoder_kwargs
)(*args, **kwargs))
# Decoder
self.decoder_kwargs = decoder_kwargs
self.decoder = hk.transform(
lambda *args, **kwargs: networks.SpatialConvDecoder( # pylint: disable=unnecessary-lambda,g-long-lambda
initial_spatial_shape=self.latent_spatial_shape,
de_aggregation_type=decoder_de_aggregation_type,
data_format=data_format,
max_de_aggregation_dims=self.latent_system_dim // 2,
name="Decoder",
**decoder_kwargs,
)(*args, **kwargs))
self.has_latent_transform = has_latent_transform
if has_latent_transform:
self.latent_transform = hk.transform(
lambda *args, **kwargs: networks.make_flexible_net( # pylint: disable=unnecessary-lambda,g-long-lambda
net_type=latent_system_net_type,
output_dims=latent_system_dim,
name="LatentTransform",
**latent_transform_kwargs
)(*args, **kwargs))
else:
self.latent_transform = None
self._jit_init = None
@property
@abc.abstractmethod
def train_sequence_length(self) -> int:
"""Computes the total length of a sequence needed for training or evaluation."""
pass
@abc.abstractmethod
def train_data_split(
self,
images: jnp.ndarray,
) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, Any]]:
"""Extracts from the inputs the data splits for training."""
pass
def decode_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> distrax.Distribution:
"""Decodes the latent variable given the parameters of the model."""
    # Allow running with both the full parameters and only the decoder's
if self.latent_system_net_type == "mlp":
fixed_dims = 1
elif self.latent_system_net_type == "conv":
fixed_dims = 1 + len(self.latent_spatial_shape)
else:
raise NotImplementedError()
n_shape = z.shape[:-fixed_dims]
z = z.reshape((-1,) + z.shape[-fixed_dims:])
x = self.decoder.apply(params, rng, z, **kwargs)
return jax.tree_map(lambda a: a.reshape(n_shape + a.shape[1:]), x)
def apply_latent_transform(
self,
params: hk.Params,
key: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> jnp.ndarray:
if self.latent_transform is not None:
return self.latent_transform.apply(params, key, z, **kwargs)
else:
return z
@abc.abstractmethod
def process_inputs_for_encoder(self, x: jnp.ndarray) -> jnp.ndarray:
pass
@abc.abstractmethod
def process_latents_for_dynamics(self, z: jnp.ndarray) -> T:
pass
@abc.abstractmethod
def process_latents_for_decoder(self, z: T) -> jnp.ndarray:
pass
@abc.abstractmethod
def unroll_latent_dynamics(
self,
z: T,
params: utils.Params,
key: jnp.ndarray,
num_steps_forward: int,
num_steps_backward: int,
include_z0: bool,
is_training: bool,
**kwargs: Any
) -> Tuple[T, Mapping[str, jnp.ndarray]]:
"""Unrolls the latent dynamics starting from z and pre-processing for the decoder."""
pass
@abc.abstractmethod
def reconstruct(
self,
params: utils.Params,
inputs: jnp.ndarray,
rng_key: Optional[jnp.ndarray],
forward: bool,
) -> distrax.Distribution:
"""Using the first `num_inference_steps` parts of inputs reconstructs the rest."""
pass
@abc.abstractmethod
def training_objectives(
self,
params: utils.Params,
state: hk.State,
rng: jnp.ndarray,
inputs: Union[Dict[str, jnp.ndarray], jnp.ndarray],
step: jnp.ndarray,
is_training: bool = True,
use_mean_for_eval_stats: bool = True
) -> Tuple[jnp.ndarray, Sequence[Dict[str, jnp.ndarray]]]:
"""Returns all training objectives statistics and update states."""
pass
@property
@abc.abstractmethod
def inferred_index(self):
"""Returns the time index in the input sequence, for which the encoder infers.
If the encoder takes as input the sequence x[0:n-1], where
`n = self.num_inference_steps`, then this outputs the index `k` relative to
the begging of the input sequence `x_0`, which the encoder infers.
"""
pass
@property
def inferred_right_offset(self):
return self.num_inference_steps - 1 - self.inferred_index
@abc.abstractmethod
def gt_state_and_latents(
self,
params: hk.Params,
rng: jnp.ndarray,
inputs: Dict[str, jnp.ndarray],
seq_len: int,
is_training: bool = False,
unroll_direction: str = "forward",
**kwargs: Dict[str, Any]
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Computes the ground state and matching latents."""
pass
@abc.abstractmethod
def _init_non_model_params_and_state(
self,
rng: jnp.ndarray
) -> Tuple[utils.Params, utils.Params]:
"""Initializes any non-model parameters and state."""
pass
@abc.abstractmethod
def _init_latent_system(
self,
rng: jnp.ndarray,
z: jnp.ndarray,
**kwargs: Any
) -> hk.Params:
"""Initializes the parameters of the latent system."""
pass
def _init(
self,
rng: jnp.ndarray,
images: jnp.ndarray
) -> Tuple[hk.Params, hk.State]:
"""Initializes the whole model parameters and state."""
inference_data, _, _ = self.train_data_split(images)
# Initialize parameters and state for the vae training
rng, key = jnr.split(rng)
params, state = self._init_non_model_params_and_state(key)
# Initialize and run encoder
inference_data = self.process_inputs_for_encoder(inference_data)
rng, key = jnr.split(rng)
encoder_params = self.encoder.init(key, inference_data, is_training=True)
rng, key = jnr.split(rng)
z_in = self.encoder.apply(encoder_params, key, inference_data,
is_training=True)
# For probabilistic models this will be a distribution
if isinstance(z_in, distrax.Distribution):
z_in = z_in.mean()
# Initialize and run the optional latent transform
if self.latent_transform is not None:
rng, key = jnr.split(rng)
transform_params = self.latent_transform.init(key, z_in, is_training=True)
rng, key = jnr.split(rng)
z_in = self.latent_transform.apply(transform_params, key, z_in,
is_training=True)
else:
transform_params = dict()
# Initialize and run the latent system
z_in = self.process_latents_for_dynamics(z_in)
rng, key = jnr.split(rng)
latent_params = self._init_latent_system(key, z_in, is_training=True)
rng, key = jnr.split(rng)
z_out, _ = self.unroll_latent_dynamics(
z=z_in,
params=latent_params,
key=key,
num_steps_forward=1,
num_steps_backward=0,
include_z0=False,
is_training=True
)
z_out = self.process_latents_for_decoder(z_out)
# Initialize and run the decoder
rng, key = jnr.split(rng)
decoder_params = self.decoder.init(key, z_out[:, 0], is_training=True)
_ = self.decoder.apply(decoder_params, rng, z_out[:, 0], is_training=True)
# Combine all and make immutable
params = hk.data_structures.merge(params, encoder_params, transform_params,
latent_params, decoder_params)
params = hk.data_structures.to_immutable_dict(params)
state = hk.data_structures.to_immutable_dict(state)
return params, state
def init(
self,
rng: jnp.ndarray,
inputs_or_shape: Union[jnp.ndarray, Mapping[str, jnp.ndarray],
Sequence[int]],
) -> Tuple[utils.Params, hk.State]:
"""Initializes the whole model parameters and state."""
if (isinstance(inputs_or_shape, (tuple, list))
and isinstance(inputs_or_shape[0], int)):
images = jnp.zeros(inputs_or_shape)
else:
images = utils.extract_image(inputs_or_shape)
if self._jit_init is None:
self._jit_init = jax.jit(self._init)
return self._jit_init(rng, images)
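# --- Illustrative sketch (not part of the original file) ---------------------
# A hypothetical numeric example of the index bookkeeping documented in
# `inferred_index` / `inferred_right_offset` above: if the encoder consumes a
# window of `num_inference_steps = 5` frames and infers the state at index 2 of
# that window, the offset from the right edge of the window is 5 - 1 - 2 = 2.
def _example_inferred_right_offset(num_inference_steps: int = 5,
                                   inferred_index: int = 2) -> int:
  """Mirrors the `inferred_right_offset` formula, for illustration only."""
  return num_inference_steps - 1 - inferred_index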
| deepmind-research-master | physics_inspired_models/models/base.py |
# Copyright 2020 DeepMind Technologies Limited.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing all of the networks as Haiku modules."""
from typing import Any, Mapping, Optional, Tuple, Union
from dm_hamiltonian_dynamics_suite.hamiltonian_systems import phase_space
import haiku as hk
import jax
import jax.numpy as jnp
from physics_inspired_models import integrators
from physics_inspired_models import utils
from physics_inspired_models.models import networks
_PhysicsSimulationOutput = Union[
phase_space.PhaseSpace,
Tuple[phase_space.PhaseSpace, Mapping[str, jnp.ndarray]]
]
class PhysicsSimulationNetwork(hk.Module):
"""A model for simulating an abstract physical system, whose energy is defined by a neural network."""
def __init__(
self,
system_dim: int,
input_space: str,
simulation_space: str,
potential_func_form: str,
kinetic_func_form: str,
parametrize_mass_matrix: bool,
net_kwargs: Mapping[str, Any],
mass_eps: float = 1.0,
integrator_method: Optional[str] = None,
steps_per_dt: int = 1,
ode_int_kwargs: Optional[Mapping[str, float]] = None,
use_scan: bool = True,
feature_axis: int = -1,
features_extra_dims: Optional[int] = None,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None
):
"""Initializes the model.
Args:
system_dim: The number of system dimensions. Note that this specifies the
number of dimensions only of the position vectors, not of position and
momentum. Hence the generalized coordinates would be of dimension
`2 * system_dim`.
input_space: Either `velocity` or `momentum`. Specifies whether the inputs
to the model are to be interpreted as `(position, velocity)` or as
`(position, momentum)`.
simulation_space: Either `velocity` or `momentum`. Specifies whether the
model should simulate the dynamics in `(position, velocity)` space
using the Lagrangian formulation or in `(position, momentum)` space
using the Hamiltonian formulation. If this is different than the value
of `input_space` then `kinetic_func_form` must be one of pure_quad,
matrix_diag_quad, matrix_quad, matrix_dep_diag_quad, matrix_dep_quad.
In all other cases one can not compute analytically the form of the
functional (Lagrangian or Hamiltonian) from the other.
potential_func_form: String specifying the form of the potential energy:
* separable_net - The network uses only the position:
U(q, q_dot/p) = f(q) f: R^d -> R
* dep_net - The network uses both the position and velocity/momentum:
U(q, q_dot/p) = f(q, q_dot/p) f: R^d x R^d -> R
        * embed_quad - A quadratic form of a network embedding of the position:
            U(q, q_dot/p) = f(q)^T f(q) / 2 f: R^d -> R^d
      kinetic_func_form: String specifying the form of the kinetic energy:
* separable_net - The network uses only the velocity/momentum:
K(q, q_dot/p) = f(q_dot/p) f: R^d -> R
* dep_net - The network uses both the position and velocity/momentum:
K(q, q_dot/p) = f(q, q_dot/p) f: R^d x R^d -> R
* pure_quad - A quadratic function of the velocity/momentum:
K(q, q_dot/p) = (q_dot/p)^T (q_dot/p) / 2
* matrix_diag_quad - A quadratic function of the velocity/momentum,
            where there is a diagonal mass matrix, whose log `P` is a parameter:
K(q, q_dot) = q_dot^T M q_dot / 2
K(q, p) = p^T M^-1 p / 2
[if `parameterize_mass_matrix`]
M = diag(exp(P) + mass_eps)
[else]
M^-1 = diag(exp(P) + mass_eps)
* matrix_quad - A quadratic function of the velocity/momentum, where
there is a full mass matrix, whose Cholesky factor L is a parameter:
K(q, q_dot) = q_dot^T M q_dot / 2
K(q, p) = p^T M^-1 p / 2
[if `parameterize_mass_matrix`]
M = LL^T + mass_eps * I
[else]
M^-1 = LL^T + mass_eps * I
* matrix_dep_quad - A quadratic function of the velocity/momentum, where
there is a full mass matrix defined as a function of the position:
K(q, q_dot) = q_dot^T M(q) q_dot / 2
K(q, p) = p^T M(q)^-1 p / 2
[if `parameterize_mass_matrix`]
M(q) = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
[else]
M(q)^-1 = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
        * embed_quad - A quadratic form of a network embedding of the
          velocity/momentum:
          K(q, q_dot/p) = f(q_dot/p)^T f(q_dot/p) / 2 f: R^d -> R^d
        * matrix_dep_diag_embed_quad - A quadratic form of a network embedding
          of the velocity/momentum, with a diagonal mass matrix defined as a
          function of the position:
          K(q, q_dot) = f(q_dot)^T M(q) f(q_dot) / 2 f: R^d -> R^d
          K(q, p) = f(p)^T M(q)^-1 f(p) / 2 f: R^d -> R^d
          [if `parameterize_mass_matrix`]
            M(q) = diag(exp(g(q))) + mass_eps * I g: R^d -> R^d
          [else]
            M(q)^-1 = diag(exp(g(q))) + mass_eps * I g: R^d -> R^d
        * matrix_dep_embed_quad - A quadratic form of a network embedding of
          the velocity/momentum, with a full mass matrix defined as a function
          of the position:
K(q, q_dot) = f(q_dot)^T M(q) f(q_dot) / 2 f: R^d -> R^d
K(q, p) = f(p)^T M(q)^-1 f(p) / 2 f: R^d -> R^d
[if `parameterize_mass_matrix`]
M(q) = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
[else]
M(q)^-1 = g(q) g(q)^T + mass_eps * I g: R^d -> R^(d(d+1)/2)
For any of the function forms with mass matrices, if we have a
convolutional input it is assumed that the matrix is shared across all
spatial locations.
parametrize_mass_matrix: Defines for the kinetic functional form, whether
the network output defines the mass or the inverse of the mass matrix.
net_kwargs: Any keyword arguments to pass down to the networks.
mass_eps: The additional weight of the identity added to the mass matrix,
when relevant.
integrator_method: What method to use for integrating the system.
steps_per_dt: How many internal steps per a single `dt` step to do.
ode_int_kwargs: Extra arguments when using "implicit" integrator method.
use_scan: Whether to use `lax.scan` for explicit integrators.
feature_axis: The number of the features axis in the inputs.
features_extra_dims: If the inputs have extra features (like spatial for
convolutions) this specifies how many of them there are.
network_creation_func: A function that creates the networks. Should have a
signature `network_creation_func(output_dims, name, **net_kwargs)`.
name: The name of this Haiku module.
"""
super().__init__(name=name)
if input_space not in ("velocity", "momentum"):
raise ValueError("input_space must be either velocity or momentum.")
if simulation_space not in ("velocity", "momentum"):
raise ValueError("simulation_space must be either velocity or momentum.")
    if potential_func_form not in ("separable_net", "dep_net", "embed_quad"):
      raise ValueError(
          f"Unrecognized potential func form {potential_func_form}.")
if kinetic_func_form not in ("separable_net", "dep_net", "pure_quad",
"matrix_diag_quad", "matrix_quad",
"matrix_dep_diag_quad", "matrix_dep_quad",
"embed_quad", "matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
raise ValueError(f"Unrecognized kinetic func form {kinetic_func_form}.")
if input_space != simulation_space:
if kinetic_func_form not in (
"pure_quad", "matrix_diag_quad", "matrix_quad",
"matrix_dep_diag_quad", "matrix_dep_quad"):
raise ValueError(
"When the input and simulation space are not the same, it is "
"possible to simulate the physical system only if kinetic_func_form"
" is one of pure_quad, matrix_diag_quad, matrix_quad, "
"matrix_dep_diag_quad, matrix_dep_quad. In all other cases one can"
"not compute analytically the form of the functional (Lagrangian or"
" Hamiltonian) from the other.")
if feature_axis != -1:
raise ValueError("Currently we only support features_axis=-1.")
if integrator_method is None:
if simulation_space == "velocity":
integrator_method = "rk2"
else:
integrator_method = "leap_frog"
if features_extra_dims is None:
if net_kwargs["net_type"] == "mlp":
features_extra_dims = 0
elif net_kwargs["net_type"] == "conv":
features_extra_dims = 2
else:
raise NotImplementedError()
ode_int_kwargs = dict(ode_int_kwargs or {})
ode_int_kwargs.setdefault("rtol", 1e-6)
ode_int_kwargs.setdefault("atol", 1e-6)
ode_int_kwargs.setdefault("mxstep", 50)
self.system_dim = system_dim
self.input_space = input_space
self.simulation_space = simulation_space
self.potential_func_form = potential_func_form
self.kinetic_func_form = kinetic_func_form
self.parametrize_mass_matrix = parametrize_mass_matrix
self.features_axis = feature_axis
self.features_extra_dims = features_extra_dims
self.integrator_method = integrator_method
self.steps_per_dt = steps_per_dt
self.ode_int_kwargs = ode_int_kwargs
self.net_kwargs = net_kwargs
self.mass_eps = mass_eps
self.use_scan = use_scan
self.name = name
self.potential_net = network_creation_func(
output_dims=1, name="PotentialNet", **net_kwargs)
if kinetic_func_form in ("separable_net", "dep_net"):
self.kinetic_net = network_creation_func(
output_dims=1, name="KineticNet", **net_kwargs)
else:
self.kinetic_net = None
if kinetic_func_form in ("matrix_dep_quad", "matrix_dep_embed_quad"):
output_dims = (system_dim * (system_dim + 1)) // 2
name = "MatrixNet" if parametrize_mass_matrix else "InvMatrixNet"
self.mass_matrix_net = network_creation_func(
output_dims=output_dims, name=name, **net_kwargs)
elif kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
name = "MatrixNet" if parametrize_mass_matrix else "InvMatrixNet"
self.mass_matrix_net = network_creation_func(
output_dims=system_dim, name=name, **net_kwargs)
else:
self.mass_matrix_net = None
if kinetic_func_form in ("embed_quad", "matrix_dep_diag_embed_quad",
"matrix_dep_embed_quad"):
self.kinetic_embed_net = network_creation_func(
output_dims=system_dim, name="KineticEmbed", **net_kwargs)
else:
self.kinetic_embed_net = None
def sum_per_dim_energy(self, energy: jnp.ndarray) -> jnp.ndarray:
"""Sums the per dimension energy."""
axis = [-i-1 for i in range(self.features_extra_dims + 1)]
return jnp.sum(energy, axis=axis)
def feature_matrix_vector(self, m, v):
"""A utility function to compute the product of a matrix and vector in the features axis."""
v = jnp.expand_dims(v, axis=self.features_axis-1)
return jnp.sum(m * v, axis=self.features_axis)
def mass_matrix_mul(
self,
q: jnp.ndarray,
v: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the product of the mass matrix with a vector and throws an error if not applicable."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
raise ValueError("It is not possible to compute `M q_dot` when using a "
"network for the kinetic energy.")
if self.kinetic_func_form in ("pure_quad", "embed_quad"):
return v
if self.kinetic_func_form == "matrix_diag_quad":
if self.parametrize_mass_matrix:
m_diag_log = hk.get_parameter("MassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_diag = jnp.exp(m_diag_log) + self.mass_eps
else:
m_inv_diag_log = hk.get_parameter("InvMassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_diag = 1.0 / (jnp.exp(m_inv_diag_log) + self.mass_eps)
return m_diag * v
if self.kinetic_func_form == "matrix_quad":
if self.parametrize_mass_matrix:
m_triu = hk.get_parameter("MassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_triu = jnp.triu(m_triu)
m = jnp.matmul(m_triu.T, m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m, v)
else:
m_inv_triu = hk.get_parameter("InvMassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_inv_triu = jnp.triu(m_inv_triu)
m_inv = jnp.matmul(m_inv_triu.T, m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
solve = jnp.linalg.solve
for _ in range(v.ndim + 1 - m_inv.ndim):
solve = jax.vmap(solve, in_axes=(None, 0))
return solve(m_inv, v)
if self.kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad"):
if self.parametrize_mass_matrix:
m_diag_log = self.mass_matrix_net(q, **kwargs)
m_diag = jnp.exp(m_diag_log) + self.mass_eps
else:
m_inv_diag_log = self.mass_matrix_net(q, **kwargs)
m_diag = 1.0 / (jnp.exp(m_inv_diag_log) + self.mass_eps)
return m_diag * v
if self.kinetic_func_form in ("matrix_dep_quad",
"matrix_dep_embed_quad"):
if self.parametrize_mass_matrix:
m_triu = self.mass_matrix_net(q, **kwargs)
m_triu = utils.triu_matrix_from_v(m_triu, self.system_dim)
m = jnp.matmul(jnp.swapaxes(m_triu, -1, -2), m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m, v)
else:
m_inv_triu = self.mass_matrix_net(q, **kwargs)
m_inv_triu = utils.triu_matrix_from_v(m_inv_triu, self.system_dim)
m_inv = jnp.matmul(jnp.swapaxes(m_inv_triu, -1, -2), m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return jnp.linalg.solve(m_inv, v)
raise NotImplementedError()
def mass_matrix_inv_mul(
self,
q: jnp.ndarray,
v: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the product of the inverse mass matrix with a vector."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
raise ValueError("It is not possible to compute `M^-1 p` when using a "
"network for the kinetic energy.")
if self.kinetic_func_form in ("pure_quad", "embed_quad"):
return v
if self.kinetic_func_form == "matrix_diag_quad":
if self.parametrize_mass_matrix:
m_diag_log = hk.get_parameter("MassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_inv_diag = 1.0 / (jnp.exp(m_diag_log) + self.mass_eps)
else:
m_inv_diag_log = hk.get_parameter("InvMassMatrixDiagLog",
shape=[self.system_dim],
init=hk.initializers.Constant(0.0))
m_inv_diag = jnp.exp(m_inv_diag_log) + self.mass_eps
return m_inv_diag * v
if self.kinetic_func_form == "matrix_quad":
if self.parametrize_mass_matrix:
m_triu = hk.get_parameter("MassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_triu = jnp.triu(m_triu)
m = jnp.matmul(m_triu.T, m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
solve = jnp.linalg.solve
for _ in range(v.ndim + 1 - m.ndim):
solve = jax.vmap(solve, in_axes=(None, 0))
return solve(m, v)
else:
m_inv_triu = hk.get_parameter("InvMassMatrixU",
shape=[self.system_dim, self.system_dim],
init=hk.initializers.Identity())
m_inv_triu = jnp.triu(m_inv_triu)
m_inv = jnp.matmul(m_inv_triu.T, m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m_inv, v)
if self.kinetic_func_form in ("matrix_dep_diag_quad",
"matrix_dep_diag_embed_quad"):
if self.parametrize_mass_matrix:
m_diag_log = self.mass_matrix_net(q, **kwargs)
m_inv_diag = 1.0 / (jnp.exp(m_diag_log) + self.mass_eps)
else:
m_inv_diag_log = self.mass_matrix_net(q, **kwargs)
m_inv_diag = jnp.exp(m_inv_diag_log) + self.mass_eps
return m_inv_diag * v
if self.kinetic_func_form in ("matrix_dep_quad",
"matrix_dep_embed_quad"):
if self.parametrize_mass_matrix:
m_triu = self.mass_matrix_net(q, **kwargs)
m_triu = utils.triu_matrix_from_v(m_triu, self.system_dim)
m = jnp.matmul(jnp.swapaxes(m_triu, -2, -1), m_triu)
m = m + self.mass_eps * jnp.eye(self.system_dim)
return jnp.linalg.solve(m, v)
else:
m_inv_triu = self.mass_matrix_net(q, **kwargs)
m_inv_triu = utils.triu_matrix_from_v(m_inv_triu, self.system_dim)
m_inv = jnp.matmul(jnp.swapaxes(m_inv_triu, -2, -1), m_inv_triu)
m_inv = m_inv + self.mass_eps * jnp.eye(self.system_dim)
return self.feature_matrix_vector(m_inv, v)
raise NotImplementedError()
def momentum_from_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the momentum from position and velocity."""
def local_lagrangian(q_dot_):
# We take the sum so we can easily take gradients
return jnp.sum(self.lagrangian(
phase_space.PhaseSpace(q, q_dot_), **kwargs))
return jax.grad(local_lagrangian)(q_dot)
def velocity_from_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the velocity from position and momentum."""
def local_hamiltonian(p_):
# We take the sum so we can easily take gradients
return jnp.sum(self.hamiltonian(
phase_space.PhaseSpace(q, p_), **kwargs))
return jax.grad(local_hamiltonian)(p)
def kinetic_energy_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the kinetic energy in velocity coordinates."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
if self.input_space != "velocity":
raise ValueError("Can not evaluate the Kinetic energy from velocity, "
"when the input space is momentum and "
"kinetic_func_form is separable_net or dep_net.")
if self.kinetic_func_form == "separable_net":
s = q_dot
else:
s = jnp.concatenate([q, q_dot], axis=-1)
per_dim_energy = self.kinetic_net(s, **kwargs)
else:
if self.kinetic_embed_net is not None:
if self.input_space != "velocity":
raise ValueError("Can not evaluate the Kinetic energy from velocity, "
"when the input space is momentum and "
"kinetic_func_form is embed_quad, "
"matrix_dep_diag_embed_quad or "
"matrix_dep_embed_quad.")
q_dot = self.kinetic_embed_net(q_dot, **kwargs)
m_q_dot = self.mass_matrix_mul(q, q_dot, **kwargs)
per_dim_energy = q_dot * m_q_dot / 2
return self.sum_per_dim_energy(per_dim_energy)
def kinetic_energy_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the kinetic energy in momentum coordinates."""
if self.kinetic_func_form in ("separable_net", "dep_net"):
if self.input_space != "momentum":
raise ValueError("Can not evaluate the Kinetic energy from momentum, "
"when the input space is velocity and "
"kinetic_func_form is separable_net or dep_net.")
if self.kinetic_func_form == "separable_net":
s = p
else:
s = jnp.concatenate([q, p], axis=-1)
per_dim_energy = self.kinetic_net(s, **kwargs)
else:
if self.kinetic_embed_net is not None:
if self.input_space != "momentum":
raise ValueError("Can not evaluate the Kinetic energy from momentum, "
"when the input space is velocity and "
"kinetic_func_form is embed_quad, "
"matrix_dep_diag_embed_quad or "
"matrix_dep_embed_quad.")
p = self.kinetic_embed_net(p, **kwargs)
m_inv_p = self.mass_matrix_inv_mul(q, p, **kwargs)
per_dim_energy = p * m_inv_p / 2
return self.sum_per_dim_energy(per_dim_energy)
def potential_energy_velocity(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the potential energy in velocity coordinates."""
if self.potential_func_form == "separable_net":
per_dim_energy = self.potential_net(q, **kwargs)
    elif self.input_space != "velocity":
raise ValueError("Can not evaluate the Potential energy from velocity, "
"when the input space is momentum and "
"potential_func_form is dep_net.")
else:
s = jnp.concatenate([q, q_dot], axis=-1)
per_dim_energy = self.potential_net(s, **kwargs)
return self.sum_per_dim_energy(per_dim_energy)
def potential_energy_momentum(
self,
q: jnp.ndarray,
p: jnp.ndarray,
**kwargs
) -> jnp.ndarray:
"""Computes the potential energy in momentum coordinates."""
if self.potential_func_form == "separable_net":
per_dim_energy = self.potential_net(q, **kwargs)
elif self.input_space != "momentum":
raise ValueError("Can not evaluate the Potential energy from momentum, "
"when the input space is velocity and "
"potential_func_form is dep_net.")
else:
s = jnp.concatenate([q, p], axis=-1)
per_dim_energy = self.potential_net(s, **kwargs)
return self.sum_per_dim_energy(per_dim_energy)
def hamiltonian(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the Hamiltonian in momentum coordinates."""
potential = self.potential_energy_momentum(s.q, s.p, **kwargs)
kinetic = self.kinetic_energy_momentum(s.q, s.p, **kwargs)
# Sanity check
assert potential.shape == kinetic.shape
return kinetic + potential
def lagrangian(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the Lagrangian in velocity coordinates."""
    # Note: in velocity coordinates the `p` field of `s` stores q_dot.
    potential = self.potential_energy_velocity(s.q, s.p, **kwargs)
    kinetic = self.kinetic_energy_velocity(s.q, s.p, **kwargs)
# Sanity check
assert potential.shape == kinetic.shape
return kinetic - potential
def energy_from_momentum(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the energy of the system in momentum coordinates."""
return self.hamiltonian(s, **kwargs)
def energy_from_velocity(
self,
s: phase_space.PhaseSpace,
**kwargs
) -> jnp.ndarray:
"""Computes the energy of the system in velocity coordinates."""
q, q_dot = s.q, s.p
p = self.momentum_from_velocity(q, q_dot, **kwargs)
q_dot_p = jnp.sum(q_dot * p, self.features_axis)
return q_dot_p - self.lagrangian(s, **kwargs)
def velocity_and_acceleration(
self,
q: jnp.ndarray,
q_dot: jnp.ndarray,
**kwargs
) -> phase_space.TangentPhaseSpace:
"""Computes the velocity and acceleration of the system in velocity coordinates."""
def local_lagrangian(*q_and_q_dot):
# We take the sum so we can easily take gradients
return jnp.sum(self.lagrangian(
phase_space.PhaseSpace(*q_and_q_dot), **kwargs))
grad_q = jax.grad(local_lagrangian, 0)(q, q_dot)
grad_q_dot_func = jax.grad(local_lagrangian, 1)
_, grad_q_dot_grad_q_times_q_dot = jax.jvp(grad_q_dot_func, (q, q_dot),
(q_dot, jnp.zeros_like(q_dot)))
pre_acc_vector = grad_q - grad_q_dot_grad_q_times_q_dot
if self.kinetic_func_form in ("pure_quad", "matrix_diag_quad",
"matrix_quad", "matrix_dep_diag_quad",
"matrix_dep_quad"):
q_dot_dot = self.mass_matrix_inv_mul(q, pre_acc_vector, **kwargs)
else:
hess_q_dot = jax.vmap(jax.hessian(local_lagrangian, 1))(q, q_dot)
q_dot_dot = jnp.linalg.solve(hess_q_dot, pre_acc_vector)
return phase_space.TangentPhaseSpace(q_dot, q_dot_dot)
def simulate(
self,
y0: phase_space.PhaseSpace,
dt: Union[float, jnp.ndarray],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> _PhysicsSimulationOutput:
"""Simulates the continuous dynamics of the physical system.
Args:
y0: Initial state of the system.
dt: The size of the time intervals at which to evolve the system.
num_steps_forward: Number of steps to make into the future.
num_steps_backward: Number of steps to make into the past.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this only returns the energy of the system at each evaluation point.
"""
# Define the dynamics
if self.simulation_space == "velocity":
dy_dt = lambda t_, y: self.velocity_and_acceleration( # pylint: disable=g-long-lambda
y.q, y.p, **nets_kwargs)
# Special Haiku magic to avoid tracer issues
if hk.running_init():
return self.lagrangian(y0, **nets_kwargs)
else:
hamiltonian = lambda t_, y: self.hamiltonian(y, **nets_kwargs)
dy_dt = phase_space.poisson_bracket_with_q_and_p(hamiltonian)
if hk.running_init():
return self.hamiltonian(y0, **nets_kwargs)
# Optionally switch coordinate frame
if self.input_space == "velocity" and self.simulation_space == "momentum":
p = self.momentum_from_velocity(y0.q, y0.p, **nets_kwargs)
y0 = phase_space.PhaseSpace(y0.q, p)
if self.input_space == "momentum" and self.simulation_space == "velocity":
q_dot = self.velocity_from_momentum(y0.q, y0.p, **nets_kwargs)
y0 = phase_space.PhaseSpace(y0.q, q_dot)
yt = integrators.solve_ivp_dt_two_directions(
fun=dy_dt,
y0=y0,
t0=0.0,
dt=dt,
method=self.integrator_method,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_y0,
steps_per_dt=self.steps_per_dt,
ode_int_kwargs=self.ode_int_kwargs
)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
# Compute energies for the full trajectory
yt_energy = jax.tree_map(utils.merge_first_dims, yt)
if self.simulation_space == "momentum":
energy = self.energy_from_momentum(yt_energy, **nets_kwargs)
else:
energy = self.energy_from_velocity(yt_energy, **nets_kwargs)
energy = energy.reshape(yt.q.shape[:2])
# Optionally switch back to input coordinate frame
if self.input_space == "velocity" and self.simulation_space == "momentum":
q_dot = self.velocity_from_momentum(yt.q, yt.p, **nets_kwargs)
yt = phase_space.PhaseSpace(yt.q, q_dot)
if self.input_space == "momentum" and self.simulation_space == "velocity":
p = self.momentum_from_velocity(yt.q, yt.p, **nets_kwargs)
yt = phase_space.PhaseSpace(yt.q, p)
# Compute energy deficit
t = energy.shape[-1]
non_zero_diffs = float((t * (t - 1)) // 2)
energy_deficits = jnp.abs(energy[..., None, :] - energy[..., None])
avg_deficit = jnp.sum(energy_deficits, axis=(-2, -1)) / non_zero_diffs
max_deficit = jnp.max(energy_deficits)
# Return the states and energies
if return_stats:
return yt, dict(avg_energy_deficit=avg_deficit,
max_energy_deficit=max_deficit)
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
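# --- Illustrative sketch (not part of the original file) ---------------------
# `momentum_from_velocity` / `velocity_from_momentum` above are Legendre-
# transform identities evaluated with `jax.grad`. The standalone toy example
# below (all names hypothetical) applies the same idea to a hand-written
# quadratic Lagrangian L = 0.5 q_dot^T M q_dot - U(q), for which p = dL/dq_dot
# recovers M @ q_dot.
def _example_momentum_from_quadratic_lagrangian():
  mass = jnp.diag(jnp.array([1.0, 2.0]))
  def toy_lagrangian(q, q_dot):
    kinetic = 0.5 * q_dot @ mass @ q_dot
    potential = 0.5 * q @ q  # A toy quadratic potential.
    return kinetic - potential
  q = jnp.array([0.3, -0.1])
  q_dot = jnp.array([1.0, 0.5])
  p = jax.grad(toy_lagrangian, argnums=1)(q, q_dot)
  return p, mass @ q_dot  # The two results coincide for this kinetic energy.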
class OdeNetwork(hk.Module):
"""A simple haiku module for constructing a NeuralODE."""
def __init__(
self,
system_dim: int,
net_kwargs: Mapping[str, Any],
integrator_method: Optional[str] = None,
steps_per_dt: int = 1,
ode_int_kwargs: Optional[Mapping[str, float]] = None,
use_scan: bool = True,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None,
):
super().__init__(name=name)
ode_int_kwargs = dict(ode_int_kwargs or {})
ode_int_kwargs.setdefault("rtol", 1e-6)
ode_int_kwargs.setdefault("atol", 1e-6)
ode_int_kwargs.setdefault("mxstep", 50)
self.system_dim = system_dim
self.integrator_method = integrator_method or "adaptive"
self.steps_per_dt = steps_per_dt
self.ode_int_kwargs = ode_int_kwargs
self.net_kwargs = net_kwargs
self.use_scan = use_scan
self.core = network_creation_func(
output_dims=system_dim, name="Net", **net_kwargs)
def simulate(
self,
y0: jnp.ndarray,
dt: Union[float, jnp.ndarray],
num_steps_forward: int,
num_steps_backward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]]:
"""Simulates the continuous dynamics of the ODE specified by the network.
Args:
y0: Initial state of the system.
dt: The size of the time intervals at which to evolve the system.
num_steps_forward: Number of steps to make into the future.
num_steps_backward: Number of steps to make into the past.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this is just an empty dictionary.
"""
if hk.running_init():
return self.core(y0, **nets_kwargs)
yt = integrators.solve_ivp_dt_two_directions(
fun=lambda t, y: self.core(y, **nets_kwargs),
y0=y0,
t0=0.0,
dt=dt,
method=self.integrator_method,
num_steps_forward=num_steps_forward,
num_steps_backward=num_steps_backward,
include_y0=include_y0,
steps_per_dt=self.steps_per_dt,
ode_int_kwargs=self.ode_int_kwargs
)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
if return_stats:
return yt, dict()
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
class DiscreteDynamicsNetwork(hk.Module):
"""A simple haiku module for constructing a discrete dynamics network."""
def __init__(
self,
system_dim: int,
residual: bool,
net_kwargs: Mapping[str, Any],
use_scan: bool = True,
network_creation_func=networks.make_flexible_net,
name: Optional[str] = None,
):
super().__init__(name=name)
self.system_dim = system_dim
self.residual = residual
self.net_kwargs = net_kwargs
self.use_scan = use_scan
self.core = network_creation_func(
output_dims=system_dim, name="Net", **net_kwargs)
def simulate(
self,
y0: jnp.ndarray,
num_steps_forward: int,
include_y0: bool,
return_stats: bool = True,
**nets_kwargs
) -> Union[jnp.ndarray, Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]]:
"""Simulates the dynamics of the discrete system.
Args:
y0: Initial state of the system.
num_steps_forward: Number of steps to make into the future.
include_y0: Whether to include the initial state in the result.
return_stats: Whether to return additional statistics.
**nets_kwargs: Keyword arguments to pass to the networks.
Returns:
* The state of the system evolved as many steps as specified by the
arguments into the past and future, all in chronological order.
* Optionally return a dictionary of additional statistics. For the moment
this is just an empty dictionary.
"""
    if num_steps_forward < 1:
      raise ValueError("It is required to unroll at least one step.")
nets_kwargs.pop("dt", None)
nets_kwargs.pop("num_steps_backward", None)
if hk.running_init():
return self.core(y0, **nets_kwargs)
def step(*args):
y, _ = args
if self.residual:
y_next = y + self.core(y, **nets_kwargs)
else:
y_next = self.core(y, **nets_kwargs)
return y_next, y_next
if self.use_scan:
_, yt = jax.lax.scan(step, init=y0, xs=None, length=num_steps_forward)
if include_y0:
yt = jnp.concatenate([y0[None], yt], axis=0)
# Make time axis second
yt = jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), yt)
else:
yt = [y0]
for _ in range(num_steps_forward):
yt.append(step(yt[-1], None)[0])
if not include_y0:
yt = yt[1:]
if len(yt) == 1:
yt = yt[0][:, None]
else:
        yt = jax.tree_map(lambda *args: jnp.stack(args, 1), *yt)
if return_stats:
return yt, dict()
else:
return yt
def __call__(self, *args, **kwargs):
return self.simulate(*args, **kwargs)
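# --- Illustrative sketch (not part of the original file) ---------------------
# The `use_scan` branch of `DiscreteDynamicsNetwork.simulate` above unrolls a
# step function with `jax.lax.scan`. The standalone sketch below mirrors that
# pattern with a stand-in update rule in place of `self.core`.
def _example_residual_scan_unroll(y0: jnp.ndarray, num_steps: int):
  def step(y, _):
    y_next = y + 0.1 * jnp.tanh(y)  # Stand-in for the residual `core` update.
    return y_next, y_next
  _, yt = jax.lax.scan(step, init=y0, xs=None, length=num_steps)
  return yt  # Time-major stack of unrolled states: shape (num_steps, ...).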
| deepmind-research-master | physics_inspired_models/models/dynamics.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Episodic Memory and Synthetic Returns Core Wrapper modules."""
import collections
import haiku as hk
import jax
import jax.numpy as jnp
SRCoreWrapperOutput = collections.namedtuple(
"SRCoreWrapperOutput", ["output", "synthetic_return", "augmented_return",
"sr_loss"])
class EpisodicMemory(hk.RNNCore):
"""Episodic Memory module."""
def __init__(self, memory_size, capacity, name="episodic_memory"):
"""Constructor.
Args:
memory_size: Integer. The size of the vectors to be stored.
capacity: Integer. The maximum number of memories to store before it
becomes necessary to overwrite old memories.
name: String. A name for this Haiku module instance.
"""
super().__init__(name=name)
self._memory_size = memory_size
self._capacity = capacity
def __call__(self, inputs, prev_state):
"""Writes a new memory into the episodic memory.
Args:
inputs: A Tensor of shape ``[batch_size, memory_size]``.
prev_state: The previous state of the episodic memory, which is a tuple
        with (i) a counter of shape ``[batch_size]`` indicating how many
memories have been written so far, and (ii) a tensor of shape
``[batch_size, capacity, memory_size]`` with the full content of the
episodic memory.
Returns:
A tuple with (i) a tensor of shape ``[batch_size, capacity, memory_size]``
with the full content of the episodic memory, including the newly
written memory, and (ii) the new state of the episodic memory.
"""
inputs = jax.lax.stop_gradient(inputs)
counter, memories = prev_state
counter_mod = jnp.mod(counter, self._capacity)
slot_selector = jnp.expand_dims(
jax.nn.one_hot(counter_mod, self._capacity), axis=2)
memories = memories * (1 - slot_selector) + (
slot_selector * jnp.expand_dims(inputs, 1))
counter = counter + 1
return memories, (counter, memories)
def initial_state(self, batch_size):
"""Creates the initial state of the episodic memory.
Args:
batch_size: Integer. The batch size of the episodic memory.
Returns:
      A tuple with (i) a counter of shape ``[batch_size]`` and (ii) a tensor
of shape ``[batch_size, capacity, memory_size]`` with the full content
of the episodic memory.
"""
if batch_size is None:
shape = []
else:
shape = [batch_size]
counter = jnp.zeros(shape)
memories = jnp.zeros(shape + [self._capacity, self._memory_size])
return (counter, memories)
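# --- Illustrative sketch (not part of the original file) ---------------------
# The masked write in `EpisodicMemory.__call__` above is a circular-buffer
# update. The stateless sketch below (hypothetical helper) shows the same
# one-hot slot write for a single, unbatched memory buffer.
def _example_circular_write(memories: jnp.ndarray, new_memory: jnp.ndarray,
                            counter: int, capacity: int) -> jnp.ndarray:
  """memories: [capacity, memory_size]; new_memory: [memory_size]."""
  slot = jax.nn.one_hot(counter % capacity, capacity)[:, None]
  return memories * (1 - slot) + slot * new_memory[None, :]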
class SyntheticReturnsCoreWrapper(hk.RNNCore):
"""Synthetic Returns core wrapper."""
def __init__(self, core, memory_size, capacity, hidden_layers, alpha, beta,
loss_func=(lambda x, y: 0.5 * jnp.square(x - y)),
apply_core_to_input=False, name="synthetic_returns_wrapper"):
"""Constructor.
Args:
core: hk.RNNCore. The recurrent core of the agent. E.g. an LSTM.
memory_size: Integer. The size of the vectors to be stored in the episodic
memory.
capacity: Integer. The maximum number of memories to store before it
becomes necessary to overwrite old memories.
hidden_layers: Tuple or list of integers, indicating the size of the
hidden layers of the MLPs used to produce synthetic returns, current
state bias, and gate.
alpha: The multiplier of the synthetic returns term in the augmented
return.
beta: The multiplier of the environment returns term in the augmented
return.
loss_func: A function of two arguments (predictions and targets) to
compute the SR loss.
apply_core_to_input: Boolean. Whether to apply the core on the inputs. If
true, the synthetic returns will be computed from the outputs of the
RNN core passed to the constructor. If false, the RNN core will be
applied only at the output of this wrapper, and the synthetic returns
will be computed from the inputs.
name: String. A name for this Haiku module instance.
"""
super().__init__(name=name)
self._em = EpisodicMemory(memory_size, capacity)
self._capacity = capacity
hidden_layers = list(hidden_layers)
self._synthetic_return = hk.nets.MLP(hidden_layers + [1])
self._bias = hk.nets.MLP(hidden_layers + [1])
self._gate = hk.Sequential([
hk.nets.MLP(hidden_layers + [1]),
jax.nn.sigmoid,
])
self._apply_core_to_input = apply_core_to_input
self._core = core
self._alpha = alpha
self._beta = beta
self._loss = loss_func
def initial_state(self, batch_size):
return (
self._em.initial_state(batch_size),
self._core.initial_state(batch_size)
)
def __call__(self, inputs, prev_state):
current_input, return_target = inputs
em_state, core_state = prev_state
(counter, memories) = em_state
if self._apply_core_to_input:
current_input, core_state = self._core(current_input, core_state)
# Synthetic return for the current state
synth_return = jnp.squeeze(self._synthetic_return(current_input), -1)
# Current state bias term
bias = self._bias(current_input)
# Gate computed from current state
gate = self._gate(current_input)
    # When counter >= capacity, the mask will be all ones
mask = 1 - jnp.cumsum(jax.nn.one_hot(counter, self._capacity), axis=1)
mask = jnp.expand_dims(mask, axis=2)
# Synthetic returns for each state in memory
past_synth_returns = hk.BatchApply(self._synthetic_return)(memories)
# Sum of synthetic returns from previous states
sr_sum = jnp.sum(past_synth_returns * mask, axis=1)
prediction = jnp.squeeze(sr_sum * gate + bias, -1)
sr_loss = self._loss(prediction, return_target)
augmented_return = jax.lax.stop_gradient(
self._alpha * synth_return + self._beta * return_target)
# Write current state to memory
_, em_state = self._em(current_input, em_state)
if not self._apply_core_to_input:
output, core_state = self._core(current_input, core_state)
else:
output = current_input
output = SRCoreWrapperOutput(
output=output,
synthetic_return=synth_return,
augmented_return=augmented_return,
sr_loss=sr_loss,
)
return output, (em_state, core_state)
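# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal, hypothetical way to run one step of the wrapper above with an
# `hk.LSTM` core under `hk.transform`. All sizes and hyper-parameters are
# placeholders rather than values used in the original work.
def _example_single_step(batch_size=2, input_size=8):
  def forward(observation, return_target):
    wrapper = SyntheticReturnsCoreWrapper(
        core=hk.LSTM(16), memory_size=input_size, capacity=10,
        hidden_layers=(32,), alpha=0.1, beta=1.0)
    state = wrapper.initial_state(batch_size)
    return wrapper((observation, return_target), state)
  forward_fn = hk.transform(forward)
  observation = jnp.zeros([batch_size, input_size])
  return_target = jnp.zeros([batch_size])
  params = forward_fn.init(jax.random.PRNGKey(0), observation, return_target)
  return forward_fn.apply(params, jax.random.PRNGKey(1), observation,
                          return_target)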
| deepmind-research-master | synthetic_returns/synthetic_returns.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Memory & Planning Game environment."""
import string
import dm_env
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
class MemoryPlanningGame(dm_env.Environment):
"""Memory & Planning Game environment."""
ACTION_NAMES = ['Up', 'Down', 'Left', 'Right', 'Collect']
NUM_ACTIONS = len(ACTION_NAMES)
DIRECTIONS = [
(0, 1), # Up
(0, -1), # Down
(-1, 0), # Left
(1, 0), # Right
(0, 0), # Collect
]
def __init__(self,
maze_size=4,
max_episode_steps=100,
target_reward=1.,
per_step_reward=0.,
random_respawn=False,
seed=None):
"""The Memory & Planning Game environment.
Args:
maze_size: (int) size of the maze dimension.
max_episode_steps: (int) number of steps per episode.
target_reward: (float) reward value of the target.
per_step_reward: (float) reward/cost of taking a step.
random_respawn: (bool) whether the agent respawns in a random location
upon collecting the goal.
seed: (int or None) seed for random number generator.
"""
self._maze_size = maze_size
self._num_labels = maze_size * maze_size
# The graph itself is the same across episodes, but the node labels will be
# randomly sampled in each episode.
self._graph = nx.grid_2d_graph(
self._maze_size, self._maze_size, periodic=True)
self._max_episode_steps = max_episode_steps
self._target_reward = target_reward
self._per_step_reward = per_step_reward
self._random_respawn = random_respawn
self._rng = np.random.RandomState(seed)
def _one_hot(self, node):
one_hot_vector = np.zeros([self._num_labels], dtype=np.int32)
one_hot_vector[self._labels[node]] = 1
return one_hot_vector
def step(self, action):
# If previous step was the last step of an episode, reset.
if self._needs_reset:
return self.reset()
# Increment step count and check if it's the last step of the episode.
self._episode_steps += 1
if self._episode_steps >= self._max_episode_steps:
self._needs_reset = True
transition = dm_env.termination
else:
transition = dm_env.transition
# Recompute agent's position given the selected action.
direction = self.DIRECTIONS[action]
self._position = tuple(
(np.array(self._position) + np.array(direction)) % self._maze_size)
self._previous_action = self.ACTION_NAMES[action]
# Get reward if agent is over the goal location and the selected action is
# `collect`.
if self._position == self._goal and self.ACTION_NAMES[action] == 'Collect':
reward = self._target_reward
self._set_new_goal()
else:
reward = self._per_step_reward
self._episode_reward += reward
return transition(reward, self._observation())
def _observation(self):
return {
'position': np.array(self._one_hot(self.position), dtype=np.int32),
'goal': np.array(self._one_hot(self.goal), dtype=np.int32),
}
def observation_spec(self):
return {
'position': dm_env.specs.Array(
shape=(self._num_labels,), dtype=np.int32, name='position'),
'goal': dm_env.specs.Array(
shape=(self._num_labels,), dtype=np.int32, name='goal'),
}
def action_spec(self):
return dm_env.specs.DiscreteArray(self.NUM_ACTIONS)
def take_random_action(self):
return self.step(self._rng.randint(self.NUM_ACTIONS))
def reset(self):
self._previous_action = ''
self._episode_reward = 0.
self._episode_steps = 0
self._needs_reset = False
random_labels = self._rng.permutation(self._num_labels)
self._labels = {n: random_labels[i]
for i, n in enumerate(self._graph.nodes())}
self._respawn()
self._set_new_goal()
return dm_env.restart(self._observation())
def _respawn(self):
random_idx = self._rng.randint(self._num_labels)
self._position = list(self._graph.nodes())[random_idx]
def _set_new_goal(self):
if self._random_respawn:
self._respawn()
goal = self._position
while goal == self._position:
random_idx = self._rng.randint(self._num_labels)
goal = list(self._graph.nodes())[random_idx]
self._goal = goal
@property
def position(self):
return self._position
@property
def goal(self):
return self._goal
@property
def previous_action(self):
return self._previous_action
@property
def episode_reward(self):
return self._episode_reward
def draw_maze(self, ax=None):
if ax is None:
plt.figure()
ax = plt.gca()
node_positions = {(x, y): (x, y) for x, y in self._graph.nodes()}
letters = string.ascii_uppercase + string.ascii_lowercase
labels = {n: letters[self._labels[n]] for n in self._graph.nodes()}
node_list = list(self._graph.nodes())
colors = []
for n in node_list:
if n == self.position:
colors.append('lightblue')
elif n == self.goal:
colors.append('lightgreen')
else:
colors.append('pink')
nx.draw(self._graph, pos=node_positions, nodelist=node_list, ax=ax,
node_color=colors, with_labels=True, node_size=200, labels=labels)
ax.set_title('{}\nEpisode reward={:.1f}'.format(
self.previous_action, self.episode_reward))
ax.margins(.1)
return plt.gcf(), ax
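# --- Illustrative sketch (not part of the original file) ---------------------
# A hypothetical random-policy rollout in the environment defined above.
def _example_random_rollout(num_steps=10, seed=0):
  env = MemoryPlanningGame(maze_size=4, max_episode_steps=num_steps, seed=seed)
  env.reset()
  for _ in range(num_steps):
    env.take_random_action()
  return env.episode_reward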
| deepmind-research-master | rapid_task_solving/memory_planning_game.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""One-shot StreetLearn environment."""
import dm_env
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
def deg_to_rad(x):
"""Convert degrees to radians."""
return x / 180. * np.pi
def rad_to_deg(x):
"""Convert radians to degrees."""
return x * 180. / np.pi
class OneShotStreetLearn(dm_env.Environment):
"""One-shot Streetlearn environment."""
ACTION_NAMES = [
'Forward',
'Left',
'Right',
'Collect',
]
NUM_ACTIONS = len(ACTION_NAMES)
def __init__(self, dataset_path, max_episode_steps, num_junctions=8,
target_reward=1., per_step_reward=0., observation_length=60,
seed=None):
self._graph = nx.read_gexf(dataset_path)
self._node_attrs = self._graph.nodes(data=True)
self._num_junctions = num_junctions
self._observation_length = observation_length
self._max_episode_steps = max_episode_steps
self._target_reward = target_reward
self._per_step_reward = per_step_reward
self._rng = np.random.RandomState(seed)
self.reset()
def reset(self):
self._previous_action = ''
self._episode_reward = 0.
self._episode_steps = 0
self._needs_reset = False
self._subgraph = self.get_random_subgraph()
self._observation_map = self.randomize_observations(self._subgraph)
self._position = self._rng.choice(list(self._subgraph.nodes()))
neighbours = self._neighbors_bearings(self._subgraph, self._position)
self._neighbour = neighbours[self._rng.randint(len(neighbours))]
self._set_new_goal()
return dm_env.restart(self._observation())
@property
def _current_edge(self):
return (self._position, self._neighbour['neighbour'])
def _set_new_goal(self):
goal = None
edges = list(self._observation_map.keys())
while goal is None or goal == self._current_edge:
goal = edges[self._rng.randint(len(edges))]
self._goal = goal
def _one_hot(self, edge):
one_hot_vector = np.zeros([self._observation_length], dtype=np.int32)
one_hot_vector[self._observation_map[edge]] = 1
return one_hot_vector
def _observation(self):
return {
'position': np.array(self._one_hot(self._current_edge), dtype=np.int32),
'goal': np.array(self._one_hot(self._goal), dtype=np.int32),
}
def observation_spec(self):
return {
'position': dm_env.specs.Array(
shape=(self._observation_length,), dtype=np.int32, name='position'),
'goal': dm_env.specs.Array(
shape=(self._observation_length,), dtype=np.int32, name='goal'),
}
def action_spec(self):
return dm_env.specs.DiscreteArray(self.NUM_ACTIONS)
def step(self, action):
# If previous step was the last step of an episode, reset.
if self._needs_reset:
return self.reset()
# Increment step count and check if it's the last step of the episode.
self._episode_steps += 1
if self._episode_steps >= self._max_episode_steps:
self._needs_reset = True
transition = dm_env.termination
else:
transition = dm_env.transition
# Recompute agent's position
self._move(action)
self._previous_action = self.ACTION_NAMES[action]
# Get reward if agent is at the goal location and the selected action is
# `collect`.
if (self._current_edge == self._goal and
self.ACTION_NAMES[action] == 'Collect'):
reward = self._target_reward
self._set_new_goal()
else:
reward = self._per_step_reward
self._episode_reward += reward
return transition(reward, self._observation())
def randomize_observations(self, subgraph):
edges = list(subgraph.edges())
edges.extend([(y, x) for (x, y) in edges])
obs_permutation = self._rng.permutation(self._observation_length)
return {e: obs_permutation[i] for i, e in enumerate(edges)}
def _calculate_bearing(self, node, neighbor):
lat1 = deg_to_rad(self._node_attrs[node]['lat'])
lng1 = deg_to_rad(self._node_attrs[node]['lng'])
lat2 = deg_to_rad(self._node_attrs[neighbor]['lat'])
lng2 = deg_to_rad(self._node_attrs[neighbor]['lng'])
delta_lng = lng2 - lng1
theta = np.arctan2(
np.sin(delta_lng) * np.cos(lat2),
np.cos(lat1) * np.sin(lat2) -
np.sin(lat1) * np.cos(lat2) * np.cos(delta_lng))
return theta
def _neighbors_bearings(self, subgraph, node):
bearings = []
for neighbor in list(subgraph[node]):
orientation = self._calculate_bearing(node, neighbor)
bearings.append({'neighbour': neighbor, 'orientation': orientation})
bearings.sort(key=lambda x: x['orientation'])
return bearings
def _sort_neighbors(self, node, neighbour):
bearings = self._neighbors_bearings(self._subgraph, node)
bs = [x['orientation'] for x in bearings]
idx = np.argmin(np.abs(bs - neighbour['orientation']))
return {
'forward': bearings[idx],
'right': bearings[idx-1],
'left': bearings[(idx+1) % len(bearings)],
}
def _move(self, action):
neighbours = self._sort_neighbors(self._position, self._neighbour)
if action == 0:
new_node = self._neighbour['neighbour']
neighbours = self._sort_neighbors(new_node, neighbours['forward'])
new_neighbour = neighbours['forward']
else:
new_node = self._position
if action == 1:
new_neighbour = neighbours['left']
elif action == 2:
new_neighbour = neighbours['right']
else:
new_neighbour = self._neighbour
self._position = new_node
self._neighbour = new_neighbour
def _all_next_junctions(self, subgraph, node):
neighbors = list(subgraph[node])
edges = [self._get_next_junction(subgraph, node, nb) for nb in neighbors]
nodes = [y for (_, y) in edges]
return nodes, edges
def _get_next_junction(self, subgraph, initial_node, next_node):
node = initial_node
while subgraph.degree(next_node) == 2:
neighbours = list(subgraph.neighbors(next_node))
neighbours.remove(node)
node = next_node
next_node = neighbours.pop()
return (initial_node, next_node)
def get_random_subgraph(self):
graph = self._graph
num_nodes = len(graph)
rnd_index = self._rng.randint(num_nodes)
center_node = list(graph.nodes())[rnd_index]
while graph.degree(center_node) <= 2:
rnd_index = self._rng.randint(num_nodes)
center_node = list(graph.nodes())[rnd_index]
to_visit = [center_node]
visited = []
subgraph = nx.Graph()
while to_visit:
node = to_visit.pop(0)
visited.append(node)
new_nodes, new_edges = self._all_next_junctions(graph, node)
subgraph.add_edges_from(new_edges)
node_degrees = [subgraph.degree(n) for n in subgraph.nodes()]
count_junctions = len(list(filter(lambda x: x > 2, node_degrees)))
if count_junctions >= self._num_junctions:
break
new_nodes = filter(lambda x: x not in visited + to_visit, new_nodes)
to_visit.extend(new_nodes)
return subgraph
def draw_subgraph(self, ax=None):
if ax is None:
_ = plt.figure(figsize=(3, 3))
ax = plt.gca()
node_ids = list(self._subgraph.nodes())
pos = {
x: (self._node_attrs[x]['lat'], self._node_attrs[x]['lng'])
for x in node_ids
}
labels = {}
nc = 'pink'
ec = 'black'
ns = 50
nshape = 'o'
# Draw the current subgraph
nx.draw(self._subgraph, pos=pos, node_color=nc, with_labels=False,
node_size=ns, labels=labels, edgecolors=ec, node_shape=nshape,
ax=ax)
max_xy = np.array([np.array(x) for x in pos.values()]).max(0)
min_xy = np.array([np.array(x) for x in pos.values()]).min(0)
delta_xy = (max_xy - min_xy) / 6.
ax.set_xlim([min_xy[0] - delta_xy[0], max_xy[0] + delta_xy[0]])
ax.set_ylim([min_xy[1] - delta_xy[1], max_xy[1] + delta_xy[1]])
# Draw goal position and orientation
x = self._node_attrs[self._goal[0]]['lat']
y = self._node_attrs[self._goal[0]]['lng']
rotation = rad_to_deg(self._calculate_bearing(*self._goal))
_ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0),
markersize=14, markerfacecolor='white')
_ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0),
markersize=12, markerfacecolor='None')
# Draw current position and orientation
x = self._node_attrs[self._position]['lat']
y = self._node_attrs[self._position]['lng']
rotation = rad_to_deg(self._neighbour['orientation'])
_ = ax.plot(x, y, marker=(3, 0, rotation - 90), color=(0, 0, 0),
markersize=14, markerfacecolor='lightgreen')
_ = ax.plot(x, y, marker=(2, 0, rotation - 90), color=(0, 0, 0),
markersize=12, markerfacecolor='None')
ax.set_title('{}\nEpisode reward = {}'.format(
self._previous_action, self._episode_reward))
return plt.gcf(), ax
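# --- Illustrative sketch (not part of the original file) ---------------------
# Hypothetical usage of the environment above; 'graph.gexf' is a placeholder
# path to a StreetLearn graph in the GEXF format expected by `nx.read_gexf`.
def _example_streetlearn_rollout(dataset_path='graph.gexf', num_steps=5):
  env = OneShotStreetLearn(dataset_path=dataset_path, max_episode_steps=20,
                           num_junctions=8, seed=0)
  rng = np.random.RandomState(0)
  timestep = env.reset()
  for _ in range(num_steps):
    timestep = env.step(rng.randint(env.NUM_ACTIONS))
  return timestep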
| deepmind-research-master | rapid_task_solving/one_shot_streetlearn.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Submission to Unrestricted Adversarial Challenge."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from unrestricted_advex import eval_kit
def _preprocess_image(image):
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.resize_bilinear(image, [224, 224], align_corners=False)
return image
def test_preprocess(image):
image = _preprocess_image(image)
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def main():
g = tf.Graph()
with g.as_default():
input_tensor = tf.placeholder(tf.float32, (None, 224, 224, 3))
x_np = test_preprocess(input_tensor)
raw_module_1 = hub.Module(
"https://tfhub.dev/deepmind/llr-pretrain-adv/latents/1")
raw_module_2 = hub.Module(
"https://tfhub.dev/deepmind/llr-pretrain-adv/linear/1")
latents = raw_module_1(dict(inputs=x_np, decay_rate=0.1))
logits = raw_module_2(dict(inputs=latents))
logits = tf.squeeze(logits, axis=[1, 2])
two_class_logits = tf.concat([tf.nn.relu(-logits[:, 1:]),
tf.nn.relu(logits[:, 1:])], axis=1)
sess = tf.train.SingularMonitoredSession()
def model(x_np):
return sess.run(two_class_logits, feed_dict={input_tensor: x_np})
eval_kit.evaluate_bird_or_bicycle_model(model, model_name="llr_resnet")
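# --- Illustrative sketch (not part of the original file) ---------------------
# The two-class logits construction in `main` maps a single logit column z to
# the pair [relu(-z), relu(z)], so the argmax recovers the sign of z. A
# standalone numpy version of the same arithmetic (hypothetical helper):
def _example_two_class_logits(single_logit_column):
  import numpy as np  # Local import; numpy is not otherwise used in this file.
  z = np.asarray(single_logit_column, dtype=np.float32).reshape(-1, 1)
  return np.concatenate([np.maximum(-z, 0.0), np.maximum(z, 0.0)], axis=1)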
if __name__ == "__main__":
main()
| deepmind-research-master | unrestricted_advx/main.py |
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for DM21 functionals interface to PySCF."""
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py',
'attrs',
# Note PySCF 1.7.6 and older do not support h5py 3.3.0:
# https://github.com/pyscf/pyscf/issues/1016
'h5py',
'numpy',
# Note DM21 functionals are compatible with PySCF 1.7.6 if an older version
# of h5py is used.
'pyscf>=2.0',
'tensorflow',
'tensorflow_hub',
]
CHECKPOINT_DATA = ['checkpoints/DM21*/*.pb', 'checkpoints/DM21*/variables/*']
setup(
name='density_functional_approximation_dm21',
version='0.1',
description='An interface to PySCF for the DM21 functionals.',
url='https://github.com/deepmind/deepmind-research/density_functional_approximation_dm21',
author='DeepMind',
author_email='[email protected]',
# Contained modules and scripts.
packages=['density_functional_approximation_dm21'],
package_data={
'density_functional_approximation_dm21': CHECKPOINT_DATA,
},
scripts=['density_functional_approximation_dm21/export_saved_model.py'],
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
extras_require={'testing': ['pytest', 'scipy']},
)
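# Example usage (not part of the original file): from this directory the
# package can typically be installed in editable mode with `pip install -e .`,
# or together with the test dependencies declared above via
# `pip install -e .[testing]`.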
| deepmind-research-master | density_functional_approximation_dm21/setup.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Computation of the Hartree-Fock exchange density.
We consider two types of potential:
1. Coulomb potential v(r,r') = 1/|r-r'|, which results in the full HF exchange
density and energy.
2. Screened (long-range) Coulomb potential v(r,r') = erf(\omega|r-r'|)/|r-r'|,
which results in the screened HF exchange density energy.
Note that PySCF and libcint treat a value of omega=0 as corresponding to the
Coulomb potential. In the following, HF refers to full HF exchange if the
Coulomb potential is used and to screened HF exchange if the screened Coulomb
potential is used.
The Hartree-Fock (HF) exchange energy can be written as:
-2 HF_x = \sum_{a,b,c,d} D_{ab} D_{cd} \int dr \int dr'
[ \chi_a(r) \chi_c(r) v(r, r') \chi_b(r') \chi_d(r') ]
where D is the density matrix, \chi_a the atomic basis functions and r, r' are
coordinates. For clarity we have dropped the spin-channel label of the density
matrix.
Defining the following intermediates:
\nu_{bd}(r) = \int dr' (\chi_b(r') v(r, r') \chi_d(r'))
E_b(r) = \sum_a D_{ab} \chi_a(r)
we get the following expression for HF:
-2 HF_x = \int dr \sum_{bd} E_b(r) E_d(r) \nu_{bd}(r)
Therefore the quantity
exx(r) = -0.5 sum_{bd} E_b(r) E_d(r) \nu_{bd}(r)
represents an energy density at location r which integrates to the HF exchange
energy.
The Fock matrix, F, is the derivative of the energy with respect to the density
matrix. If the energy depends upon the set of features {x}, then the Fock matrix
can be evaluated as \sum_x dE/dx dx/dD_{ab}. The derivatives with respect to the
features can be easily evaluated using automatic differentiation. We hence
require the derivative of exx with respect to the density matrix:
dexx(r)/dD_{ab} = -D_{cd} \chi_a(r) \chi_c(r) \nu_{bd}(r)
This is too large to store, so we instead compute the following intermediate,
and evaluate the derivative as required on the fly:
fxx_a(r) = D_{bd} \chi_a(r) \nu_{bd}(r)
Note: we compute exx and fxx for each spin channel for both restricted and
unrestricted calculations.
"""
from typing import Generator, Optional, Tuple, Union
import attr
import numpy as np
from pyscf.dft import numint
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.lib import numpy_helper
def _evaluate_nu_slow(mol: mole.Mole,
coords: np.ndarray,
omega: float,
hermi: int = 1) -> np.ndarray:
"""Computes nu integrals for given coordinates using a slow loop."""
nu = []
# Use the Gaussian nuclear model in int1e_rinv_sph to evaluate the screened
# integrals.
with mol.with_rinv_zeta(zeta=omega * omega):
# This is going to be slow...
for coord in coords:
with mol.with_rinv_origin(coord):
nu.append(mol.intor('int1e_rinv_sph', hermi=hermi))
return np.asarray(nu)
def _evaluate_nu(mol: mole.Mole,
coords: np.ndarray,
omega: float,
hermi: int = 1) -> np.ndarray:
"""Computes nu integrals for given coordinates."""
try:
with mol.with_range_coulomb(omega=omega):
# grids keyword argument supported in pyscf 2.0.0-alpha.
nu = mol.intor('int1e_grids_sph', hermi=hermi, grids=coords) # pytype: disable=wrong-keyword-args
except TypeError:
logger.info(
mol, 'Support for int1e_grids not found (requires libcint 4.4.1 and '
        'pyscf 2.0.0a or later). Falling back to slow loop over individual grid '
'points.')
nu = _evaluate_nu_slow(mol, coords, omega)
return nu
def _nu_chunk(mol: mole.Mole,
coords: np.ndarray,
omega: float,
chunk_size: int = 1000
) -> Generator[Tuple[int, int, np.ndarray], None, None]:
r"""Yields chunks of nu integrals over the grid.
Args:
mol: pyscf Mole object.
coords: coordinates, r', at which to evaluate the nu integrals, shape (N,3).
omega: range separation parameter. A value of 0 disables range-separation
(i.e. uses the kernel v(r,r') = 1/|r-r'| instead of
v(r,r') = erf(\omega |r-r'|) / |r-r'|)
chunk_size: number of coordinates to evaluate the integrals at a time.
Yields:
start_index, end_index, nu_{ab}(r) where
start_index, end_index are indices into coords,
nu is an array of shape (end_index-start_index, nao, nao), where nao is
the number of atomic orbitals and contains
nu_{ab}(r) = <a(r')|v(r,r')| b(r')>, where a,b are atomic
orbitals and r' are the grid coordinates in coords[start_index:end_index].
Raises:
ValueError: if omega is negative.
"""
if omega < 0:
raise ValueError('Range-separated parameter omega must be non-negative!')
ncoords = len(coords)
for chunk_index in range(0, ncoords, chunk_size):
end_index = min(chunk_index + chunk_size, ncoords)
coords_chunk = coords[chunk_index:end_index]
nu_chunk = _evaluate_nu(mol, coords_chunk, omega=omega)
yield chunk_index, end_index, nu_chunk
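# Illustrative sketch of how _nu_chunk is consumed (the grid size here is
# hypothetical): iterating over a grid of 2500 coordinates with chunk_size=1000
# yields blocks covering [0, 1000), [1000, 2000) and [2000, 2500), each with nu
# of shape (end - start, nao, nao):
#   for start, end, nu in _nu_chunk(mol, coords, omega=0.0, chunk_size=1000):
#     ...  # process nu for coords[start:end]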
def _compute_exx_block(nu: np.ndarray,
e: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
r"""Computes exx and fxx.
Args:
nu: batch of <i|v(r,r_k)|j> integrals, in format (k,i,j) where r_k is the
position of the k-th grid point, i and j label atomic orbitals.
    e: contraction of the density matrix with the atomic orbitals at each grid
      point, E_b(r_k) = \sum_a D_{ab} \chi_a(r_k), in format (k, b).
Returns:
exx and fxx, where
fxx_{gb} =\sum_c nu_{gbc} e_{gc} and
exx_{g} = -0.5 \sum_b e_{gb} fxx_{gb}.
"""
fxx = np.einsum('gbc,gc->gb', nu, e)
exx = -0.5 * np.einsum('gb,gb->g', e, fxx)
return exx, fxx
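# Illustrative numeric check (not part of the library): for a batch of 2 grid
# points and 3 atomic orbitals,
#   exx, fxx = _compute_exx_block(np.ones((2, 3, 3)), np.ones((2, 3)))
# gives fxx of shape (2, 3) with every entry 3.0 and exx of shape (2,) with
# every entry -4.5 (= -0.5 * sum_b 1 * 3).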
def _compute_jk_block(nu: np.ndarray, fxx: np.ndarray, dm: np.ndarray,
ao_value: np.ndarray,
weights: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Computes J and K contributions from the given block of nu integrals."""
batch_size = nu.shape[0]
vj = numpy_helper.dot(nu.reshape(batch_size, -1), dm.reshape(-1, 1))
vj = np.squeeze(vj)
vj_ao = np.einsum('g,gb->gb', vj * weights, ao_value)
j = numpy_helper.dot(ao_value.T, vj_ao)
w_ao = np.einsum('g,gb->gb', weights, ao_value)
k = numpy_helper.dot(fxx.T, w_ao)
return j, k
@attr.s(auto_attribs=True)
class HFDensityResult:
r"""Container for results returned by get_hf_density.
Note that the kernel used in all integrals is defined by the omega input
argument.
Attributes:
exx: exchange energy density at position r on the grid for the alpha, beta
spin channels. Each array is shape (N), where N is the number of grid
points.
fxx: intermediate for evaluating dexx/dD^{\sigma}_{ab}, where D is the
density matrix and \sigma is the spin coordinate. See top-level docstring
for details. Each array is shape (N, nao), where nao is the number of
atomic orbitals.
coulomb: coulomb matrix (restricted calculations) or matrices (unrestricted
calculations). Each array is shape (nao, nao).
      Restricted calculations: \sum_{cd} D_{cd} (ab|cd)
      Unrestricted calculations: \sum_{cd} D^{\sigma}_{cd} (ab|cd)
exchange: exchange matrix (restricted calculations) or matrices
(unrestricted calculations). Each array is shape (nao, nao).
      Restricted calculations: \sum_{cd} D_{cd} (ac|bd)
      Unrestricted calculations: \sum_{cd} D^{\sigma}_{cd} (ac|bd).
"""
exx: Tuple[np.ndarray, np.ndarray]
fxx: Optional[Tuple[np.ndarray, np.ndarray]] = None
coulomb: Optional[Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]] = None
exchange: Optional[Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]] = None
def get_hf_density(
mol: mole.Mole,
dm: Union[Tuple[np.ndarray, np.ndarray], np.ndarray],
coords: np.ndarray,
omega: float = 0.,
deriv: int = 0,
ao: Optional[np.ndarray] = None,
chunk_size: int = 1000,
weights: Optional[np.ndarray] = None,
) -> HFDensityResult:
r"""Computes the (range-separated) HF energy density.
Args:
mol: PySCF molecule.
dm: The density matrix. For restricted calculations, an array of shape
(M, M), where M is the number of atomic orbitals. For unrestricted
calculations, either an array of shape (2, M, M) or a tuple of arrays,
each of shape (M, M), where dm[0] is the density matrix for the alpha
electrons and dm[1] the density matrix for the beta electrons.
coords: The coordinates to compute the HF density at, shape (N, 3), where N
is the number of grid points.
    omega: The inverse width of the error function. An omega of 0. means no
      range separation, i.e. the bare 1/|r-R| kernel is used in the nu
      integrals. Otherwise, the kernel erf(\omega|r-R|)/|r-R| is used. Must be
      non-negative.
deriv: The derivative order. Only first derivatives (deriv=1) are currently
implemented. deriv=0 indicates no derivatives are required.
ao: The atomic orbitals evaluated on the grid, shape (N, M). These are
computed if not supplied.
chunk_size: The number of coordinates to compute the HF density for at once.
Reducing this saves memory since we don't have to keep as many Nus (nbasis
x nbasis) in memory at once.
weights: weight of each grid point, shape (N). If present, the Coulomb and
exchange matrices are also computed semi-numerically, otherwise only the
HF density and (if deriv=1) its first derivative are computed.
Returns:
    HFDensityResult object with the HF density (exx), the intermediate fxx used
    to evaluate derivatives with respect to the density matrix (if deriv is 1),
    and the Coulomb and
exchange matrices if the weights argument is provided.
Raises:
NotImplementedError: if a Cartesian basis set is used or if deriv is greater
than 1.
ValueError: if omega or deriv are negative.
"""
if mol.cart:
    raise NotImplementedError('Local HF exchange is not implemented for basis '
'sets with Cartesian functions!')
if deriv < 0:
raise ValueError(f'`deriv` must be non-negative, got {deriv}')
if omega < 0:
raise ValueError(f'`omega` must be non-negative, got {omega}')
if deriv > 1:
raise NotImplementedError('Higher order derivatives are not implemented.')
if isinstance(dm, tuple) or dm.ndim == 3:
dma, dmb = dm
restricted = False
else:
dma = dm / 2
dmb = dm / 2
restricted = True
logger.info(mol, 'Computing contracted density matrix ...')
if ao is None:
ao = numint.eval_ao(mol, coords, deriv=0)
e_a = np.dot(ao, dma)
e_b = np.dot(ao, dmb)
exxa = []
exxb = []
fxxa = []
fxxb = []
ja = np.zeros_like(dma)
jb = np.zeros_like(dmb)
ka = np.zeros_like(dma)
kb = np.zeros_like(dmb)
for start, end, nu in _nu_chunk(mol, coords, omega, chunk_size=chunk_size):
logger.info(mol, 'Computing exx %s / %s ...', end, len(e_a))
exxa_block, fxxa_block = _compute_exx_block(nu, e_a[start:end])
exxa.extend(exxa_block)
if not restricted:
exxb_block, fxxb_block = _compute_exx_block(nu, e_b[start:end])
exxb.extend(exxb_block)
if deriv == 1:
fxxa.extend(fxxa_block)
if not restricted:
fxxb.extend(fxxb_block)
if weights is not None:
ja_block, ka_block = _compute_jk_block(nu, fxxa_block, dma, ao[start:end],
weights[start:end])
ja += ja_block
ka += ka_block
if not restricted:
jb_block, kb_block = _compute_jk_block(nu, fxxb_block, dmb,
ao[start:end],
weights[start:end])
jb += jb_block
kb += kb_block
exxa = np.asarray(exxa)
fxxa = np.asarray(fxxa)
if restricted:
exxb = exxa
fxxb = fxxa
else:
exxb = np.asarray(exxb)
fxxb = np.asarray(fxxb)
result = HFDensityResult(exx=(exxa, exxb))
if deriv == 1:
result.fxx = (fxxa, fxxb)
if weights is not None:
if restricted:
result.coulomb = 2 * ja
result.exchange = 2 * ka
else:
result.coulomb = (ja, jb)
result.exchange = (ka, kb)
return result
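# Illustrative usage sketch (assumes `from pyscf import gto, dft` and a
# converged SCF calculation; not part of the library API):
#   mol = gto.M(atom='Ne 0 0 0', basis='sto-3g')
#   mf = dft.RKS(mol)
#   mf.kernel()
#   dm = mf.make_rdm1()
#   result = get_hf_density(mol, dm, coords=mf.grids.coords, deriv=1,
#                           weights=mf.grids.weights)
# Summing (result.exx[0] + result.exx[1]) * mf.grids.weights then recovers the
# (negative-valued) HF exchange energy for this density matrix.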
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/compute_hfx_density.py |
#!/usr/bin/env python3
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper for exporting a functional and its derivatives to a saved_model."""
from typing import Sequence
from absl import app
from absl import flags
from density_functional_approximation_dm21 import neural_numint
_OUT_DIR = flags.DEFINE_string(
'out_dir', None, 'Output directory.', required=True)
_BATCH_SIZE = flags.DEFINE_integer(
'batch_size',
1000,
'Number of grid points exported functional will process in a single call.',
lower_bound=0)
_FUNCTIONAL = flags.DEFINE_enum_class('functional',
neural_numint.Functional.DM21,
neural_numint.Functional,
'Functional to export.')
def export(
functional: neural_numint.Functional,
export_path: str,
batch_dim: int,
) -> None:
"""Export a functional and its derivatives to a single saved_model.
Args:
functional: functional to export.
    export_path: path to save the model to.
batch_dim: number of grid points to process in a single call.
"""
ni = neural_numint.NeuralNumInt(functional)
ni.export_functional_and_derivatives(
export_path=export_path, batch_dim=batch_dim)
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
export(_FUNCTIONAL.value, _OUT_DIR.value, _BATCH_SIZE.value)
if __name__ == '__main__':
app.run(main)
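# Illustrative invocation using the flags defined above (the output path is
# hypothetical):
#   python export_saved_model.py --out_dir=/tmp/dm21_export --functional=DM21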
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/export_saved_model.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_numint."""
import os
from absl.testing import parameterized
import attr
from pyscf import dft
from pyscf import gto
from pyscf import lib
import tensorflow.compat.v1 as tf
from density_functional_approximation_dm21 import neural_numint
class NeuralNumintTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
# Golden values were obtained using the version of PySCF (including integral
# generation) reported in the DM21 paper.
@parameterized.parameters(
{
'functional': neural_numint.Functional.DM21,
'expected_energy': -126.898521
},
{
'functional': neural_numint.Functional.DM21m,
'expected_energy': -126.907332
},
{
'functional': neural_numint.Functional.DM21mc,
'expected_energy': -126.922127
},
{
'functional': neural_numint.Functional.DM21mu,
'expected_energy': -126.898178
},
)
def test_rks(self, functional, expected_energy):
ni = neural_numint.NeuralNumInt(functional)
mol = gto.Mole()
mol.atom = [['Ne', 0., 0., 0.]]
mol.basis = 'sto-3g'
mol.build()
mf = dft.RKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
self.assertAlmostEqual(mf.e_tot, expected_energy, delta=2.e-4)
@parameterized.parameters(
{
'functional': neural_numint.Functional.DM21,
'expected_energy': -37.34184876
},
{
'functional': neural_numint.Functional.DM21m,
'expected_energy': -37.3377766
},
{
'functional': neural_numint.Functional.DM21mc,
'expected_energy': -37.33489173
},
{
'functional': neural_numint.Functional.DM21mu,
'expected_energy': -37.34015315
},
)
def test_uks(self, functional, expected_energy):
ni = neural_numint.NeuralNumInt(functional)
mol = gto.Mole()
mol.atom = [['C', 0., 0., 0.]]
mol.spin = 2
mol.basis = 'sto-3g'
mol.build()
mf = dft.UKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
self.assertAlmostEqual(mf.e_tot, expected_energy, delta=2.e-4)
def test_exported_model(self):
mol = gto.Mole()
mol.atom = [['C', 0., 0., 0.]]
mol.spin = 2
mol.basis = 'sto-3g'
mol.build()
ni = neural_numint.NeuralNumInt(neural_numint.Functional.DM21)
mf = dft.UKS(mol)
mf.small_rho_cutoff = 1.e-20
mf._numint = ni
mf.run()
dms = mf.make_rdm1()
ao = ni.eval_ao(mol, mf.grids.coords, deriv=2)
rho_a = ni.eval_rho(mol, ao, dms[0], xctype='MGGA')
rho_b = ni.eval_rho(mol, ao, dms[1], xctype='MGGA')
inputs, _ = ni.construct_functional_inputs(
mol=mol,
dms=dms,
spin=1,
coords=mf.grids.coords,
weights=mf.grids.weights,
rho=(rho_a, rho_b),
ao=ao[0])
feed_dict = dict(
zip(
attr.asdict(ni._placeholders).values(),
attr.asdict(inputs).values(),
))
with ni._graph.as_default():
outputs = ni._session.run(
{
'vxc': ni._vxc,
'vrho': ni._vrho,
'vsigma': ni._vsigma,
'vtau': ni._vtau,
'vhf': ni._vhf
},
feed_dict=feed_dict)
export_path = os.path.join(self.get_temp_dir(), 'export')
ni.export_functional_and_derivatives(export_path)
model = tf.saved_model.load_v2(export_path)
tensor_inputs = {
k: tf.constant(v, dtype=tf.float32)
for k, v in attr.asdict(inputs).items()
}
exported_output_tensors = model.signatures['default'](**tensor_inputs)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
exported_outputs = session.run(exported_output_tensors)
self.assertAllClose(outputs, exported_outputs, atol=5.e-5, rtol=1.e-5)
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/neural_numint_test.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to DM21 family of exchange-correlation functionals for PySCF."""
from density_functional_approximation_dm21.neural_numint import Functional
from density_functional_approximation_dm21.neural_numint import NeuralNumInt
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/__init__.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An interface to DM21 family of exchange-correlation functionals for PySCF."""
import enum
import os
from typing import Generator, Optional, Sequence, Tuple, Union
import attr
import numpy as np
from pyscf import dft
from pyscf import gto
from pyscf.dft import numint
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
from density_functional_approximation_dm21 import compute_hfx_density
tf.disable_v2_behavior()
# TODO(b/196260242): avoid depending upon private function
_dot_ao_ao = numint._dot_ao_ao # pylint: disable=protected-access
@enum.unique
class Functional(enum.Enum):
"""Enum for exchange-correlation functionals in the DM21 family.
Attributes:
    DM21: trained on the molecules dataset and on fractional charge and
      fractional spin constraints.
    DM21m: trained on the molecules dataset.
    DM21mc: trained on the molecules dataset and on fractional charge
      constraints.
    DM21mu: trained on the molecules dataset and on electron gas constraints.
"""
# Break pylint's preferred naming pattern to match the functional names used
# in the paper.
# pylint: disable=invalid-name
DM21 = enum.auto()
DM21m = enum.auto()
DM21mc = enum.auto()
DM21mu = enum.auto()
# pylint: enable=invalid-name
# We use attr.s here instead of dataclasses.dataclass as
# dataclasses.asdict returns a deepcopy of the attributes. This is wasteful in
# memory if they are large and breaks (as in the case of tf.Tensors) if they are
# not serializable. attr.asdict does not perform this copy and so works with
# both np.ndarrays and tf.Tensors.
@attr.s(auto_attribs=True)
class FunctionalInputs:
r""""Inputs required for DM21 functionals.
Depending upon the context, this is either a set of numpy arrays (feature
construction) or TF tensors (constructing placeholders/running functionals).
Attributes:
rho_a: Density information for the alpha electrons.
PySCF for meta-GGAs supplies a single array for the total density
(restricted calculations) and a pair of arrays, one for each spin channel
(unrestricted calculations).
Each array/tensor is of shape (6, N) and contains the density and density
derivatives, where:
rho(0, :) - density at each grid point
        rho(1, :) - derivative of the density along x at each grid point
        rho(2, :) - derivative of the density along y at each grid point
        rho(3, :) - derivative of the density along z at each grid point
        rho(4, :) - \nabla^2 \rho at each grid point [not used]
        rho(5, :) - tau, the kinetic energy density
          (1/2 \sum_i |\nabla \phi_i|^2), at each grid point.
See pyscf.dft.numint.eval_rho for more details.
We require separate inputs for both alpha- and beta-spin densities, even
in restricted calculations (where rho_a = rho_b = rho/2, where rho is the
total density).
rho_b: as for rho_a for the beta electrons.
hfx_a: local Hartree-Fock energy density at each grid point for the alpha-
spin density for each value of omega. Shape [N, len(omega_values)].
See compute_hfx_density for more details.
hfx_b: as for hfx_a for the beta-spin density.
grid_coords: grid coordinates at which to evaluate the density. Shape
(N, 3), where N is the number of grid points. Note that this is currently
unused by the functional, but is still a required input.
grid_weights: weight of each grid point. Shape (N).
"""
rho_a: Union[tf.Tensor, np.ndarray]
rho_b: Union[tf.Tensor, np.ndarray]
hfx_a: Union[tf.Tensor, np.ndarray]
hfx_b: Union[tf.Tensor, np.ndarray]
grid_coords: Union[tf.Tensor, np.ndarray]
grid_weights: Union[tf.Tensor, np.ndarray]
@attr.s(auto_attribs=True)
class _GridState:
"""Internal state required for the numerical grid.
Attributes:
coords: coordinates of the grid. Shape (N, 3), where N is the number of grid
points.
weight: weight associated with each grid point. Shape (N).
mask: mask indicating whether a shell is zero at a grid point. Shape
(N, nbas) where nbas is the number of shells in the basis set. See
pyscf.dft.gen_grids.make_mask.
ao: atomic orbitals evaluated on the grid. Shape (N, nao), where nao is the
number of atomic orbitals, or shape (:, N, nao), where the 0-th element
contains the ao values, the next three elements contain the first
derivatives, and so on.
"""
coords: np.ndarray
weight: np.ndarray
mask: np.ndarray
ao: np.ndarray
@attr.s(auto_attribs=True)
class _SystemState:
"""Internal state required for system of interest.
Attributes:
mol: PySCF molecule
dms: density matrix or matrices (unrestricted calculations only).
Restricted calculations: shape (nao, nao), where nao is the number of
atomic orbitals.
Unrestricted calculations: shape (2, nao, nao) or a sequence (length 2) of
arrays of shape (nao, nao), and dms[0] and dms[1] are the density matrices
of the alpha and beta electrons respectively.
"""
mol: gto.Mole
dms: Union[np.ndarray, Sequence[np.ndarray]]
def _get_number_of_density_matrices(dms):
"""Returns the number of density matrices in dms."""
# See pyscf.numint.NumInt._gen_rho_evaluator
if isinstance(dms, np.ndarray) and dms.ndim == 2:
return 1
return len(dms)
class NeuralNumInt(numint.NumInt):
"""A wrapper around pyscf.dft.numint.NumInt for the DM21 functionals.
In order to supply the local Hartree-Fock features required for the DM21
functionals, we lightly wrap the NumInt class. The actual evaluation of the
exchange-correlation functional is performed in NeuralNumInt.eval_xc.
Usage:
mf = dft.RKS(...) # dft.ROKS and dft.UKS are also supported.
# Specify the functional by monkey-patching mf._numint rather than using
# mf._xc or mf._define_xc_.
mf._numint = NeuralNumInt(Functional.DM21)
mf.kernel()
"""
def __init__(self,
functional: Functional,
*,
checkpoint_path: Optional[str] = None):
"""Constructs a NeuralNumInt object.
Args:
functional: member of Functional enum giving the name of the
functional.
checkpoint_path: Optional path to specify the directory containing the
checkpoints of the DM21 family of functionals. If not specified, attempt
to find the checkpoints using a path relative to the source code.
"""
self._functional_name = functional.name
if checkpoint_path:
self._model_path = os.path.join(checkpoint_path, self._functional_name)
else:
self._model_path = os.path.join(
os.path.dirname(__file__), 'checkpoints', self._functional_name)
    # All DM21 functionals use local Hartree-Fock features with both the bare
    # (non-range-separated) 1/r kernel and a range-separated kernel with
    # \omega = 0.4.
# Note an omega of 0.0 is interpreted by PySCF and libcint to indicate no
# range-separation.
self._omega_values = [0.0, 0.4]
self._graph = tf.Graph()
with self._graph.as_default():
self._build_graph()
self._session = tf.Session()
self._session.run(tf.global_variables_initializer())
self._grid_state = None
self._system_state = None
self._vmat_hf = None
super().__init__()
def _build_graph(self, batch_dim: Optional[int] = None):
"""Builds the TensorFlow graph for evaluating the functional.
Args:
batch_dim: the batch dimension of the grid to use in the model. Default:
None (determine at runtime). This should only be set if building a model
in order to export and ahead-of-time compile it into a standalone
library.
"""
self._functional = hub.Module(spec=self._model_path)
grid_coords = tf.placeholder(
tf.float32, shape=[batch_dim, 3], name='grid_coords')
grid_weights = tf.placeholder(
tf.float32, shape=[batch_dim], name='grid_weights')
# Density information.
rho_a = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_a')
rho_b = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_b')
# Split into corresponding terms.
rho_only_a, grad_a_x, grad_a_y, grad_a_z, _, tau_a = tf.unstack(
rho_a, axis=0)
rho_only_b, grad_b_x, grad_b_y, grad_b_z, _, tau_b = tf.unstack(
rho_b, axis=0)
    # Evaluate |\nabla \rho|^2 for each spin density and for the total density.
norm_grad_a = (grad_a_x**2 + grad_a_y**2 + grad_a_z**2)
norm_grad_b = (grad_b_x**2 + grad_b_y**2 + grad_b_z**2)
grad_x = grad_a_x + grad_b_x
grad_y = grad_a_y + grad_b_y
grad_z = grad_a_z + grad_b_z
norm_grad = (grad_x**2 + grad_y**2 + grad_z**2)
# The local Hartree-Fock energy densities at each grid point for the alpha-
# and beta-spin densities for each value of omega.
# Note an omega of 0 indicates no screening of the Coulomb potential.
hfxa = tf.placeholder(
tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxa')
hfxb = tf.placeholder(
tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxb')
# Make all features 2D arrays on input for ease of handling inside the
# functional.
features = {
'grid_coords': grid_coords,
'grid_weights': tf.expand_dims(grid_weights, 1),
'rho_a': tf.expand_dims(rho_only_a, 1),
'rho_b': tf.expand_dims(rho_only_b, 1),
'tau_a': tf.expand_dims(tau_a, 1),
'tau_b': tf.expand_dims(tau_b, 1),
'norm_grad_rho_a': tf.expand_dims(norm_grad_a, 1),
'norm_grad_rho_b': tf.expand_dims(norm_grad_b, 1),
'norm_grad_rho': tf.expand_dims(norm_grad, 1),
'hfxa': hfxa,
'hfxb': hfxb,
}
tensor_dict = {f'tensor_dict${k}': v for k, v in features.items()}
predictions = self._functional(tensor_dict, as_dict=True)
local_xc = predictions['grid_contribution']
weighted_local_xc = local_xc * grid_weights
unweighted_xc = tf.reduce_sum(local_xc, axis=0)
xc = tf.reduce_sum(weighted_local_xc, axis=0)
# The potential is the local exchange correlation divided by the
# total density. Add a small constant to deal with zero density.
self._vxc = local_xc / (rho_only_a + rho_only_b + 1E-12)
# The derivatives of the exchange-correlation (XC) energy with respect to
# input features. PySCF weights the (standard) derivatives by the grid
# weights, so we need to compute this with respect to the unweighted sum
# over grid points.
self._vrho = tf.gradients(
unweighted_xc, [features['rho_a'], features['rho_b']],
name='GRAD_RHO',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._vsigma = tf.gradients(
unweighted_xc, [
features['norm_grad_rho_a'], features['norm_grad_rho_b'],
features['norm_grad_rho']
],
name='GRAD_SIGMA',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._vtau = tf.gradients(
unweighted_xc, [features['tau_a'], features['tau_b']],
name='GRAD_TAU',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
# Standard meta-GGAs do not have a dependency on local HF, so we need to
# compute the contribution to the Fock matrix ourselves. Just use the
# weighted XC energy to avoid having to weight this later.
self._vhf = tf.gradients(
xc, [features['hfxa'], features['hfxb']],
name='GRAD_HFX',
unconnected_gradients=tf.UnconnectedGradients.ZERO)
self._placeholders = FunctionalInputs(
rho_a=rho_a,
rho_b=rho_b,
hfx_a=hfxa,
hfx_b=hfxb,
grid_coords=grid_coords,
grid_weights=grid_weights)
outputs = {
'vxc': self._vxc,
'vrho': tf.stack(self._vrho),
'vsigma': tf.stack(self._vsigma),
'vtau': tf.stack(self._vtau),
'vhf': tf.stack(self._vhf),
}
# Create the signature for TF-Hub, including both the energy and functional
# derivatives.
# This is a no-op if _build_graph is called outside of
# hub.create_module_spec.
hub.add_signature(
inputs=attr.asdict(self._placeholders), outputs=outputs)
def export_functional_and_derivatives(
self,
export_path: str,
batch_dim: Optional[int] = None,
):
"""Exports the TensorFlow graph containing the functional and derivatives.
The hub modules supplied contain the TensorFlow operations for the
evaluation of the exchange-correlation energy. Evaluation of the functional
derivatives, required for a self-consistent calculation, are added in
_build_graph. The module created by export_functional_and_derivatives
contains the evaluation of the functional and the functional derivatives.
This is much simpler to use from languages other than Python, e.g. using the
C or C++ TensorFlow API, or using tfcompile to create a standalone C++
library.
Args:
export_path: path to write the Hub model to. The exported model can be
loaded using either TF-Hub or SavedModel APIs.
batch_dim: the batch dimension of the grid to use in the model. Default:
None (determine at runtime). This should only be set if the exported
model is to be ahead-of-time compiled into a standalone library.
"""
with tf.Graph().as_default():
spec = hub.create_module_spec(
self._build_graph, tags_and_args=[(set(), {'batch_dim': batch_dim})])
functional_and_derivatives = hub.Module(spec=spec)
with tf.Session() as session:
session.run(tf.global_variables_initializer())
functional_and_derivatives.export(export_path, session)
# DM21* functionals include the hybrid term directly, so set the
# range-separated and hybrid parameters expected by PySCF to 0 so PySCF
# doesn't also add these contributions in separately.
def rsh_coeff(self, *args):
"""Returns the range separated parameters, omega, alpha, beta."""
return [0.0, 0.0, 0.0]
def hybrid_coeff(self, *args, **kwargs):
"""Returns the fraction of Hartree-Fock exchange to include."""
return 0.0
def _xc_type(self, *args, **kwargs):
return 'MGGA'
def nr_rks(self,
mol: gto.Mole,
grids: dft.Grids,
xc_code: str,
dms: Union[np.ndarray, Sequence[np.ndarray]],
relativity: int = 0,
hermi: int = 0,
max_memory: float = 20000,
verbose=None) -> Tuple[float, float, np.ndarray]:
"""Calculates RKS XC functional and potential matrix on a given grid.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional
based upon the functional argument given to the constructor.
dms: the density matrix or sequence of density matrices. Multiple density
matrices are not currently supported. Shape (nao, nao), where nao is the
number of atomic orbitals.
relativity: Unused. (pyscf.numint.NumInt.nr_rks does not currently use
this argument.)
      hermi: 1 if the density matrix is Hermitian, 0 if no symmetry of the
        density matrix is assumed (PySCF convention).
max_memory: the maximum cache to use, in MB.
verbose: verbosity level. Unused. (PySCF currently does not handle the
verbosity level passed in here.)
Returns:
nelec, excsum, vmat, where
nelec is the number of electrons obtained by numerical integration of
the density matrix.
excsum is the functional's XC energy.
vmat is the functional's XC potential matrix, shape (nao, nao).
Raises:
NotImplementedError: if multiple density matrices are supplied.
"""
# Wrap nr_rks so we can store internal variables required to evaluate the
# contribution to the XC potential from local Hartree-Fock features.
# See pyscf.dft.numint.nr_rks for more details.
ndms = _get_number_of_density_matrices(dms)
if ndms > 1:
raise NotImplementedError(
'NeuralNumInt does not support multiple density matrices. '
'Only ground state DFT calculations are currently implemented.')
nao = mol.nao_nr()
self._vmat_hf = np.zeros((nao, nao))
self._system_state = _SystemState(mol=mol, dms=dms)
nelec, excsum, vmat = super().nr_rks(
mol=mol,
grids=grids,
xc_code=xc_code,
dms=dms,
relativity=relativity,
hermi=hermi,
max_memory=max_memory,
verbose=verbose)
vmat += self._vmat_hf + self._vmat_hf.T
# Clear internal state to prevent accidental re-use.
self._system_state = None
self._grid_state = None
return nelec, excsum, vmat
def nr_uks(self,
mol: gto.Mole,
grids: dft.Grids,
xc_code: str,
dms: Union[Sequence[np.ndarray], Sequence[Sequence[np.ndarray]]],
relativity: int = 0,
hermi: int = 0,
max_memory: float = 20000,
verbose=None) -> Tuple[np.ndarray, float, np.ndarray]:
"""Calculates UKS XC functional and potential matrix on a given grid.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
xc_code: XC code. Unused. NeuralNumInt hard codes the XC functional
based upon the functional argument given to the constructor.
dms: the density matrix or sequence of density matrices for each spin
channel. Multiple density matrices for each spin channel are not
currently supported. Each density matrix is shape (nao, nao), where nao
is the number of atomic orbitals.
      relativity: Unused. (pyscf.dft.numint.NumInt.nr_uks does not currently use
        this argument.)
      hermi: 1 if the density matrices are Hermitian, 0 if no symmetry of the
        density matrices is assumed (PySCF convention).
max_memory: the maximum cache to use, in MB.
verbose: verbosity level. Unused. (PySCF currently does not handle the
verbosity level passed in here.)
Returns:
nelec, excsum, vmat, where
nelec is the number of alpha, beta electrons obtained by numerical
integration of the density matrix as an array of size 2.
excsum is the functional's XC energy.
vmat is the functional's XC potential matrix, shape (2, nao, nao), where
vmat[0] and vmat[1] are the potential matrices for the alpha and beta
spin channels respectively.
Raises:
NotImplementedError: if multiple density matrices for each spin channel
are supplied.
"""
# Wrap nr_uks so we can store internal variables required to evaluate the
# contribution to the XC potential from local Hartree-Fock features.
# See pyscf.dft.numint.nr_uks for more details.
if isinstance(dms, np.ndarray) and dms.ndim == 2: # RHF DM
ndms = _get_number_of_density_matrices(dms)
else:
ndms = _get_number_of_density_matrices(dms[0])
if ndms > 1:
raise NotImplementedError(
'NeuralNumInt does not support multiple density matrices. '
'Only ground state DFT calculations are currently implemented.')
nao = mol.nao_nr()
self._vmat_hf = np.zeros((2, nao, nao))
self._system_state = _SystemState(mol=mol, dms=dms)
nelec, excsum, vmat = super().nr_uks(
mol=mol,
grids=grids,
xc_code=xc_code,
dms=dms,
relativity=relativity,
hermi=hermi,
max_memory=max_memory,
verbose=verbose)
vmat[0] += self._vmat_hf[0] + self._vmat_hf[0].T
vmat[1] += self._vmat_hf[1] + self._vmat_hf[1].T
# Clear internal state to prevent accidental re-use.
self._system_state = None
self._grid_state = None
self._vmat_hf = None
return nelec, excsum, vmat
def block_loop(
self,
mol: gto.Mole,
grids: dft.Grids,
nao: Optional[int] = None,
deriv: int = 0,
max_memory: float = 2000,
non0tab: Optional[np.ndarray] = None,
blksize: Optional[int] = None,
buf: Optional[np.ndarray] = None
) -> Generator[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], None,
None]:
"""Loops over the grid by blocks. See pyscf.dft.numint.NumInt.block_loop.
Args:
mol: PySCF molecule.
grids: grid on which to evaluate the functional.
nao: number of basis functions. If None, obtained from mol.
deriv: unused. The first functional derivatives are always computed.
max_memory: the maximum cache to use for the information on the grid, in
MB. Determines the size of each block if blksize is None.
non0tab: mask determining if a shell in the basis set is zero at a grid
point. Shape (N, nbas), where N is the number of grid points and nbas
the number of shells in the basis set. Obtained from grids if not
supplied.
blksize: size of each block. Calculated from max_memory if None.
buf: buffer to use for storing ao. If None, a new array for ao is created
for each block.
Yields:
ao, mask, weight, coords: information on a block of the grid containing N'
points, where
ao: atomic orbitals evaluated on the grid. Shape (N', nao), where nao is
the number of atomic orbitals.
mask: mask indicating whether a shell in the basis set is zero at a grid
point. Shape (N', nbas).
weight: weight associated with each grid point. Shape (N').
coords: coordinates of the grid. Shape (N', 3).
"""
# Wrap block_loop so we can store internal variables required to evaluate
# the contribution to the XC potential from local Hartree-Fock features.
for ao, mask, weight, coords in super().block_loop(
mol=mol,
grids=grids,
nao=nao,
deriv=deriv,
max_memory=max_memory,
non0tab=non0tab,
blksize=blksize,
buf=buf):
      # Cache the current block so we can access it in eval_xc.
self._grid_state = _GridState(
ao=ao, mask=mask, weight=weight, coords=coords)
yield ao, mask, weight, coords
def construct_functional_inputs(
self,
mol: gto.Mole,
dms: Union[np.ndarray, Sequence[np.ndarray]],
spin: int,
coords: np.ndarray,
weights: np.ndarray,
rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],
ao: Optional[np.ndarray] = None,
) -> Tuple[FunctionalInputs, Tuple[np.ndarray, np.ndarray]]:
"""Constructs the input features required for the functional.
Args:
mol: PySCF molecule.
dms: density matrix of shape (nao, nao) (restricted calculations) or of
shape (2, nao, nao) (unrestricted calculations) or tuple of density
matrices for each spin channel, each of shape (nao, nao) (unrestricted
calculations).
spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, and
spin-polarized (unrestricted) otherwise.
coords: coordinates of the grid. Shape (N, 3), where N is the number of
grid points.
weights: weight associated with each grid point. Shape (N).
rho: density and density derivatives at each grid point. Single array
containing the total density for restricted calculations, tuple of
arrays for each spin channel for unrestricted calculations. Each array
has shape (6, N). See pyscf.dft.numint.eval_rho and comments in
FunctionalInputs for more details.
ao: The atomic orbitals evaluated on the grid, shape (N, nao). Computed if
not supplied.
Returns:
inputs, fxx, where
inputs: FunctionalInputs object containing the inputs (as np.ndarrays)
for the functional.
fxx: intermediates, shape (N, nao) for the alpha- and beta-spin
channels, required for computing the first derivative of the local
Hartree-Fock density with respect to the density matrices. See
compute_hfx_density for more details.
"""
if spin == 0:
# RKS
rhoa = rho / 2
rhob = rho / 2
else:
# UKS
rhoa, rhob = rho
# Local HF features.
exxa, exxb = [], []
fxxa, fxxb = [], []
for omega in sorted(self._omega_values):
hfx_results = compute_hfx_density.get_hf_density(
mol,
dms,
coords=coords,
omega=omega,
deriv=1,
ao=ao)
exxa.append(hfx_results.exx[0])
exxb.append(hfx_results.exx[1])
fxxa.append(hfx_results.fxx[0])
fxxb.append(hfx_results.fxx[1])
exxa = np.stack(exxa, axis=-1)
fxxa = np.stack(fxxa, axis=-1)
if spin == 0:
exx = (exxa, exxa)
fxx = (fxxa, fxxa)
else:
exxb = np.stack(exxb, axis=-1)
fxxb = np.stack(fxxb, axis=-1)
exx = (exxa, exxb)
fxx = (fxxa, fxxb)
return FunctionalInputs(
rho_a=rhoa,
rho_b=rhob,
hfx_a=exx[0],
hfx_b=exx[1],
grid_coords=coords,
grid_weights=weights), fxx
def eval_xc(
self,
xc_code: str,
rho: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],
spin: int = 0,
relativity: int = 0,
deriv: int = 1,
omega: Optional[float] = None,
verbose=None
) -> Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray],
None, None]:
"""Evaluates the XC energy and functional derivatives.
See pyscf.dft.libxc.eval_xc for more details on the interface.
    Note: this also sets self._vmat_hf, which contains the contribution to the
    potential matrix from the local Hartree-Fock terms in the functional.
Args:
xc_code: unused.
rho: density and density derivatives at each grid point. Single array
containing the total density for restricted calculations, tuple of
arrays for each spin channel for unrestricted calculations. Each array
has shape (6, N), where N is the number of grid points. See
pyscf.dft.numint.eval_rho and comments in FunctionalInputs for more
details.
spin: 0 for a spin-unpolarized (restricted Kohn-Sham) calculation, and
spin-polarized (unrestricted) otherwise.
relativity: Not supported.
deriv: unused. The first functional derivatives are always computed.
omega: RSH parameter. Not supported.
verbose: unused.
Returns:
exc, vxc, fxc, kxc, where:
        exc is the exchange-correlation energy density evaluated at each grid
          point, shape (N).
vxc is (vrho, vgamma, vlapl, vtau), the first-order functional
derivatives evaluated at each grid point, each shape (N).
fxc is set to None. (The second-order functional derivatives are not
computed.)
kxc is set to None. (The third-order functional derivatives are not
computed.)
"""
del xc_code, verbose, deriv # unused
if relativity != 0:
      raise NotImplementedError('Relativistic calculations are not implemented '
'for DM21 functionals.')
if omega is not None:
      raise NotImplementedError('User-specified range-separation parameters are '
'not implemented for DM21 functionals.')
# Retrieve cached state.
ao = self._grid_state.ao
if ao.ndim == 3:
# Just need the AO values, not the gradients.
ao = ao[0]
if self._grid_state.weight is None:
weights = np.array([1.])
else:
weights = self._grid_state.weight
mask = self._grid_state.mask
inputs, (fxxa, fxxb) = self.construct_functional_inputs(
mol=self._system_state.mol,
dms=self._system_state.dms,
spin=spin,
rho=rho,
weights=weights,
coords=self._grid_state.coords,
ao=ao)
with self._graph.as_default():
feed_dict = dict(
zip(
attr.asdict(self._placeholders).values(),
attr.asdict(inputs).values(),
))
tensor_list = [
self._vxc,
self._vrho,
self._vsigma,
self._vtau,
self._vhf,
]
exc, vrho, vsigma, vtau, vhf = (
self._session.run(tensor_list, feed_dict=feed_dict))
mol = self._system_state.mol
shls_slice = (0, mol.nbas)
ao_loc_nr = mol.ao_loc_nr()
# Note: tf.gradients returns a list of gradients.
# vrho, vsigma, vtau are derivatives of objects that had
# tf.expand_dims(..., 1) applied. The [:, 0] indexing undoes this by
# selecting the 0-th (and only) element from the second dimension.
if spin == 0:
vxc_0 = (vrho[0][:, 0] + vrho[1][:, 0]) / 2.
# pyscf expects derivatives with respect to:
# grad_rho . grad_rho.
# The functional uses the first and last as inputs, but then has
# grad_(rho_a + rho_b) . grad_(rho_a + rho_b)
# as input. The following computes the correct total derivatives.
vxc_1 = (vsigma[0][:, 0] / 4. + vsigma[1][:, 0] / 4. + vsigma[2][:, 0])
vxc_3 = (vtau[0][:, 0] + vtau[1][:, 0]) / 2.
vxc_2 = np.zeros_like(vxc_3)
vhfs = (vhf[0] + vhf[1]) / 2.
# Local Hartree-Fock terms
for i in range(len(self._omega_values)):
# Factor of 1/2 is to account for adding vmat_hf + vmat_hf.T to vmat,
# which we do to match existing PySCF style. Unlike other terms, vmat_hf
# is already symmetric though.
aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i])
self._vmat_hf += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
else:
vxc_0 = np.stack([vrho[0][:, 0], vrho[1][:, 0]], axis=1)
# pyscf expects derivatives with respect to:
# grad_rho_a . grad_rho_a
# grad_rho_a . grad_rho_b
# grad_rho_b . grad_rho_b
# The functional uses the first and last as inputs, but then has
# grad_(rho_a + rho_b) . grad_(rho_a + rho_b)
# as input. The following computes the correct total derivatives.
vxc_1 = np.stack([
vsigma[0][:, 0] + vsigma[2][:, 0], 2. * vsigma[2][:, 0],
vsigma[1][:, 0] + vsigma[2][:, 0]
],
axis=1)
vxc_3 = np.stack([vtau[0][:, 0], vtau[1][:, 0]], axis=1)
vxc_2 = np.zeros_like(vxc_3)
vhfs = np.stack([vhf[0], vhf[1]], axis=2)
for i in range(len(self._omega_values)):
# Factors of 1/2 are due to the same reason as in the spin=0 case.
aow = np.einsum('pi,p->pi', fxxa[:, :, i], -0.5 * vhfs[:, i, 0])
self._vmat_hf[0] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
aow = np.einsum('pi,p->pi', fxxb[:, :, i], -0.5 * vhfs[:, i, 1])
self._vmat_hf[1] += _dot_ao_ao(mol, ao, aow, mask, shls_slice,
ao_loc_nr)
fxc = None # Second derivative not implemented
    kxc = None  # Third derivative not implemented
# PySCF C routines expect float64.
exc = exc.astype(np.float64)
vxc = tuple(v.astype(np.float64) for v in (vxc_0, vxc_1, vxc_2, vxc_3))
return exc, vxc, fxc, kxc
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/neural_numint.py |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for compute_hfx_density."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pyscf import dft
from pyscf import gto
from pyscf import lib
from pyscf import scf
import scipy
from density_functional_approximation_dm21 import compute_hfx_density
class ComputeHfxDensityTest(parameterized.TestCase):
def setUp(self):
super().setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
@parameterized.named_parameters(
{'testcase_name': 'local_hf', 'omega': 0.},
{'testcase_name': 'range_separated_local_hf_0.5', 'omega': 0.5},
{'testcase_name': 'range_separated_local_hf_1.0', 'omega': 1.0},
{'testcase_name': 'range_separated_local_hf_2.0', 'omega': 2.0},
)
def test_closed_shell(self, omega):
mol = gto.M(atom='He 0. 0. 0.', basis='3-21g')
solver = dft.RKS(mol)
solver.grids.level = 2
solver.grids.build()
solver.kernel()
dm = solver.make_rdm1()
with mol.with_range_coulomb(omega=omega):
target_j, target_k = scf.hf.get_jk(mol, dm)
target_hf = -0.25 * np.einsum('ij,ji', dm, target_k)
target_coulomb = np.einsum('ij,ji', dm, target_j)
coords = solver.grids.coords
weights = solver.grids.weights
results = compute_hfx_density.get_hf_density(
mol, dm, coords, omega=omega, weights=weights)
coulomb = np.einsum('ij,ji', dm, results.coulomb)
hf = -0.25 * np.einsum('ij,ji', dm, results.exchange)
predicted_hf = np.sum((results.exx[0] + results.exx[1]) * weights)
with self.subTest('test_hf_density'):
self.assertAlmostEqual(target_hf, predicted_hf)
with self.subTest('test_get_jk'):
np.testing.assert_allclose(results.coulomb, target_j)
np.testing.assert_allclose(results.exchange, target_k)
self.assertAlmostEqual(coulomb, target_coulomb)
self.assertAlmostEqual(hf, target_hf)
@parameterized.named_parameters(
{'testcase_name': 'local_hf', 'omega': 0.},
{'testcase_name': 'range_separated_local_hf_0.5', 'omega': 0.5},
{'testcase_name': 'range_separated_local_hf_1.0', 'omega': 1.0},
{'testcase_name': 'range_separated_local_hf_2.0', 'omega': 2.0},
)
def test_hf_density_on_open_shell(self, omega):
mol = gto.M(atom='He 0. 0. 0.', basis='3-21g', charge=1, spin=1)
solver = dft.UKS(mol)
solver.grids.level = 2
solver.grids.build()
solver.kernel()
dm = solver.make_rdm1()
with mol.with_range_coulomb(omega=omega):
target_j, target_k = scf.hf.get_jk(mol, dm)
target_hf = -0.5 * (
np.einsum('ij,ji', dm[0], target_k[0]) +
np.einsum('ij,ji', dm[1], target_k[1]))
target_coulomb = np.einsum('ij,ji', dm[0], target_j[0]) + np.einsum(
'ij,ji', dm[1], target_j[1])
coords = solver.grids.coords
weights = solver.grids.weights
results = compute_hfx_density.get_hf_density(
mol, dm, coords, omega=omega, weights=weights)
predicted_hf = np.sum((results.exx[0] + results.exx[1]) * weights)
coulomb = (
np.einsum('ij,ji', dm[0], results.coulomb[0]) +
np.einsum('ij,ji', dm[1], results.coulomb[1]))
hf = -0.5 * (
np.einsum('ij,ji', dm[0], results.exchange[0]) +
np.einsum('ij,ji', dm[1], results.exchange[1]))
with self.subTest('test_hf_density'):
self.assertAlmostEqual(target_hf, predicted_hf, places=3)
with self.subTest('test_get_jk'):
np.testing.assert_allclose(results.coulomb[0], target_j[0])
np.testing.assert_allclose(results.coulomb[1], target_j[1])
np.testing.assert_allclose(results.exchange[0], target_k[0])
np.testing.assert_allclose(results.exchange[1], target_k[1])
self.assertAlmostEqual(coulomb, target_coulomb)
self.assertAlmostEqual(hf, target_hf)
def _nu_test_systems():
systems = [
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': -1
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 1
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 2
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 10
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 32
},
{
'atom': 'N 0 0 0; N 0 0 2.4',
'charge': 0,
'spin': 0,
'basis': 'cc-pVDZ',
'num_grids': 33
},
{
'atom': 'Li 0 0 0',
'charge': 0,
'spin': 1,
'basis': 'cc-pVTZ',
'num_grids': -1
},
{
'atom': 'H 0 0 0',
'charge': 0,
'spin': 1,
'basis': 'cc-pVQZ',
'num_grids': -1
},
]
system_names = ['N2', 'N2_1', 'N2_2', 'N2_10', 'N2_32', 'N2_33', 'Li', 'H']
for name, system in zip(system_names, systems):
    yield {'testcase_name': f'{name}_non_hermitian', 'hermi': 0, **system}
    yield {'testcase_name': f'{name}_hermitian', 'hermi': 1, **system}
class NuTest(parameterized.TestCase):
def setUp(self):
super(NuTest, self).setUp()
lib.param.TMPDIR = None
lib.num_threads(1)
@parameterized.named_parameters(_nu_test_systems())
def test_nu_integrals(self, atom, charge, spin, basis, num_grids, hermi):
mol = gto.M(atom=atom, charge=charge, spin=spin, basis=basis)
mf = dft.UKS(mol)
mf.grids.build()
if num_grids == -1:
test_coords = mf.grids.coords
else:
test_coords = mf.grids.coords[0:num_grids]
nu_slow = compute_hfx_density._evaluate_nu_slow(
mol, test_coords, omega=0.0, hermi=hermi)
nu_fast = compute_hfx_density._evaluate_nu(
mol, test_coords, omega=0.0, hermi=hermi)
np.testing.assert_allclose(nu_slow, nu_fast, atol=1E-13)
def test_range_separated_nu(self):
mol = gto.M(atom='He 0 0 0', basis='cc-pVDZ')
r0 = np.array([[0.1, 0.2, 1.]])
omega = 1.
result = np.squeeze(compute_hfx_density._evaluate_nu(mol, r0, omega=omega))
solver = dft.RKS(mol)
solver.grids.level = 2
solver.grids.build()
coords = solver.grids.coords
weights = solver.grids.weights
ao_value = dft.numint.eval_ao(mol, coords, deriv=0)
dist = np.linalg.norm(coords - r0, axis=1)
erf = scipy.special.erf(omega * dist) / dist
expected_result = np.squeeze(
np.einsum('g,ga,gb->ab', weights * erf, ao_value, ao_value))
np.testing.assert_allclose(result, expected_result)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | density_functional_approximation_dm21/density_functional_approximation_dm21/compute_hfx_density_test.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""nest utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
def _nest_apply_over_list(list_of_nests, fn):
"""Equivalent to fn, but works on list-of-nests.
Transforms a list-of-nests to a nest-of-lists, then applies `fn`
to each of the inner lists.
It is assumed that all nests have the same structure. Elements of the nest may
be None, in which case they are ignored, i.e. they do not form part of the
stack. This is useful when stacking agent states where parts of the state nest
have been filtered.
Args:
list_of_nests: A Python list of nests.
fn: the function applied on the list of leaves.
Returns:
A nest-of-arrays, where the arrays are formed by `fn`ing a list.
"""
list_of_flat_nests = [nest.flatten(n) for n in list_of_nests]
flat_nest_of_stacks = []
for position in range(len(list_of_flat_nests[0])):
new_list = [flat_nest[position] for flat_nest in list_of_flat_nests]
new_list = [x for x in new_list if x is not None]
flat_nest_of_stacks.append(fn(new_list))
return nest.pack_sequence_as(
structure=list_of_nests[0], flat_sequence=flat_nest_of_stacks)
def _take_indices(inputs, indices):
return nest.map_structure(lambda t: np.take(t, indices, axis=0), inputs)
def nest_stack(list_of_nests, axis=0):
"""Equivalent to np.stack, but works on list-of-nests.
Transforms a list-of-nests to a nest-of-lists, then applies `np.stack`
to each of the inner lists.
It is assumed that all nests have the same structure. Elements of the nest may
be None, in which case they are ignored, i.e. they do not form part of the
stack. This is useful when stacking agent states where parts of the state nest
have been filtered.
Args:
list_of_nests: A Python list of nests.
axis: Optional, the `axis` argument for `np.stack`.
Returns:
A nest-of-arrays, where the arrays are formed by `np.stack`ing a list.
"""
return _nest_apply_over_list(list_of_nests, lambda l: np.stack(l, axis=axis))
def nest_unstack(batched_inputs, batch_size):
"""Splits a sequence of numpy arrays along 0th dimension."""
return [_take_indices(batched_inputs, idx) for idx in range(batch_size)]
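# Illustrative sketch (hypothetical data, not part of the library):
#   nests = [{'a': np.zeros(3), 'b': np.ones(2)} for _ in range(4)]
#   stacked = nest_stack(nests)   # {'a': shape (4, 3), 'b': shape (4, 2)}
#   unstacked = nest_unstack(stacked, batch_size=4)  # list of 4 original nests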
| deepmind-research-master | tvt/nest_utils.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Memory Reader/Writer for RMA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sonnet as snt
import tensorflow.compat.v1 as tf
ReadInformation = collections.namedtuple(
'ReadInformation', ('weights', 'indices', 'keys', 'strengths'))
class MemoryWriter(snt.RNNCore):
"""Memory Writer Module."""
def __init__(self, mem_shape, name='memory_writer'):
"""Initializes the `MemoryWriter`.
Args:
mem_shape: The shape of the memory `(num_rows, memory_width)`.
name: The name to use for the Sonnet module.
"""
super(MemoryWriter, self).__init__(name=name)
self._mem_shape = mem_shape
def _build(self, inputs, state):
"""Inserts z into the argmin row of usage markers and updates all rows.
Returns an operation that, when executed, correctly updates the internal
state and usage markers.
Args:
inputs: A tuple consisting of:
* z, the value to write at this timestep
* mem_state, the state of the memory at this timestep before writing
state: The state is just the write_counter.
Returns:
A tuple of the new memory state and a tuple containing the next state.
"""
z, mem_state = inputs
# Stop gradient on writes to memory.
z = tf.stop_gradient(z)
prev_write_counter = state
new_row_value = z
# Find the index to insert the next row into.
num_mem_rows = self._mem_shape[0]
write_index = tf.cast(prev_write_counter, dtype=tf.int32) % num_mem_rows
one_hot_row = tf.one_hot(write_index, num_mem_rows)
write_counter = prev_write_counter + 1
    # Insert the new value into the selected row by masking out the old row
    # contents and adding the new row, broadcast to the full memory shape.
insert_new_row = lambda mem, o_hot, z: mem - (o_hot * mem) + (o_hot * z)
new_mem = insert_new_row(mem_state,
tf.expand_dims(one_hot_row, axis=-1),
tf.expand_dims(new_row_value, axis=-2))
new_state = write_counter
return new_mem, new_state
@property
def state_size(self):
"""Returns a description of the state size, without batch dimension."""
return tf.TensorShape([])
@property
def output_size(self):
"""Returns a description of the output size, without batch dimension."""
return self._mem_shape
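# Illustrative usage sketch for MemoryWriter (hypothetical sizes, TF1 graph
# mode assumed; not part of the library):
#   writer = MemoryWriter(mem_shape=(4, 8))
#   mem = tf.zeros([1, 4, 8])      # batch of 1, 4 rows, memory width 8
#   z = tf.ones([1, 8])            # value to write at this step
#   new_mem, counter = writer((z, mem), tf.zeros([1]))
# Successive writes land in rows 0, 1, 2, 3, 0, 1, ... (write counter modulo
# the number of rows), so the memory acts as a circular buffer of recent writes.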
class MemoryReader(snt.AbstractModule):
"""Memory Reader Module."""
def __init__(self,
memory_word_size,
num_read_heads,
top_k=0,
memory_size=None,
name='memory_reader'):
"""Initializes the `MemoryReader`.
Args:
memory_word_size: The dimension of the 1-D read keys this memory reader
should produce. Each row of the memory is of length `memory_word_size`.
num_read_heads: The number of reads to perform.
      top_k: If positive, the softmax and summation when reading are computed
        only over the top k most similar entries in memory. top_k=0 (default)
        means dense reads, i.e. no top-k restriction.
memory_size: Number of rows in memory.
name: The name for this Sonnet module.
"""
super(MemoryReader, self).__init__(name=name)
self._memory_word_size = memory_word_size
self._num_read_heads = num_read_heads
self._top_k = top_k
# This is not an RNNCore but it is useful to expose the output size.
self._output_size = num_read_heads * memory_word_size
num_read_weights = top_k if top_k > 0 else memory_size
self._read_info_size = ReadInformation(
weights=tf.TensorShape([num_read_heads, num_read_weights]),
indices=tf.TensorShape([num_read_heads, num_read_weights]),
keys=tf.TensorShape([num_read_heads, memory_word_size]),
strengths=tf.TensorShape([num_read_heads]),
)
with self._enter_variable_scope():
      # Linear layer producing read keys and read strengths for each read head.
output_dim = (memory_word_size + 1) * num_read_heads
self._keys_and_read_strengths_generator = snt.Linear(output_dim)
def _build(self, inputs):
"""Looks up rows in memory.
In the args list, we have the following conventions:
B: batch size
M: number of slots in a row of the memory matrix
R: number of rows in the memory matrix
H: number of read heads in the memory controller
Args:
inputs: A tuple of
* read_inputs, a tensor of shape [B, ...] that will be flattened and
passed through a linear layer to get read keys/read_strengths for
each head.
* mem_state, the primary memory tensor. Of shape [B, R, M].
Returns:
The read from the memory (concatenated across read heads) and read
information.
"""
# Assert input shapes are compatible and separate inputs.
_assert_compatible_memory_reader_input(inputs)
read_inputs, mem_state = inputs
# Determine the read weightings for each key.
flat_outputs = self._keys_and_read_strengths_generator(
snt.BatchFlatten()(read_inputs))
# Separate the read_strengths from the rest of the weightings.
h = self._num_read_heads
flat_keys = flat_outputs[:, :-h]
read_strengths = tf.nn.softplus(flat_outputs[:, -h:])
# Reshape the weights.
read_shape = (self._num_read_heads, self._memory_word_size)
read_keys = snt.BatchReshape(read_shape)(flat_keys)
# Read from memory.
memory_reads, read_weights, read_indices, read_strengths = (
read_from_memory(read_keys, read_strengths, mem_state, self._top_k))
concatenated_reads = snt.BatchFlatten()(memory_reads)
return concatenated_reads, ReadInformation(
weights=read_weights,
indices=read_indices,
keys=read_keys,
strengths=read_strengths)
@property
def output_size(self):
"""Returns a description of the output size, without batch dimension."""
return self._output_size, self._read_info_size
def read_from_memory(read_keys, read_strengths, mem_state, top_k):
"""Function for cosine similarity content based reading from memory matrix.
In the args list, we have the following conventions:
B: batch size
M: number of slots in a row of the memory matrix
R: number of rows in the memory matrix
H: number of read heads (of the controller or the policy)
K: top_k if top_k>0
Args:
read_keys: the read keys of shape [B, H, M].
read_strengths: the coefficients used to compute the normalised weighting
vector of shape [B, H].
mem_state: the primary memory tensor. Of shape [B, R, M].
top_k: only use top k read matches, other reads do not go into softmax and
are zeroed out in the output. top_k=0 (default) means use dense reads.
Returns:
    The memory reads [B, H, M], read weights [B, H, top k], read indices
    [B, H, top k], and read strengths [B, H].
"""
_assert_compatible_read_from_memory_inputs(read_keys, read_strengths,
mem_state)
batch_size = read_keys.shape[0]
num_read_heads = read_keys.shape[1]
with tf.name_scope('memory_reading'):
# Scale such that all rows are L2-unit vectors, for memory and read query.
scaled_read_keys = tf.math.l2_normalize(read_keys, axis=-1) # [B, H, M]
scaled_mem = tf.math.l2_normalize(mem_state, axis=-1) # [B, R, M]
# The cosine distance is then their dot product.
# Find the cosine distance between each read head and each row of memory.
cosine_distances = tf.matmul(
scaled_read_keys, scaled_mem, transpose_b=True) # [B, H, R]
# The rank must match cosine_distances for broadcasting to work.
read_strengths = tf.expand_dims(read_strengths, axis=-1) # [B, H, 1]
weighted_distances = read_strengths * cosine_distances # [B, H, R]
if top_k:
# Get top k indices (row indices with top k largest weighted distances).
top_k_output = tf.nn.top_k(weighted_distances, top_k, sorted=False)
read_indices = top_k_output.indices # [B, H, K]
# Create a sub-memory for each read head with only the top k rows.
# Each batch_gather is [B, K, M] and the list stacks to [B, H, K, M].
topk_mem_per_head = [tf.batch_gather(mem_state, ri_this_head)
for ri_this_head in tf.unstack(read_indices, axis=1)]
topk_mem = tf.stack(topk_mem_per_head, axis=1) # [B, H, K, M]
topk_scaled_mem = tf.math.l2_normalize(topk_mem, axis=-1) # [B, H, K, M]
# Calculate read weights for each head's top k sub-memory.
expanded_scaled_read_keys = tf.expand_dims(
scaled_read_keys, axis=2) # [B, H, 1, M]
topk_cosine_distances = tf.reduce_sum(
expanded_scaled_read_keys * topk_scaled_mem, axis=-1) # [B, H, K]
topk_weighted_distances = (
read_strengths * topk_cosine_distances) # [B, H, K]
read_weights = tf.nn.softmax(
topk_weighted_distances, axis=-1) # [B, H, K]
# For each head, read using the sub-memories and corresponding weights.
expanded_weights = tf.expand_dims(read_weights, axis=-1) # [B, H, K, 1]
memory_reads = tf.reduce_sum(
expanded_weights * topk_mem, axis=2) # [B, H, M]
else:
read_weights = tf.nn.softmax(weighted_distances, axis=-1)
num_rows_memory = mem_state.shape[1]
all_indices = tf.range(num_rows_memory, dtype=tf.int32)
all_indices = tf.reshape(all_indices, [1, 1, num_rows_memory])
read_indices = tf.tile(all_indices, [batch_size, num_read_heads, 1])
# This is the actual memory access.
# Note that matmul automatically batch applies for us.
memory_reads = tf.matmul(read_weights, mem_state)
read_keys.shape.assert_is_compatible_with(memory_reads.shape)
read_strengths = tf.squeeze(read_strengths, axis=-1) # [B, H, 1] -> [B, H]
return memory_reads, read_weights, read_indices, read_strengths
def _assert_compatible_read_from_memory_inputs(read_keys, read_strengths,
mem_state):
read_keys.shape.assert_has_rank(3)
b_shape, h_shape, m_shape = read_keys.shape
mem_state.shape.assert_has_rank(3)
r_shape = mem_state.shape[1]
read_strengths.shape.assert_is_compatible_with(
tf.TensorShape([b_shape, h_shape]))
mem_state.shape.assert_is_compatible_with(
tf.TensorShape([b_shape, r_shape, m_shape]))
def _assert_compatible_memory_reader_input(input_tensors):
"""Asserts MemoryReader's _build has been given the correct shapes."""
assert len(input_tensors) == 2
_, mem_state = input_tensors
mem_state.shape.assert_has_rank(3)
| deepmind-research-master | tvt/memory.py |
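A minimal numpy sketch of the content-based top-k read that `read_from_memory` implements above, for a single batch element and read head; the function name, shapes and values here are illustrative and not part of the module:

import numpy as np

def sketch_topk_read(read_key, mem, strength, k=2):
  # L2-normalise the key and each memory row; dot products are then cosines.
  key = read_key / np.linalg.norm(read_key)
  rows = mem / np.linalg.norm(mem, axis=-1, keepdims=True)
  scores = strength * rows.dot(key)          # weighted similarities, shape [R]
  top = np.argpartition(scores, -k)[-k:]     # row indices of the k best matches
  weights = np.exp(scores[top] - scores[top].max())
  weights /= weights.sum()                   # softmax restricted to the top k
  return weights.dot(mem[top])               # read vector, shape [M]

read_vector = sketch_topk_read(np.array([1., 0., 0.]),
                               np.arange(12.).reshape(4, 3) + 1., strength=2.)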
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Temporal Value Transport implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
import numpy as np
from six.moves import range
from six.moves import zip
def _unstack(array, axis):
"""Opposite of np.stack."""
split_array = np.split(array, array.shape[axis], axis=axis)
return [np.squeeze(a, axis=axis) for a in split_array]
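# Example (illustrative): _unstack(np.zeros((4, 2, 3)), axis=1) returns a list
# of two arrays, each of shape (4, 3).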
def _top_k_args(array, k):
"""Return top k arguments or all arguments if array size is less than k."""
if len(array) <= k:
return np.arange(len(array))
return np.argpartition(array, kth=-k)[-k:]
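# Example (illustrative): _top_k_args(np.array([.1, .9, .3, .5]), 2) returns the
# indices of the two largest entries, {1, 3}, in no particular order.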
def _threshold_read_event_times(read_strengths, threshold):
"""Return the times of max read strengths within one threshold read event."""
chosen_times = []
over_threshold = False
max_read_strength = 0.
# Wait until the threshold is crossed then keep track of max read strength and
# time of max read strength until the read strengths go back under the
# threshold, then add that max read strength time to the chosen times. Wait
# until threshold is crossed again and then repeat the process.
for time, strength in enumerate(read_strengths):
if strength > threshold:
over_threshold = True
if strength > max_read_strength:
max_read_strength = strength
max_read_strength_time = time
else:
# If coming back under threshold, add the time of the last max read.
if over_threshold:
chosen_times.append(max_read_strength_time)
max_read_strength = 0.
over_threshold = False
# Add max read strength time if episode finishes before going under threshold.
if over_threshold:
chosen_times.append(max_read_strength_time)
return np.array(chosen_times)
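# Example (illustrative): with threshold=2., read strengths [0, 3, 5, 2, 0, 4, 1]
# cross the threshold twice; the per-event maxima occur at times 2 and 5, so the
# function returns np.array([2, 5]).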
def _tvt_rewards_single_head(read_weights, read_strengths, read_times,
baselines, alpha, top_k_t1,
read_strength_threshold, no_transport_period):
"""Compute TVT rewards for a single read head, no batch dimension.
This performs the updates for one read head.
`t1` and `t2` refer to times to where and from where the value is being
transported, respectively. I.e. the rewards at `t1` times are being modified
based on values at times `t2`.
Args:
read_weights: shape (ep_length, top_k).
read_strengths: shape (ep_length,).
read_times: shape (ep_length, top_k).
baselines: shape (ep_length,).
alpha: The multiplier for the temporal value transport rewards.
top_k_t1: For each read event time, this determines how many time points
to send tvt reward to.
read_strength_threshold: Read strengths below this value are ignored.
no_transport_period: Length of no_transport_period.
Returns:
An array of TVT rewards with shape (ep_length,).
"""
tvt_rewards = np.zeros_like(baselines)
# Mask read_weights for reads that read back to times within
# no_transport_period of current time.
ep_length = read_times.shape[0]
times = np.arange(ep_length)
# Expand dims for correct broadcasting when subtracting read_times.
times = np.expand_dims(times, -1)
read_past_no_transport_period = (times - read_times) > no_transport_period
read_weights_masked = np.where(read_past_no_transport_period,
read_weights,
np.zeros_like(read_weights))
# Find t2 times with maximum read weights. Ignore t2 times whose maximum
# read weights fall inside the no_transport_period.
max_read_weight_args = np.argmax(read_weights, axis=1) # (ep_length,)
times = np.arange(ep_length)
max_read_weight_times = read_times[times,
max_read_weight_args] # (ep_length,)
read_strengths_cut = np.where(
times - max_read_weight_times > no_transport_period,
read_strengths,
np.zeros_like(read_strengths))
# Filter t2 candidates to perform value transport on local maximums
# above a threshold.
t2_times_with_largest_reads = _threshold_read_event_times(
read_strengths_cut, read_strength_threshold)
# Loop through all t2 candidates and transport value to top_k_t1 read times.
for t2 in t2_times_with_largest_reads:
try:
baseline_value_when_reading = baselines[t2]
except IndexError:
raise RuntimeError("Attempting to access baselines array with length {}"
" at index {}. Make sure output_baseline is set in"
" the agent config.".format(len(baselines), t2))
read_times_from_t2 = read_times[t2]
read_weights_from_t2 = read_weights_masked[t2]
# Find the top_k_t1 read times for this t2 and their corresponding read
# weights. The call to _top_k_args() here gives the array indices for the
# times and weights of the top_k_t1 reads from t2.
top_t1_indices = _top_k_args(read_weights_from_t2, top_k_t1)
top_t1_read_times = np.take(read_times_from_t2, top_t1_indices)
top_t1_read_weights = np.take(read_weights_from_t2, top_t1_indices)
# For each of the top_k_t1 read times t and corresponding read weight w,
# find the trajectory that contains step_num (t + shift) and modify the
# reward at step_num (t + shift) using w and the baseline value at t2.
# We ignore any read times t >= t2. These can emerge because if nothing
# in memory matches positively with the read query, the top reads may be
# in the empty region of the memory.
for step_num, read_weight in zip(top_t1_read_times, top_t1_read_weights):
if step_num >= t2:
# Skip this step_num as it is not really a memory time.
continue
# Compute the tvt reward and add it on.
tvt_reward = alpha * read_weight * baseline_value_when_reading
tvt_rewards[step_num] += tvt_reward
return tvt_rewards
def _compute_tvt_rewards_from_read_info(
read_weights, read_strengths, read_times, baselines, gamma,
alpha=0.9, top_k_t1=50,
read_strength_threshold=2.,
no_transport_period_when_gamma_1=25):
"""Compute TVT rewards given supplied read information, no batch dimension.
Args:
read_weights: shape (ep_length, num_read_heads, top_k).
read_strengths: shape (ep_length, num_read_heads).
read_times: shape (ep_length, num_read_heads, top_k).
baselines: shape (ep_length,).
gamma: Scalar discount factor used to calculate the no_transport_period.
alpha: The multiplier for the temporal value transport rewards.
top_k_t1: For each read event time, this determines how many time points
to send tvt reward to.
read_strength_threshold: Read strengths below this value are ignored.
no_transport_period_when_gamma_1: no transport period when gamma == 1.
Returns:
An array of TVT rewards with shape (ep_length,).
"""
if gamma < 1:
no_transport_period = int(1 / (1 - gamma))
else:
if no_transport_period_when_gamma_1 is None:
raise ValueError("No transport period must be defined when gamma == 1.")
no_transport_period = no_transport_period_when_gamma_1
# Split read infos by read head.
num_read_heads = read_weights.shape[1]
read_weights = _unstack(read_weights, axis=1)
read_strengths = _unstack(read_strengths, axis=1)
read_times = _unstack(read_times, axis=1)
  # Calculate TVT rewards for each read head separately and add to the total.
tvt_rewards = np.zeros_like(baselines)
for i in range(num_read_heads):
tvt_rewards += _tvt_rewards_single_head(
read_weights[i], read_strengths[i], read_times[i],
baselines, alpha, top_k_t1, read_strength_threshold,
no_transport_period)
return tvt_rewards
def compute_tvt_rewards(read_infos, baselines, gamma=.96):
"""Compute TVT rewards from EpisodeOutputs.
Args:
read_infos: A memory_reader.ReadInformation namedtuple, where each element
has shape (ep_length, batch_size, num_read_heads, ...).
baselines: A numpy float array with shape (ep_length, batch_size).
gamma: Discount factor.
Returns:
    An array of TVT rewards with shape (ep_length, batch_size).
"""
if not read_infos:
return np.zeros_like(baselines)
  # TVT reward computation is done without a batch dimension, so we need to
  # split read_infos and baselines into per-batch-element components.
batch_size = baselines.shape[1]
# Split each element of read info on batch dim.
read_weights = _unstack(read_infos.weights, axis=1)
read_strengths = _unstack(read_infos.strengths, axis=1)
read_indices = _unstack(read_infos.indices, axis=1)
# Split baselines on batch dim.
baselines = _unstack(baselines, axis=1)
  # Compute TVT rewards for each element in the batch (threading over batch).
tvt_rewards = []
with futures.ThreadPoolExecutor(max_workers=batch_size) as executor:
for i in range(batch_size):
tvt_rewards.append(
executor.submit(
_compute_tvt_rewards_from_read_info,
read_weights[i],
read_strengths[i],
read_indices[i],
baselines[i],
gamma)
)
tvt_rewards = [f.result() for f in tvt_rewards]
# Process TVT rewards back into an array of shape (ep_length, batch_size).
return np.stack(tvt_rewards, axis=1)
| deepmind-research-master | tvt/tvt_rewards.py |
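A small smoke test of the batched entry point above (shapes are arbitrary; all-zero read information simply yields zero TVT rewards). It assumes the tvt package is on the path, and uses a stand-in namedtuple in place of memory.ReadInformation so the example only needs numpy:

import collections
import numpy as np
from tvt import tvt_rewards

# Stand-in with the same fields as memory.ReadInformation (illustrative).
ReadInformation = collections.namedtuple(
    'ReadInformation', ('weights', 'indices', 'keys', 'strengths'))
ep_length, batch_size, num_heads, top_k = 8, 2, 3, 5
read_infos = ReadInformation(
    weights=np.zeros((ep_length, batch_size, num_heads, top_k)),
    indices=np.zeros((ep_length, batch_size, num_heads, top_k), dtype=np.int32),
    keys=np.zeros((ep_length, batch_size, num_heads, 4)),
    strengths=np.zeros((ep_length, batch_size, num_heads)))
baselines = np.zeros((ep_length, batch_size), dtype=np.float32)
tvt = tvt_rewards.compute_tvt_rewards(read_infos, baselines, gamma=0.96)
assert tvt.shape == (ep_length, batch_size)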
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Threaded batch environment wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from concurrent import futures
from six.moves import range
from six.moves import zip
from tvt import nest_utils
class BatchEnv(object):
"""Wrapper that steps multiple environments in separate threads.
  The threads are stepped in lock step, so all environments progress by one
  step before any of them moves on to the next step.
"""
def __init__(self, batch_size, env_builder, **env_kwargs):
self.batch_size = batch_size
self._envs = [env_builder(**env_kwargs) for _ in range(batch_size)]
self._num_actions = self._envs[0].num_actions
self._observation_shape = self._envs[0].observation_shape
self._episode_length = self._envs[0].episode_length
self._executor = futures.ThreadPoolExecutor(max_workers=self.batch_size)
def reset(self):
"""Reset the entire batch of environments."""
def reset_environment(env):
return env.reset()
try:
output_list = []
for env in self._envs:
output_list.append(self._executor.submit(reset_environment, env))
output_list = [env_output.result() for env_output in output_list]
except KeyboardInterrupt:
self._executor.shutdown(wait=True)
raise
observations, rewards = nest_utils.nest_stack(output_list)
return observations, rewards
def step(self, action_list):
"""Step batch of envs.
Args:
      action_list: A list of actions, one per environment in the batch. Each
        one should be a scalar int or a numpy scalar int.
Returns:
A tuple (observations, rewards):
observations: A nest of observations, each one a numpy array where the
first dimension has size equal to the number of environments in the
batch.
rewards: An array of rewards with size equal to the number of
environments in the batch.
"""
def step_environment(env, action):
return env.step(action)
try:
output_list = []
for env, action in zip(self._envs, action_list):
output_list.append(self._executor.submit(step_environment, env, action))
output_list = [env_output.result() for env_output in output_list]
except KeyboardInterrupt:
self._executor.shutdown(wait=True)
raise
observations, rewards = nest_utils.nest_stack(output_list)
return observations, rewards
@property
def observation_shape(self):
"""Observation shape per environment, i.e. with no batch dimension."""
return self._observation_shape
@property
def num_actions(self):
return self._num_actions
@property
def episode_length(self):
return self._episode_length
def last_phase_rewards(self):
return [env.last_phase_reward() for env in self._envs]
| deepmind-research-master | tvt/batch_env.py |
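A short usage sketch, mirroring how main.py wires this wrapper to the pycolab adapter; it assumes the pycolab-based environment dependencies are installed, and the keyword arguments shown are just the builder's defaults:

import numpy as np
from tvt import batch_env
from tvt.pycolab import env as pycolab_env

env = batch_env.BatchEnv(batch_size=2,
                         env_builder=pycolab_env.PycolabEnvironment,
                         game='key_to_door')
observations, rewards = env.reset()   # shapes: (2,) + observation_shape, (2,)
actions = [np.random.randint(env.num_actions) for _ in range(2)]
observations, rewards = env.step(actions)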
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""RMA agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from six.moves import range
from six.moves import zip
import sonnet as snt
import tensorflow.compat.v1 as tf
import trfl
from tvt import losses
from tvt import memory as memory_module
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
PolicyOutputs = collections.namedtuple(
'PolicyOutputs', ['policy', 'action', 'baseline'])
StepOutput = collections.namedtuple(
'StepOutput', ['action', 'baseline', 'read_info'])
AgentState = collections.namedtuple(
'AgentState', ['core_state', 'prev_action'])
Observation = collections.namedtuple(
'Observation', ['image', 'last_action', 'last_reward'])
RNNStateNoMem = collections.namedtuple(
'RNNStateNoMem', ['controller_outputs', 'h_controller'])
RNNState = collections.namedtuple(
'RNNState',
list(RNNStateNoMem._fields) + ['memory', 'mem_reads', 'h_mem_writer'])
CoreOutputs = collections.namedtuple(
'CoreOutputs', ['action', 'policy', 'baseline', 'z', 'read_info'])
def rnn_inputs_to_static_rnn_inputs(inputs):
"""Converts time major tensors to timestep lists."""
# Inputs to core build method are expected to be a tensor or tuple of tensors.
if isinstance(inputs, tuple):
num_timesteps = inputs[0].shape.as_list()[0]
converted_inputs = [tf.unstack(input_, num_timesteps) for input_ in inputs]
return list(zip(*converted_inputs))
else:
return tf.unstack(inputs)
def static_rnn_outputs_to_core_outputs(outputs):
"""Convert from length T list of nests to nest of tensors with first dim T."""
list_of_flats = [nest.flatten(n) for n in outputs]
new_outputs = list()
for i in range(len(list_of_flats[0])):
new_outputs.append(tf.stack([flat_nest[i] for flat_nest in list_of_flats]))
return nest.pack_sequence_as(structure=outputs[0], flat_sequence=new_outputs)
def unroll(core, initial_state, inputs, dtype=tf.float32):
"""Perform a static unroll of the core."""
static_rnn_inputs = rnn_inputs_to_static_rnn_inputs(inputs)
static_outputs, _ = tf.nn.static_rnn(
core,
inputs=static_rnn_inputs,
initial_state=initial_state,
dtype=dtype)
core_outputs = static_rnn_outputs_to_core_outputs(static_outputs)
return core_outputs
class ImageEncoderDecoder(snt.AbstractModule):
"""Image Encoder/Decoder module."""
def __init__(
self,
image_code_size,
name='image_encoder_decoder'):
"""Initialize the image encoder/decoder."""
super(ImageEncoderDecoder, self).__init__(name=name)
# This is set by a call to `encode`. `decode` will fail before this is set.
self._convnet_output_shape = None
with self._enter_variable_scope():
self._convnet = snt.nets.ConvNet2D(
output_channels=(16, 32),
kernel_shapes=(3, 3),
strides=(1, 1),
paddings=('SAME',))
self._post_convnet_layer = snt.Linear(image_code_size, name='final_layer')
@snt.reuse_variables
def encode(self, image):
"""Encode the image observation."""
convnet_output = self._convnet(image)
# Store unflattened convnet output shape for use in decoder.
self._convnet_output_shape = convnet_output.shape[1:]
# Flatten convnet outputs and pass through final layer to get image code.
return self._post_convnet_layer(snt.BatchFlatten()(convnet_output))
@snt.reuse_variables
def decode(self, code):
"""Decode the image observation from a latent code."""
if self._convnet_output_shape is None:
raise ValueError('Must call `encode` before `decode`.')
transpose_convnet_in_flat = snt.Linear(
self._convnet_output_shape.num_elements(),
name='decode_initial_linear')(
code)
transpose_convnet_in_flat = tf.nn.relu(transpose_convnet_in_flat)
transpose_convnet_in = snt.BatchReshape(
self._convnet_output_shape.as_list())(transpose_convnet_in_flat)
return self._convnet.transpose(None)(transpose_convnet_in)
def _build(self, *args): # Unused. Use encode/decode instead.
raise NotImplementedError('Use encode/decode methods instead of __call__.')
class Policy(snt.AbstractModule):
"""A policy module possibly containing a read-only DNC."""
def __init__(self,
num_actions,
num_policy_hiddens=(),
num_baseline_hiddens=(),
activation=tf.nn.tanh,
policy_clip_abs_value=10.0,
name='Policy'):
"""Construct a policy module possibly containing a read-only DNC.
Args:
num_actions: Number of discrete actions to choose from.
num_policy_hiddens: Tuple or List, sizes of policy MLP hidden layers.
num_baseline_hiddens: Tuple or List, sizes of baseline MLP hidden layers.
An empty tuple/list results in a linear layer instead of an MLP.
activation: Callable, e.g. tf.nn.tanh.
policy_clip_abs_value: float, Policy gradient clip value.
name: A string, the module's name
"""
super(Policy, self).__init__(name=name)
self._num_actions = num_actions
self._policy_layers = tuple(num_policy_hiddens) + (num_actions,)
self._baseline_layers = tuple(num_baseline_hiddens) + (1,)
self._policy_clip_abs_value = policy_clip_abs_value
self._activation = activation
def _build(self, inputs):
(shared_inputs, extra_policy_inputs) = inputs
policy_in = tf.concat([shared_inputs, extra_policy_inputs], axis=1)
policy = snt.nets.MLP(
output_sizes=self._policy_layers,
activation=self._activation,
name='policy_mlp')(
policy_in)
# Sample an action from the policy logits.
action = tf.multinomial(policy, num_samples=1, output_dtype=tf.int32)
action = tf.squeeze(action, 1) # [B, 1] -> [B]
if self._policy_clip_abs_value > 0:
policy = snt.clip_gradient(
net=policy,
clip_value_min=-self._policy_clip_abs_value,
clip_value_max=self._policy_clip_abs_value)
baseline_in = tf.concat([shared_inputs, tf.stop_gradient(policy)], axis=1)
baseline = snt.nets.MLP(
self._baseline_layers,
activation=self._activation,
name='baseline_mlp')(
baseline_in)
baseline = tf.squeeze(baseline, axis=-1) # [B, 1] -> [B]
if self._policy_clip_abs_value > 0:
baseline = snt.clip_gradient(
net=baseline,
clip_value_min=-self._policy_clip_abs_value,
clip_value_max=self._policy_clip_abs_value)
outputs = PolicyOutputs(
policy=policy,
action=action,
baseline=baseline)
return outputs
class _RMACore(snt.RNNCore):
"""RMA RNN Core."""
def __init__(self,
num_actions,
with_memory=True,
name='rma_core'):
super(_RMACore, self).__init__(name=name)
# MLP activation as callable.
mlp_activation = tf.nn.tanh
# Size of latent code written to memory (if using it) and used to
# reconstruct from (if including reconstructions).
num_latents = 200
# Value function decode settings.
baseline_mlp_num_hiddens = (200,)
# Policy settings.
num_policy_hiddens = (200,) # Only used for non-recurrent core.
# Controller settings.
control_hidden_size = 256
control_num_layers = 2
# Memory settings (only used if with_memory=True).
memory_size = 1000
memory_num_reads = 3
memory_top_k = 50
self._with_memory = with_memory
with self._enter_variable_scope():
# Construct the features -> latent encoder.
self._z_encoder_mlp = snt.nets.MLP(
output_sizes=(2 * num_latents, num_latents),
activation=mlp_activation,
activate_final=False,
name='z_encoder_mlp')
# Construct controller.
rnn_cores = [snt.LSTM(control_hidden_size)
for _ in range(control_num_layers)]
self._controller = snt.DeepRNN(
rnn_cores, skip_connections=True, name='controller')
# Construct memory.
if self._with_memory:
memory_dim = num_latents # Each write to memory is of size memory_dim.
self._mem_shape = (memory_size, memory_dim)
self._memory_reader = memory_module.MemoryReader(
memory_word_size=memory_dim,
num_read_heads=memory_num_reads,
top_k=memory_top_k,
memory_size=memory_size)
self._memory_writer = memory_module.MemoryWriter(
mem_shape=self._mem_shape)
# Construct policy, starting with policy_core and policy_action_head.
      # The policy's `extra_policy_inputs` will be the concatenation of the
      # controller output and the memory reads from the current time step (just
      # the controller output if with_memory=False).
self._policy = Policy(
num_policy_hiddens=num_policy_hiddens,
num_actions=num_actions,
num_baseline_hiddens=baseline_mlp_num_hiddens,
activation=mlp_activation,
policy_clip_abs_value=10.0,)
# Set state_size and output_size.
controller_out_size = self._controller.output_size
controller_state_size = self._controller.state_size
self._state_size = RNNStateNoMem(controller_outputs=controller_out_size,
h_controller=controller_state_size)
read_info_size = ()
if self._with_memory:
mem_reads_size, read_info_size = self._memory_reader.output_size
mem_writer_state_size = self._memory_writer.state_size
self._state_size = RNNState(memory=tf.TensorShape(self._mem_shape),
mem_reads=mem_reads_size,
h_mem_writer=mem_writer_state_size,
**self._state_size._asdict())
z_size = num_latents
self._output_size = CoreOutputs(
action=tf.TensorShape([]), # Scalar tensor shapes must be explicit.
policy=num_actions,
baseline=tf.TensorShape([]), # Scalar tensor shapes must be explicit.
z=z_size,
read_info=read_info_size)
def _build(self, inputs, h_prev):
features = inputs
z_net_inputs = [features, h_prev.controller_outputs]
if self._with_memory:
z_net_inputs.append(h_prev.mem_reads)
z_net_inputs_concat = tf.concat(z_net_inputs, axis=1)
z = self._z_encoder_mlp(z_net_inputs_concat)
controller_out, h_controller = self._controller(z, h_prev.h_controller)
read_info = ()
if self._with_memory:
# Perform a memory read/write step before generating the policy_modules.
mem_reads, read_info = self._memory_reader((controller_out,
h_prev.memory))
memory, h_mem_writer = self._memory_writer((z, h_prev.memory),
h_prev.h_mem_writer)
policy_extra_input = tf.concat([controller_out, mem_reads], axis=1)
else:
policy_extra_input = controller_out
    # Get policy, action and (possibly empty) baseline from the policy module.
policy_inputs = (z, policy_extra_input)
policy_outputs = self._policy(policy_inputs)
core_outputs = CoreOutputs(
z=z,
read_info=read_info,
**policy_outputs._asdict())
h_next = RNNStateNoMem(controller_outputs=controller_out,
h_controller=h_controller)
if self._with_memory:
h_next = RNNState(memory=memory,
mem_reads=mem_reads,
h_mem_writer=h_mem_writer,
**h_next._asdict())
return core_outputs, h_next
def initial_state(self, batch_size):
"""Use initial state for RNN modules, otherwise use zero state."""
zero_state = self.zero_state(batch_size, dtype=tf.float32)
controller_out = zero_state.controller_outputs
h_controller = self._controller.initial_state(batch_size)
state = RNNStateNoMem(controller_outputs=controller_out,
h_controller=h_controller)
if self._with_memory:
memory = zero_state.memory
mem_reads = zero_state.mem_reads
h_mem_writer = self._memory_writer.initial_state(batch_size)
state = RNNState(memory=memory,
mem_reads=mem_reads,
h_mem_writer=h_mem_writer,
**state._asdict())
return state
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
class Agent(snt.AbstractModule):
"""Myriad RMA agent.
`latents` here refers to a purely deterministic encoding of the inputs, rather
than VAE-like latents in e.g. the MERLIN agent.
"""
def __init__(self,
batch_size,
with_reconstructions=True,
with_memory=True,
image_code_size=500,
image_cost_weight=50.,
num_actions=None,
observation_shape=None,
entropy_cost=0.01,
return_cost_weight=0.4,
gamma=0.96,
read_strength_cost=5e-5,
read_strength_tolerance=2.,
name='rma_agent'):
super(Agent, self).__init__(name=name)
self._batch_size = batch_size
self._with_reconstructions = with_reconstructions
self._image_cost_weight = image_cost_weight
self._image_code_size = image_code_size
self._entropy_cost = entropy_cost
self._return_cost_weight = return_cost_weight
self._gamma = gamma
self._read_strength_cost = read_strength_cost
self._read_strength_tolerance = read_strength_tolerance
self._num_actions = num_actions
self._name = name
self._logged_values = {}
# Store total number of pixels across channels (for image loss scaling).
self._total_num_pixels = np.prod(observation_shape)
with self._enter_variable_scope():
# Construct image encoder/decoder.
self._image_encoder_decoder = ImageEncoderDecoder(
image_code_size=image_code_size)
self._core = _RMACore(
num_actions=self._num_actions,
with_memory=with_memory)
def initial_state(self, batch_size):
with tf.name_scope(self._name + '/initial_state'):
return AgentState(
core_state=self._core.initial_state(batch_size),
prev_action=tf.zeros(shape=(batch_size,), dtype=tf.int32))
def _prepare_observations(self, observation, last_reward, last_action):
image = observation
# Make sure the entries are in [0, 1) range.
if image.dtype.is_integer:
image = tf.cast(image, tf.float32) / 255.
if last_reward is None:
# For some envs, in the first timestep the last_reward can be None.
batch_size = observation.shape[0]
last_reward = tf.zeros((batch_size,), dtype=tf.float32)
return Observation(
image=image,
last_action=last_action,
last_reward=last_reward)
@snt.reuse_variables
def _encode(self, observation, last_reward, last_action):
inputs = self._prepare_observations(observation, last_reward, last_action)
# Encode image observation.
obs_code = self._image_encoder_decoder.encode(inputs.image)
# Encode last action.
action_code = tf.one_hot(inputs.last_action, self._num_actions)
# Encode last reward.
reward_code = tf.expand_dims(inputs.last_reward, -1)
features = tf.concat([obs_code, action_code, reward_code], axis=1)
return inputs, features
@snt.reuse_variables
def _decode(self, z):
# Decode image.
image_recon = self._image_encoder_decoder.decode(z)
# Decode action.
action_recon = snt.Linear(self._num_actions, name='action_recon_linear')(z)
# Decode reward.
reward_recon = snt.Linear(1, name='reward_recon_linear')(z)
# Full reconstructions.
recons = Observation(
image=image_recon,
last_reward=reward_recon,
last_action=action_recon)
return recons
def step(self, reward, observation, prev_state):
with tf.name_scope(self._name + '/step'):
_, features = self._encode(observation, reward, prev_state.prev_action)
core_outputs, next_core_state = self._core(
features, prev_state.core_state)
action = core_outputs.action
step_output = StepOutput(
action=action,
baseline=core_outputs.baseline,
read_info=core_outputs.read_info)
agent_state = AgentState(
core_state=next_core_state,
prev_action=action)
return step_output, agent_state
@snt.reuse_variables
def loss(self, observations, rewards, actions, additional_rewards=None):
"""Compute the loss."""
dummy_zeroth_step_actions = tf.zeros_like(actions[:1])
all_actions = tf.concat([dummy_zeroth_step_actions, actions], axis=0)
inputs, features = snt.BatchApply(self._encode)(
observations, rewards, all_actions)
rewards = rewards[1:] # Zeroth step reward not correlated to actions.
if additional_rewards is not None:
# Additional rewards are not passed to the encoder (above) in order to be
# consistent with the step, nor to the recon loss so that recons are
# consistent with the observations. Thus, additional rewards only affect
# the returns used to learn the value function.
rewards += additional_rewards
initial_state = self._core.initial_state(self._batch_size)
rnn_inputs = features
core_outputs = unroll(self._core, initial_state, rnn_inputs)
# Remove final timestep of outputs.
core_outputs = nest.map_structure(lambda t: t[:-1], core_outputs)
if self._with_reconstructions:
recons = snt.BatchApply(self._decode)(core_outputs.z)
recon_targets = nest.map_structure(lambda t: t[:-1], inputs)
recon_loss, recon_logged_values = losses.reconstruction_losses(
recons=recons,
targets=recon_targets,
image_cost=self._image_cost_weight / self._total_num_pixels,
action_cost=1.,
reward_cost=1.)
else:
recon_loss = tf.constant(0.0)
recon_logged_values = dict()
if core_outputs.read_info is not tuple():
read_reg_loss, read_reg_logged_values = (
losses.read_regularization_loss(
read_info=core_outputs.read_info,
strength_cost=self._read_strength_cost,
strength_tolerance=self._read_strength_tolerance,
strength_reg_mode='L1',
key_norm_cost=0.,
key_norm_tolerance=1.))
else:
read_reg_loss = tf.constant(0.0)
read_reg_logged_values = dict()
# Bootstrap value is at end of episode so is zero.
bootstrap_value = tf.zeros(shape=(self._batch_size,), dtype=tf.float32)
discounts = self._gamma * tf.ones_like(rewards)
a2c_loss, a2c_loss_extra = trfl.sequence_advantage_actor_critic_loss(
policy_logits=core_outputs.policy,
baseline_values=core_outputs.baseline,
actions=actions,
rewards=rewards,
pcontinues=discounts,
bootstrap_value=bootstrap_value,
lambda_=self._gamma,
entropy_cost=self._entropy_cost,
baseline_cost=self._return_cost_weight,
name='SequenceA2CLoss')
a2c_loss = tf.reduce_mean(a2c_loss) # Average over batch.
total_loss = a2c_loss + recon_loss + read_reg_loss
a2c_loss_logged_values = dict(
pg_loss=tf.reduce_mean(a2c_loss_extra.policy_gradient_loss),
baseline_loss=tf.reduce_mean(a2c_loss_extra.baseline_loss),
entropy_loss=tf.reduce_mean(a2c_loss_extra.entropy_loss))
agent_loss_log = losses.combine_logged_values(
a2c_loss_logged_values,
recon_logged_values,
read_reg_logged_values)
agent_loss_log['total_loss'] = total_loss
return total_loss, agent_loss_log
def _build(self, *args): # Unused.
# pylint: disable=no-value-for-parameter
return self.step(*args)
# pylint: enable=no-value-for-parameter
| deepmind-research-master | tvt/rma.py |
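An illustrative shape check for the static-unroll helpers defined above, assuming a TF1 environment with this module's Sonnet/trfl dependencies installed (sizes are arbitrary):

import tensorflow.compat.v1 as tf
from tvt import rma

# A time-major tuple of inputs with T=3, B=2 becomes a length-T list of
# per-timestep tuples, which is the layout tf.nn.static_rnn expects.
inputs = (tf.zeros([3, 2, 4]), tf.zeros([3, 2, 5]))
per_step = rma.rnn_inputs_to_static_rnn_inputs(inputs)
assert len(per_step) == 3
assert per_step[0][0].shape.as_list() == [2, 4]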
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Loss functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import tensorflow.compat.v1 as tf
def sum_time_average_batch(tensor, name=None):
"""Computes the mean over B assuming tensor is of shape [T, B]."""
tensor.get_shape().assert_has_rank(2)
return tf.reduce_mean(tf.reduce_sum(tensor, axis=0), axis=0, name=name)
def combine_logged_values(*logged_values_dicts):
"""Combine logged values dicts. Throws if there are any repeated keys."""
combined_dict = dict()
for logged_values in logged_values_dicts:
for k, v in six.iteritems(logged_values):
if k in combined_dict:
raise ValueError('Key "%s" is repeated in loss logging.' % k)
combined_dict[k] = v
return combined_dict
def reconstruction_losses(
recons,
targets,
image_cost,
action_cost,
reward_cost):
"""Reconstruction losses."""
if image_cost > 0.0:
# Neg log prob of obs image given Bernoulli(recon image) distribution.
negative_image_log_prob = tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets.image, logits=recons.image)
nll_per_time = tf.reduce_sum(negative_image_log_prob, [-3, -2, -1])
image_loss = image_cost * nll_per_time
image_loss = sum_time_average_batch(image_loss)
else:
image_loss = tf.constant(0.)
if action_cost > 0.0 and recons.last_action is not tuple():
# Labels have shape (T, B), logits have shape (T, B, num_actions).
action_loss = action_cost * tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets.last_action, logits=recons.last_action)
action_loss = sum_time_average_batch(action_loss)
else:
action_loss = tf.constant(0.)
if reward_cost > 0.0 and recons.last_reward is not tuple():
# MSE loss for reward.
recon_last_reward = recons.last_reward
recon_last_reward = tf.squeeze(recon_last_reward, -1)
reward_loss = 0.5 * reward_cost * tf.square(
recon_last_reward - targets.last_reward)
reward_loss = sum_time_average_batch(reward_loss)
else:
reward_loss = tf.constant(0.)
total_loss = image_loss + action_loss + reward_loss
logged_values = dict(
recon_loss_image=image_loss,
recon_loss_action=action_loss,
recon_loss_reward=reward_loss,
total_reconstruction_loss=total_loss,)
return total_loss, logged_values
def read_regularization_loss(
read_info,
strength_cost,
strength_tolerance,
strength_reg_mode,
key_norm_cost,
key_norm_tolerance):
"""Computes the sum of read strength and read key regularization losses."""
if (strength_cost <= 0.) and (key_norm_cost <= 0.):
read_reg_loss = tf.constant(0.)
return read_reg_loss, dict(read_regularization_loss=read_reg_loss)
  if read_info == tuple():
    raise ValueError('Make sure read regularization costs are zero when '
                     'not outputting read info.')
  if hasattr(read_info, 'read_strengths'):
    read_strengths = read_info.read_strengths
    read_keys = read_info.read_keys
  else:
    read_strengths = read_info.strengths
    read_keys = read_info.keys
read_reg_loss = tf.constant(0.)
if strength_cost > 0.:
strength_hinged = tf.maximum(strength_tolerance, read_strengths)
if strength_reg_mode == 'L2':
strength_loss = 0.5 * tf.square(strength_hinged)
elif strength_reg_mode == 'L1':
# Read strengths are always positive.
strength_loss = strength_hinged
else:
raise ValueError(
'Strength regularization mode "{}" is not supported.'.format(
strength_reg_mode))
# Sum across read heads to reduce from [T, B, n_reads] to [T, B].
strength_loss = strength_cost * tf.reduce_sum(strength_loss, axis=2)
if key_norm_cost > 0.:
key_norm_norms = tf.norm(read_keys, axis=-1)
key_norm_norms_hinged = tf.maximum(key_norm_tolerance, key_norm_norms)
key_norm_loss = 0.5 * tf.square(key_norm_norms_hinged)
# Sum across read heads to reduce from [T, B, n_reads] to [T, B].
key_norm_loss = key_norm_cost * tf.reduce_sum(key_norm_loss, axis=2)
if strength_cost > 0.:
strength_loss = sum_time_average_batch(strength_loss)
else:
strength_loss = tf.constant(0.)
if key_norm_cost > 0.:
key_norm_loss = sum_time_average_batch(key_norm_loss)
else:
key_norm_loss = tf.constant(0.)
read_reg_loss = strength_loss + key_norm_loss
logged_values = dict(
read_reg_strength_loss=strength_loss,
read_reg_key_norm_loss=key_norm_loss,
total_read_reg_loss=read_reg_loss)
return read_reg_loss, logged_values
| deepmind-research-master | tvt/losses.py |
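A quick numerical check of the reduction convention used throughout these losses (sum over time, then mean over batch); illustrative only and written against TF1 graph mode:

import tensorflow.compat.v1 as tf
from tvt import losses

tf.disable_v2_behavior()  # The module is written against TF1 graph mode.
per_step = tf.constant([[1., 2.],
                        [3., 4.]])   # shape [T=2, B=2]
reduced = losses.sum_time_average_batch(per_step)
with tf.Session() as sess:
  # Per-element time sums are [4., 6.]; their batch mean is 5.
  assert sess.run(reduced) == 5.0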
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Batched synchronous actor/learner training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tvt import batch_env
from tvt import nest_utils
from tvt import rma
from tvt import tvt_rewards as tvt_module
from tvt.pycolab import env as pycolab_env
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
FLAGS = flags.FLAGS
flags.DEFINE_integer('logging_frequency', 1,
'Log training progress every logging_frequency episodes.')
flags.DEFINE_string('logdir', None, 'Directory for tensorboard logging.')
flags.DEFINE_boolean('with_memory', True,
                     'Whether or not the agent has external memory.')
flags.DEFINE_boolean('with_reconstruction', True,
                     'Whether or not the agent reconstructs the observation.')
flags.DEFINE_float('gamma', 0.92, 'Agent discount factor.')
flags.DEFINE_float('entropy_cost', 0.05, 'Weight of the entropy loss.')
flags.DEFINE_float('image_cost_weight', 50., 'Image reconstruction cost weight.')
flags.DEFINE_float('read_strength_cost', 5e-5,
'Cost weight of the memory read strength.')
flags.DEFINE_float('read_strength_tolerance', 2.,
                   'The tolerance of the hinge loss on the read strength cost.')
flags.DEFINE_boolean('do_tvt', True, 'Whether or not to do TVT.')
flags.DEFINE_enum('pycolab_game', 'key_to_door',
['key_to_door', 'active_visual_match'],
'The name of the game in pycolab environment')
flags.DEFINE_integer('num_episodes', None,
'Number of episodes to train for. None means run forever.')
flags.DEFINE_integer('batch_size', 16, 'Batch size')
flags.DEFINE_float('learning_rate', 2e-4, 'Adam optimizer learning rate')
flags.DEFINE_float('beta1', 0., 'Adam optimizer beta1')
flags.DEFINE_float('beta2', 0.95, 'Adam optimizer beta2')
flags.DEFINE_float('epsilon', 1e-6, 'Adam optimizer epsilon')
# Pycolab-specific flags:
flags.DEFINE_integer('pycolab_num_apples', 10,
'Number of apples to sample from the distractor grid.')
flags.DEFINE_float('pycolab_apple_reward_min', 1.,
'A reward range [min, max) to uniformly sample from.')
flags.DEFINE_float('pycolab_apple_reward_max', 10.,
'A reward range [min, max) to uniformly sample from.')
flags.DEFINE_boolean('pycolab_fix_apple_reward_in_episode', True,
'Fix the sampled apple reward within an episode.')
flags.DEFINE_float('pycolab_final_reward', 10.,
'Reward obtained at the last phase.')
flags.DEFINE_boolean('pycolab_crop', True,
'Whether to crop observations or not.')
def main(_):
batch_size = FLAGS.batch_size
env_builder = pycolab_env.PycolabEnvironment
env_kwargs = {
'game': FLAGS.pycolab_game,
'num_apples': FLAGS.pycolab_num_apples,
'apple_reward': [FLAGS.pycolab_apple_reward_min,
FLAGS.pycolab_apple_reward_max],
'fix_apple_reward_in_episode': FLAGS.pycolab_fix_apple_reward_in_episode,
'final_reward': FLAGS.pycolab_final_reward,
'crop': FLAGS.pycolab_crop
}
env = batch_env.BatchEnv(batch_size, env_builder, **env_kwargs)
ep_length = env.episode_length
agent = rma.Agent(batch_size=batch_size,
num_actions=env.num_actions,
observation_shape=env.observation_shape,
with_reconstructions=FLAGS.with_reconstruction,
gamma=FLAGS.gamma,
read_strength_cost=FLAGS.read_strength_cost,
read_strength_tolerance=FLAGS.read_strength_tolerance,
entropy_cost=FLAGS.entropy_cost,
with_memory=FLAGS.with_memory,
image_cost_weight=FLAGS.image_cost_weight)
# Agent step placeholders and agent step.
batch_shape = (batch_size,)
observation_ph = tf.placeholder(
dtype=tf.uint8, shape=batch_shape + env.observation_shape, name='obs')
reward_ph = tf.placeholder(
dtype=tf.float32, shape=batch_shape, name='reward')
state_ph = nest.map_structure(
lambda s: tf.placeholder(dtype=s.dtype, shape=s.shape, name='state'),
agent.initial_state(batch_size=batch_size))
step_outputs, state = agent.step(reward_ph, observation_ph, state_ph)
# Update op placeholders and update op.
observations_ph = tf.placeholder(
dtype=tf.uint8, shape=(ep_length + 1, batch_size) + env.observation_shape,
name='observations')
rewards_ph = tf.placeholder(
dtype=tf.float32, shape=(ep_length + 1, batch_size), name='rewards')
actions_ph = tf.placeholder(
dtype=tf.int64, shape=(ep_length, batch_size), name='actions')
tvt_rewards_ph = tf.placeholder(
dtype=tf.float32, shape=(ep_length, batch_size), name='tvt_rewards')
loss, loss_logs = agent.loss(
observations_ph, rewards_ph, actions_ph, tvt_rewards_ph)
optimizer = tf.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate,
beta1=FLAGS.beta1,
beta2=FLAGS.beta2,
epsilon=FLAGS.epsilon)
update_op = optimizer.minimize(loss)
initial_state = agent.initial_state(batch_size)
if FLAGS.logdir:
if not tf.io.gfile.exists(FLAGS.logdir):
tf.io.gfile.makedirs(FLAGS.logdir)
summary_writer = tf.summary.FileWriter(FLAGS.logdir)
# Do init
init_ops = (tf.global_variables_initializer(),
tf.local_variables_initializer())
tf.get_default_graph().finalize()
sess = tf.Session()
sess.run(init_ops)
run = True
ep_num = 0
prev_logging_time = time.time()
while run:
observation, reward = env.reset()
agent_state = sess.run(initial_state)
# Initialise episode data stores.
observations = [observation]
rewards = [reward]
actions = []
baselines = []
read_infos = []
for _ in range(ep_length):
step_feed = {reward_ph: reward, observation_ph: observation}
for ph, ar in zip(nest.flatten(state_ph), nest.flatten(agent_state)):
step_feed[ph] = ar
step_output, agent_state = sess.run(
(step_outputs, state), feed_dict=step_feed)
action = step_output.action
baseline = step_output.baseline
read_info = step_output.read_info
# Take step in environment, append results.
observation, reward = env.step(action)
observations.append(observation)
rewards.append(reward)
actions.append(action)
baselines.append(baseline)
if read_info is not None:
read_infos.append(read_info)
    # Stack the lists of length ep_length so that each array (or each element
    # of the nest structure for read_infos) has shape (ep_length, batch_size, ...).
observations = np.stack(observations)
rewards = np.array(rewards)
actions = np.array(actions)
baselines = np.array(baselines)
read_infos = nest_utils.nest_stack(read_infos)
# Compute TVT rewards.
if FLAGS.do_tvt:
tvt_rewards = tvt_module.compute_tvt_rewards(read_infos,
baselines,
gamma=FLAGS.gamma)
else:
tvt_rewards = np.squeeze(np.zeros_like(baselines))
# Run update op.
loss_feed = {observations_ph: observations,
rewards_ph: rewards,
actions_ph: actions,
tvt_rewards_ph: tvt_rewards}
ep_loss, _, ep_loss_logs = sess.run([loss, update_op, loss_logs],
feed_dict=loss_feed)
# Log episode results.
if ep_num % FLAGS.logging_frequency == 0:
steps_per_second = (
FLAGS.logging_frequency * ep_length * batch_size / (
time.time() - prev_logging_time))
mean_reward = np.mean(np.sum(rewards, axis=0))
mean_last_phase_reward = np.mean(env.last_phase_rewards())
mean_tvt_reward = np.mean(np.sum(tvt_rewards, axis=0))
logging.info('Episode %d. SPS: %s', ep_num, steps_per_second)
logging.info('Episode %d. Mean episode reward: %f', ep_num, mean_reward)
logging.info('Episode %d. Last phase reward: %f', ep_num,
mean_last_phase_reward)
logging.info('Episode %d. Mean TVT episode reward: %f', ep_num,
mean_tvt_reward)
logging.info('Episode %d. Loss: %s', ep_num, ep_loss)
logging.info('Episode %d. Loss logs: %s', ep_num, ep_loss_logs)
if FLAGS.logdir:
summary = tf.Summary()
summary.value.add(tag='reward', simple_value=mean_reward)
summary.value.add(tag='last phase reward',
simple_value=mean_last_phase_reward)
summary.value.add(tag='tvt reward', simple_value=mean_tvt_reward)
summary.value.add(tag='total loss', simple_value=ep_loss)
for k, v in ep_loss_logs.items():
summary.value.add(tag='loss - {}'.format(k), simple_value=v)
# Tensorboard x-axis is total number of episodes run.
summary_writer.add_summary(summary, ep_num * batch_size)
summary_writer.flush()
prev_logging_time = time.time()
ep_num += 1
if FLAGS.num_episodes and ep_num >= FLAGS.num_episodes:
run = False
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | tvt/main.py |
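For reference, a hypothetical way to launch this trainer from the repository root (the module path and flag values are assumptions for illustration, not taken from the repository's documentation):

# python -m tvt.main --pycolab_game=key_to_door --batch_size=16 \
#     --gamma=0.92 --do_tvt=True --logdir=/tmp/tvt_logs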
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab env."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from pycolab import rendering
from tvt.pycolab import active_visual_match
from tvt.pycolab import key_to_door
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
class PycolabEnvironment(object):
"""A simple environment adapter for pycolab games."""
def __init__(self, game,
num_apples=10,
apple_reward=1.,
fix_apple_reward_in_episode=False,
final_reward=10.,
crop=True,
default_reward=0):
"""Construct a `environment.Base` adapter that wraps a pycolab game."""
rng = np.random.RandomState()
if game == 'key_to_door':
self._game = key_to_door.Game(rng,
num_apples,
apple_reward,
fix_apple_reward_in_episode,
final_reward,
crop)
elif game == 'active_visual_match':
self._game = active_visual_match.Game(rng,
num_apples,
apple_reward,
fix_apple_reward_in_episode,
final_reward)
else:
raise ValueError('Unsupported game "%s".' % game)
self._default_reward = default_reward
self._num_actions = self._game.num_actions
# Agents expect HWC uint8 observations, Pycolab uses CHW float observations.
colours = nest.map_structure(lambda c: float(c) * 255 / 1000,
self._game.colours)
self._rgb_converter = rendering.ObservationToArray(
value_mapping=colours, permute=(1, 2, 0), dtype=np.uint8)
episode = self._game.make_episode()
observation, _, _ = episode.its_showtime()
self._image_shape = self._rgb_converter(observation).shape
def _process_outputs(self, observation, reward):
if reward is None:
reward = self._default_reward
image = self._rgb_converter(observation)
return image, reward
def reset(self):
"""Start a new episode."""
self._episode = self._game.make_episode()
observation, reward, _ = self._episode.its_showtime()
return self._process_outputs(observation, reward)
def step(self, action):
"""Take step in episode."""
observation, reward, _ = self._episode.play(action)
return self._process_outputs(observation, reward)
@property
def num_actions(self):
return self._num_actions
@property
def observation_shape(self):
return self._image_shape
@property
def episode_length(self):
return self._game.episode_length
def last_phase_reward(self):
# In Pycolab games here we only track chapter_reward for final chapter.
return float(self._episode.the_plot['chapter_reward'])
| deepmind-research-master | tvt/pycolab/env.py |
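A minimal interaction loop with the adapter above; it assumes pycolab and the tvt.pycolab games are importable, and the settings shown are illustrative:

import numpy as np
from tvt.pycolab import env as pycolab_env

env = pycolab_env.PycolabEnvironment(game='active_visual_match', num_apples=5)
image, reward = env.reset()                # HWC uint8 image and a scalar reward
for _ in range(env.episode_length):
  action = np.random.randint(env.num_actions)
  image, reward = env.step(action)
print('last phase reward:', env.last_phase_reward())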
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab Game interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class AbstractGame(object):
"""Abstract base class for Pycolab games."""
@abc.abstractmethod
def __init__(self, rng, **settings):
"""Initialize the game."""
@abc.abstractproperty
def num_actions(self):
"""Number of possible actions in the game."""
@abc.abstractproperty
def colours(self):
"""Symbol to colour map for the game."""
@abc.abstractmethod
def make_episode(self):
"""Factory method for generating new episodes of the game."""
| deepmind-research-master | tvt/pycolab/game.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Key to door task.
The game is split up into three phases:
1. (exploration phase) player can collect a key,
2. (distractor phase) player is collecting apples,
3. (reward phase) player can open the door and get the reward if the key is
previously collected.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import ascii_art
from pycolab import storytelling
from pycolab import things as plab_things
from tvt.pycolab import common
from tvt.pycolab import game
from tvt.pycolab import objects
COLOURS = {
'i': (1000, 1000, 1000), # Indicator.
}
EXPLORE_GRID = [
' ####### ',
' #kkkkk# ',
' #kkkkk# ',
' ## ## ',
' #+++++# ',
' #+++++# ',
' ####### '
]
REWARD_GRID = [
' ',
' ##d## ',
' # # ',
' # + # ',
' # # ',
' ##### ',
' ',
]
class KeySprite(plab_things.Sprite):
"""Sprite for the key."""
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if self.visible and pick_up:
# Pass information to all phases.
the_plot['has_key'] = True
self._visible = False
class DoorSprite(plab_things.Sprite):
"""Sprite for the door."""
def __init__(self, corner, position, character, pickup_reward):
super(DoorSprite, self).__init__(corner, position, character)
self._pickup_reward = pickup_reward
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if pick_up and the_plot.get('has_key'):
the_plot.add_reward(self._pickup_reward)
# The key is lost after the first time opening the door
# to ensure only one reward per episode.
the_plot['has_key'] = False
class PlayerSprite(common.PlayerSprite):
"""Sprite for the actor."""
def __init__(self, corner, position, character):
super(PlayerSprite, self).__init__(
corner, position, character,
impassable=common.BORDER + common.INDICATOR + common.DOOR)
def update(self, actions, board, layers, backdrop, things, the_plot):
# Allow moving through the door if key is previously collected.
if common.DOOR in self.impassable and the_plot.get('has_key'):
self._impassable.remove(common.DOOR)
super(PlayerSprite, self).update(actions, board, layers, backdrop, things,
the_plot)
class Game(game.AbstractGame):
"""Key To Door Game."""
def __init__(self,
rng,
num_apples=10,
apple_reward=(1, 10),
fix_apple_reward_in_episode=True,
final_reward=10.,
crop=True,
max_frames=common.DEFAULT_MAX_FRAMES_PER_PHASE):
del rng # Each episode is identical and colours are not randomised.
self._num_apples = num_apples
self._apple_reward = apple_reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
self._final_reward = final_reward
self._crop = crop
self._max_frames = max_frames
self._episode_length = sum(self._max_frames.values())
self._num_actions = common.NUM_ACTIONS
self._colours = common.FIXED_COLOURS.copy()
self._colours.update(COLOURS)
self._extra_observation_fields = ['chapter_reward_as_string']
@property
def extra_observation_fields(self):
"""The field names of extra observations."""
return self._extra_observation_fields
@property
def num_actions(self):
"""Number of possible actions in the game."""
return self._num_actions
@property
def episode_length(self):
return self._episode_length
@property
def colours(self):
"""Symbol to colour map for key to door."""
return self._colours
def _make_explore_phase(self):
# Keep only one key and one player position.
explore_grid = common.keep_n_characters_in_grid(
EXPLORE_GRID, common.KEY, 1)
explore_grid = common.keep_n_characters_in_grid(
explore_grid, common.PLAYER, 1)
return ascii_art.ascii_art_to_game(
art=explore_grid,
what_lies_beneath=' ',
sprites={
common.PLAYER: PlayerSprite,
common.KEY: KeySprite,
common.INDICATOR: ascii_art.Partial(objects.IndicatorObjectSprite,
char_to_track=common.KEY,
override_position=(0, 5)),
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['explore']),
},
update_schedule=[
common.PLAYER, common.KEY, common.INDICATOR, common.TIMER],
z_order=[common.KEY, common.INDICATOR, common.PLAYER, common.TIMER],
)
def _make_distractor_phase(self):
return common.distractor_phase(
player_sprite=PlayerSprite,
num_apples=self._num_apples,
max_frames=self._max_frames['distractor'],
apple_reward=self._apple_reward,
fix_apple_reward_in_episode=self._fix_apple_reward_in_episode)
def _make_reward_phase(self):
return ascii_art.ascii_art_to_game(
art=REWARD_GRID,
what_lies_beneath=' ',
sprites={
common.PLAYER: PlayerSprite,
common.DOOR: ascii_art.Partial(DoorSprite,
pickup_reward=self._final_reward),
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['reward'],
track_chapter_reward=True),
},
update_schedule=[common.PLAYER, common.DOOR, common.TIMER],
z_order=[common.PLAYER, common.DOOR, common.TIMER],
)
def make_episode(self):
"""Factory method for generating new episodes of the game."""
if self._crop:
croppers = common.get_cropper()
else:
croppers = None
return storytelling.Story([
self._make_explore_phase,
self._make_distractor_phase,
self._make_reward_phase,
], croppers=croppers)
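# Minimal usage sketch (added for illustration, not part of the original
# module), assuming the pycolab `Story` object exposes the usual engine
# interface (`its_showtime`, `play`, `game_over`):
def _demo_random_episode(num_steps=10):
  """Steps a key-to-door episode with uniformly random actions."""
  import numpy as np  # Local import to keep the sketch self-contained.
  episode = Game(rng=np.random.RandomState(42)).make_episode()
  observation, reward, discount = episode.its_showtime()
  for _ in range(num_steps):
    observation, reward, discount = episode.play(
        np.random.randint(common.NUM_ACTIONS))
    if episode.game_over:
      break
  return observation, reward, discount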
| deepmind-research-master | tvt/pycolab/key_to_door.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Active visual match task.
The game is split up into three phases:
1. (exploration phase) player is in one room and a coloured object is in the
   other,
2. (distractor phase) player is collecting apples,
3. (reward phase) player sees three doors of different colours and has to
   select the one with the same colour as the object from the first phase.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import ascii_art
from pycolab import storytelling
from tvt.pycolab import common
from tvt.pycolab import game
from tvt.pycolab import objects
SYMBOLS_TO_SHUFFLE = ['b', 'c', 'e']
EXPLORE_GRID = [
' ppppppp ',
' p p ',
' p p ',
' pp pp ',
' p+++++p ',
' p+++++p ',
' ppppppp '
]
REWARD_GRID = [
'###########',
'# b c e #',
'# #',
'# #',
'#### ####',
' # + # ',
' ##### '
]
class Game(game.AbstractGame):
"""Image Match Passive Game."""
def __init__(self,
rng,
num_apples=10,
apple_reward=(1, 10),
fix_apple_reward_in_episode=True,
final_reward=10.,
max_frames=common.DEFAULT_MAX_FRAMES_PER_PHASE):
self._rng = rng
self._num_apples = num_apples
self._apple_reward = apple_reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
self._final_reward = final_reward
self._max_frames = max_frames
self._episode_length = sum(self._max_frames.values())
self._num_actions = common.NUM_ACTIONS
self._colours = common.FIXED_COLOURS.copy()
self._colours.update(
common.get_shuffled_symbol_colour_map(rng, SYMBOLS_TO_SHUFFLE))
self._extra_observation_fields = ['chapter_reward_as_string']
@property
def extra_observation_fields(self):
"""The field names of extra observations."""
return self._extra_observation_fields
@property
def num_actions(self):
"""Number of possible actions in the game."""
return self._num_actions
@property
def episode_length(self):
return self._episode_length
@property
def colours(self):
"""Symbol to colour map for key to door."""
return self._colours
def _make_explore_phase(self, target_char):
# Keep only one coloured position and one player position.
grid = common.keep_n_characters_in_grid(EXPLORE_GRID, 'p', 1, common.BORDER)
grid = common.keep_n_characters_in_grid(grid, 'p', 0, target_char)
grid = common.keep_n_characters_in_grid(grid, common.PLAYER, 1)
return ascii_art.ascii_art_to_game(
grid,
what_lies_beneath=' ',
sprites={
common.PLAYER:
ascii_art.Partial(
common.PlayerSprite,
impassable=common.BORDER + target_char),
target_char:
objects.ObjectSprite,
common.TIMER:
ascii_art.Partial(common.TimerSprite,
self._max_frames['explore']),
},
update_schedule=[common.PLAYER, target_char, common.TIMER],
z_order=[target_char, common.PLAYER, common.TIMER],
)
def _make_distractor_phase(self):
return common.distractor_phase(
player_sprite=common.PlayerSprite,
num_apples=self._num_apples,
max_frames=self._max_frames['distractor'],
apple_reward=self._apple_reward,
fix_apple_reward_in_episode=self._fix_apple_reward_in_episode)
def _make_reward_phase(self, target_char):
return ascii_art.ascii_art_to_game(
REWARD_GRID,
what_lies_beneath=' ',
sprites={
common.PLAYER: common.PlayerSprite,
'b': objects.ObjectSprite,
'c': objects.ObjectSprite,
'e': objects.ObjectSprite,
common.TIMER: ascii_art.Partial(common.TimerSprite,
self._max_frames['reward'],
track_chapter_reward=True),
target_char: ascii_art.Partial(objects.ObjectSprite,
reward=self._final_reward),
},
update_schedule=[common.PLAYER, 'b', 'c', 'e', common.TIMER],
z_order=[common.PLAYER, 'b', 'c', 'e', common.TIMER],
)
def make_episode(self):
"""Factory method for generating new episodes of the game."""
target_char = self._rng.choice(SYMBOLS_TO_SHUFFLE)
return storytelling.Story([
lambda: self._make_explore_phase(target_char),
self._make_distractor_phase,
lambda: self._make_reward_phase(target_char),
], croppers=common.get_cropper())
| deepmind-research-master | tvt/pycolab/active_visual_match.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Common utilities for Pycolab games."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import colorsys
import numpy as np
from pycolab import ascii_art
from pycolab import cropping
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
from six.moves import zip
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
# Actions.
# Those with a negative ID are not allowed for the agent.
ACTION_QUIT = -2
ACTION_DELAY = -1
ACTION_NORTH = 0
ACTION_SOUTH = 1
ACTION_WEST = 2
ACTION_EAST = 3
NUM_ACTIONS = 4
DEFAULT_MAX_FRAMES_PER_PHASE = {
'explore': 15,
'distractor': 90,
'reward': 15
}
# Reserved symbols.
PLAYER = '+'
BORDER = '#'
BACKGROUND = ' '
KEY = 'k'
DOOR = 'd'
APPLE = 'a'
TIMER = 't'
INDICATOR = 'i'
FIXED_COLOURS = {
PLAYER: (898, 584, 430),
BORDER: (100, 100, 100),
BACKGROUND: (800, 800, 800),
KEY: (627, 321, 176),
DOOR: (529, 808, 922),
APPLE: (550, 700, 0),
}
APPLE_DISTRACTOR_GRID = [
'###########',
'#a a a a a#',
'# a a a a #',
'#a a a a a#',
'# a a a a #',
'#a a + a a#',
'###########'
]
DEFAULT_APPLE_RESPAWN_TIME = 20
DEFAULT_APPLE_REWARD = 1.
def get_shuffled_symbol_colour_map(rng_or_seed, symbols,
num_potential_colours=None):
"""Get a randomized mapping between symbols and colours.
Args:
rng_or_seed: A random state or random seed.
symbols: List of symbols.
num_potential_colours: Number of equally spaced colours to choose from.
Defaults to number of symbols. Colours are generated deterministically.
Returns:
Randomized mapping between symbols and colours.
"""
num_symbols = len(symbols)
num_potential_colours = num_potential_colours or num_symbols
if isinstance(rng_or_seed, np.random.RandomState):
rng = rng_or_seed
else:
rng = np.random.RandomState(rng_or_seed)
# Generate a range of colours.
step = 1. / num_potential_colours
hues = np.arange(0, num_potential_colours) * step
potential_colours = [colorsys.hsv_to_rgb(h, 1.0, 1.0) for h in hues]
# Randomly draw num_symbols colours without replacement.
rng.shuffle(potential_colours)
colours = potential_colours[:num_symbols]
symbol_to_colour_map = dict(list(zip(symbols, colours)))
# Multiply each colour value by 1000.
return nest.map_structure(lambda c: int(c * 1000), symbol_to_colour_map)
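# Example (illustrative): get_shuffled_symbol_colour_map(0, ['b', 'c', 'e'])
# returns a dict mapping each of the three symbols to an (R, G, B) tuple with
# components scaled by 1000 (matching the convention of FIXED_COLOURS above),
# and the same seed always yields the same assignment.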
def get_cropper():
return cropping.ScrollingCropper(
rows=5,
cols=5,
to_track=PLAYER,
pad_char=BACKGROUND,
scroll_margins=(2, 2))
def distractor_phase(player_sprite, num_apples, max_frames,
apple_reward=DEFAULT_APPLE_REWARD,
fix_apple_reward_in_episode=False,
respawn_every=DEFAULT_APPLE_RESPAWN_TIME):
"""Distractor phase engine factory.
Args:
player_sprite: Player sprite class.
num_apples: Number of apples to sample from the apple distractor grid.
max_frames: Maximum duration of the distractor phase in frames.
apple_reward: Can either be a scalar specifying the reward or a reward range
[min, max), given as a list or tuple, to uniformly sample from.
fix_apple_reward_in_episode: The apple reward is constant throughout each
episode.
respawn_every: respawn frequency of apples.
Returns:
Distractor phase engine.
"""
distractor_grid = keep_n_characters_in_grid(APPLE_DISTRACTOR_GRID, APPLE,
num_apples)
engine = ascii_art.ascii_art_to_game(
distractor_grid,
what_lies_beneath=BACKGROUND,
sprites={
PLAYER: player_sprite,
TIMER: ascii_art.Partial(TimerSprite, max_frames),
},
drapes={
APPLE: ascii_art.Partial(
AppleDrape,
reward=apple_reward,
fix_apple_reward_in_episode=fix_apple_reward_in_episode,
respawn_every=respawn_every)
},
update_schedule=[PLAYER, APPLE, TIMER],
z_order=[APPLE, PLAYER, TIMER],
)
return engine
def replace_grid_symbols(grid, old_to_new_map):
"""Replaces symbols in the grid.
If mapping is not defined the symbol is not updated.
Args:
grid: Represented as a list of strings.
old_to_new_map: Mapping between symbols.
Returns:
Updated grid.
"""
def symbol_map(x):
if x in old_to_new_map:
return old_to_new_map[x]
return x
new_grid = []
for row in grid:
new_grid.append(''.join(symbol_map(i) for i in row))
return new_grid
def keep_n_characters_in_grid(grid, character, n, backdrop_char=BACKGROUND):
"""Keeps only a sample of characters `character` in the grid."""
np_grid = np.array([list(i) for i in grid])
char_positions = np.argwhere(np_grid == character)
# Randomly select parts to remove.
num_empty_positions = char_positions.shape[0] - n
if num_empty_positions < 0:
raise ValueError('Not enough characters `{}` in grid.'.format(character))
empty_pos = np.random.permutation(char_positions)[:num_empty_positions]
# Remove characters.
grid = [list(row) for row in grid]
for (i, j) in empty_pos:
grid[i][j] = backdrop_char
return [''.join(row) for row in grid]
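# Example (illustrative): keep_n_characters_in_grid(['aa', 'a '], 'a', 1)
# keeps exactly one randomly chosen 'a' and replaces the other two with the
# backdrop character ' ', returning a new list of strings of the same shape.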
class PlayerSprite(prefab_sprites.MazeWalker):
"""Sprite for the actor."""
def __init__(self, corner, position, character, impassable=BORDER):
super(PlayerSprite, self).__init__(
corner, position, character, impassable=impassable,
confined_to_board=True)
def update(self, actions, board, layers, backdrop, things, the_plot):
the_plot.add_reward(0.)
if actions == ACTION_QUIT:
the_plot.next_chapter = None
the_plot.terminate_episode()
if actions == ACTION_WEST:
self._west(board, the_plot)
elif actions == ACTION_EAST:
self._east(board, the_plot)
elif actions == ACTION_NORTH:
self._north(board, the_plot)
elif actions == ACTION_SOUTH:
self._south(board, the_plot)
class AppleDrape(plab_things.Drape):
"""Drape for the apples used in the distractor phase."""
def __init__(self,
curtain,
character,
respawn_every,
reward,
fix_apple_reward_in_episode):
"""Constructor.
Args:
curtain: Array specifying locations of apples. Obtained from ascii grid.
character: Character representing the drape.
respawn_every: respawn frequency of apples.
reward: Can either be a scalar specifying the reward or a reward range
[min, max), given as a list or tuple, to uniformly sample from.
fix_apple_reward_in_episode: If set to True, then only sample the apple's
reward once in the episode and then fix the value.
"""
super(AppleDrape, self).__init__(curtain, character)
self._respawn_every = respawn_every
if not isinstance(reward, (list, tuple)):
# Assuming scalar.
self._reward = [reward, reward]
else:
if len(reward) != 2:
raise ValueError('Reward must be a scalar or a two element list/tuple.')
self._reward = reward
self._fix_apple_reward_in_episode = fix_apple_reward_in_episode
# Grid specifying for each apple the last frame it was picked up.
    # Initialized to infinity for cells with apples and -1 for cells without.
self._last_pickup = np.where(curtain,
np.inf * np.ones_like(curtain),
-1. * np.ones_like(curtain))
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[PLAYER].position
# decide the apple_reward
if (self._fix_apple_reward_in_episode and
not the_plot.get('sampled_apple_reward', None)):
the_plot['sampled_apple_reward'] = np.random.choice((self._reward[0],
self._reward[1]))
if self.curtain[player_position]:
self._last_pickup[player_position] = the_plot.frame
self.curtain[player_position] = False
if not self._fix_apple_reward_in_episode:
the_plot.add_reward(np.random.uniform(*self._reward))
else:
the_plot.add_reward(the_plot['sampled_apple_reward'])
if self._respawn_every:
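      # Respawn apples that were picked up more than `respawn_every` frames
      # ago. Cells that never contained an apple keep last_pickup == -1 and
      # are excluded by the second condition; apples that were never picked
      # up keep last_pickup == inf and never satisfy the first condition.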
respawn_cond = the_plot.frame > self._last_pickup + self._respawn_every
respawn_cond &= self._last_pickup >= 0
self.curtain[respawn_cond] = True
class TimerSprite(plab_things.Sprite):
"""Sprite for the timer.
The timer is in charge of stopping the current chapter. Timer sprite should be
placed last in the update order to make sure everything is updated before the
chapter terminates.
"""
def __init__(self, corner, position, character, max_frames,
track_chapter_reward=False):
super(TimerSprite, self).__init__(corner, position, character)
if not isinstance(max_frames, int):
raise ValueError('max_frames must be of type integer.')
self._max_frames = max_frames
self._visible = False
self._track_chapter_reward = track_chapter_reward
self._total_chapter_reward = 0.
def update(self, actions, board, layers, backdrop, things, the_plot):
directives = the_plot._get_engine_directives() # pylint: disable=protected-access
if self._track_chapter_reward:
self._total_chapter_reward += directives.summed_reward or 0.
# Every chapter starts at frame = 0.
if the_plot.frame >= self._max_frames or directives.game_over:
# Calculate the reward obtained in this phase and send it through the
# extra observations channel.
if self._track_chapter_reward:
the_plot['chapter_reward'] = self._total_chapter_reward
the_plot.terminate_episode()
| deepmind-research-master | tvt/pycolab/common.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab human player."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import curses
from absl import app
from absl import flags
import numpy as np
from pycolab import human_ui
from tvt.pycolab import active_visual_match
from tvt.pycolab import common
from tvt.pycolab import key_to_door
FLAGS = flags.FLAGS
flags.DEFINE_enum('game', 'key_to_door',
['key_to_door', 'active_visual_match'],
'The name of the game')
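# Example invocation (illustrative):
#   python -m tvt.pycolab.human_player --game=active_visual_match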
def main(unused_argv):
rng = np.random.RandomState()
if FLAGS.game == 'key_to_door':
game = key_to_door.Game(rng)
elif FLAGS.game == 'active_visual_match':
game = active_visual_match.Game(rng)
else:
raise ValueError('Unsupported game "%s".' % FLAGS.game)
episode = game.make_episode()
ui = human_ui.CursesUi(
keys_to_actions={
curses.KEY_UP: common.ACTION_NORTH,
curses.KEY_DOWN: common.ACTION_SOUTH,
curses.KEY_LEFT: common.ACTION_WEST,
curses.KEY_RIGHT: common.ACTION_EAST,
-1: common.ACTION_DELAY,
'q': common.ACTION_QUIT,
'Q': common.ACTION_QUIT},
delay=-1,
colour_fg=game.colours
)
ui.play(episode)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | tvt/pycolab/human_player.py |
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Pycolab sprites."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pycolab import things as plab_things
from pycolab.prefab_parts import sprites as prefab_sprites
import six
from tvt.pycolab import common
class PlayerSprite(prefab_sprites.MazeWalker):
"""Sprite representing the agent."""
def __init__(self, corner, position, character,
max_steps_per_act, moving_player):
"""Indicates to the superclass that we can't walk off the board."""
super(PlayerSprite, self).__init__(
corner, position, character, impassable=[common.BORDER],
confined_to_board=True)
self._moving_player = moving_player
self._max_steps_per_act = max_steps_per_act
self._num_steps = 0
def update(self, actions, board, layers, backdrop, things, the_plot):
del backdrop # Unused.
if actions is not None:
assert actions in common.ACTIONS
the_plot.log("Step {} | Action {}".format(self._num_steps, actions))
the_plot.add_reward(0.0)
self._num_steps += 1
if actions == common.ACTION_QUIT:
the_plot.terminate_episode()
if self._moving_player:
if actions == common.ACTION_WEST:
self._west(board, the_plot)
elif actions == common.ACTION_EAST:
self._east(board, the_plot)
elif actions == common.ACTION_NORTH:
self._north(board, the_plot)
elif actions == common.ACTION_SOUTH:
self._south(board, the_plot)
if self._max_steps_per_act == self._num_steps:
the_plot.terminate_episode()
class ObjectSprite(plab_things.Sprite):
"""Sprite for a generic object which can be collectable."""
def __init__(self, corner, position, character, reward=0., collectable=True,
terminate=True):
super(ObjectSprite, self).__init__(corner, position, character)
self._reward = reward # Reward on pickup.
self._collectable = collectable
def set_visibility(self, visible):
self._visible = visible
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = self.position == player_position
if pick_up and self.visible:
the_plot.add_reward(self._reward)
if self._collectable:
self.set_visibility(False)
# set all other objects to be invisible
for v in six.itervalues(things):
if isinstance(v, ObjectSprite):
v.set_visibility(False)
class IndicatorObjectSprite(plab_things.Sprite):
"""Sprite for the indicator object.
The indicator object is an object that spawns at a designated position once
the player picks up an object defined by the `char_to_track` argument.
The indicator object is spawned for just a single frame.
"""
def __init__(self, corner, position, character, char_to_track,
override_position=None):
super(IndicatorObjectSprite, self).__init__(corner, position, character)
if override_position is not None:
self._position = override_position
self._char_to_track = char_to_track
self._visible = False
self._pickup_frame = None
def update(self, actions, board, layers, backdrop, things, the_plot):
player_position = things[common.PLAYER].position
pick_up = things[self._char_to_track].position == player_position
if self._pickup_frame:
self._visible = False
if pick_up and not self._pickup_frame:
self._visible = True
self._pickup_frame = the_plot.frame
| deepmind-research-master | tvt/pycolab/objects.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Two dimensional convolutional neural net layers."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def weight_variable(shape, stddev=0.01):
"""Returns the weight variable."""
logging.vlog(1, 'weight init for shape %s', str(shape))
return tf.get_variable(
'w', shape, initializer=tf.random_normal_initializer(stddev=stddev))
def bias_variable(shape):
return tf.get_variable(
'b', shape, initializer=tf.zeros_initializer())
def conv2d(x, w, atrou_rate=1, data_format='NHWC'):
if atrou_rate > 1:
return tf.nn.convolution(
x,
w,
dilation_rate=[atrou_rate] * 2,
padding='SAME',
data_format=data_format)
else:
return tf.nn.conv2d(
x, w, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format)
def make_conv_sep2d_layer(input_node,
in_channels,
channel_multiplier,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
batch_norm=False,
is_training=True,
atrou_rate=1,
data_format='NHWC',
stddev=0.01):
"""Use separable convolutions."""
if filter_size_2 is None:
filter_size_2 = filter_size
logging.vlog(1, 'layer %s in %d out %d chan mult %d', layer_name, in_channels,
out_channels, channel_multiplier)
with tf.variable_scope(layer_name):
with tf.variable_scope('depthwise'):
w_depthwise = weight_variable(
[filter_size, filter_size_2, in_channels, channel_multiplier],
stddev=stddev)
with tf.variable_scope('pointwise'):
w_pointwise = weight_variable(
[1, 1, in_channels * channel_multiplier, out_channels], stddev=stddev)
h_conv = tf.nn.separable_conv2d(
input_node,
w_depthwise,
w_pointwise,
padding='SAME',
strides=[1, 1, 1, 1],
rate=[atrou_rate, atrou_rate],
data_format=data_format)
if batch_norm:
h_conv = batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
else:
b_conv = bias_variable([out_channels])
h_conv = tf.nn.bias_add(h_conv, b_conv, data_format=data_format)
return h_conv
def batch_norm_layer(h_conv, layer_name, is_training=True, data_format='NCHW'):
"""Batch norm layer."""
logging.vlog(1, 'batch norm for layer %s', layer_name)
return tf.contrib.layers.batch_norm(
h_conv,
is_training=is_training,
fused=True,
decay=0.999,
scope=layer_name,
data_format=data_format)
def make_conv_layer(input_node,
in_channels,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
non_linearity=True,
batch_norm=False,
is_training=True,
atrou_rate=1,
data_format='NHWC',
stddev=0.01):
"""Creates a convolution layer."""
if filter_size_2 is None:
filter_size_2 = filter_size
logging.vlog(
1, 'layer %s in %d out %d', layer_name, in_channels, out_channels)
with tf.variable_scope(layer_name):
w_conv = weight_variable(
[filter_size, filter_size_2, in_channels, out_channels], stddev=stddev)
h_conv = conv2d(
input_node, w_conv, atrou_rate=atrou_rate, data_format=data_format)
if batch_norm:
h_conv = batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
else:
b_conv = bias_variable([out_channels])
h_conv = tf.nn.bias_add(h_conv, b_conv, data_format=data_format)
if non_linearity:
h_conv = tf.nn.elu(h_conv)
return h_conv
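# Minimal usage sketch (illustrative, not part of the original module),
# assuming a TF1.x graph-mode setting:
def _demo_conv_layer():
  """Builds a single 3x3 convolution layer on a dummy NHWC input."""
  x = tf.placeholder(tf.float32, shape=[1, 64, 64, 40], name='demo_input')
  return make_conv_layer(
      x, in_channels=40, out_channels=32, layer_name='demo_conv1',
      filter_size=3)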
| deepmind-research-master | alphafold_casp13/two_dim_convnet.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code to run distogram inference."""
import collections
import os
import time
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
import sonnet as snt
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import config_dict
from alphafold_casp13 import contacts_experiment
from alphafold_casp13 import distogram_io
from alphafold_casp13 import secstruct
flags.DEFINE_string('config_path', None, 'Path of the JSON config file.')
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint path for evaluation.')
flags.DEFINE_boolean('cpu', False, 'Force onto CPU.')
flags.DEFINE_string('output_path', None,
'Base path where all output files will be saved to.')
flags.DEFINE_string('eval_sstable', None,
'Path of the SSTable to read the input tf.Examples from.')
flags.DEFINE_string('stats_file', None,
'Path of the statistics file to use for normalization.')
FLAGS = flags.FLAGS
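# Example invocation (all paths below are illustrative):
#   python -m alphafold_casp13.contacts \
#     --config_path=/path/to/config.json \
#     --checkpoint_path=/path/to/model.ckpt \
#     --eval_sstable=/path/to/input.tfrec \
#     --stats_file=/path/to/stats.txt \
#     --output_path=/path/to/output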
# A named tuple to store the outputs of a single prediction run.
Prediction = collections.namedtuple(
'Prediction', [
'single_message', # A debugging message.
'num_crops_local', # The number of crops used to make this prediction.
'sequence', # The amino acid sequence.
'filebase', # The chain name. All output files will use this name.
'softmax_probs', # Softmax of the distogram.
'ss', # Secondary structure prediction.
'asa', # ASA prediction.
'torsions', # Torsion prediction.
])
def evaluate(crop_size_x, crop_size_y, feature_normalization, checkpoint_path,
normalization_exclusion, eval_config, network_config):
"""Main evaluation loop."""
experiment = contacts_experiment.Contacts(
tfrecord=eval_config.eval_sstable,
stats_file=eval_config.stats_file,
network_config=network_config,
crop_size_x=crop_size_x,
crop_size_y=crop_size_y,
feature_normalization=feature_normalization,
normalization_exclusion=normalization_exclusion)
checkpoint = snt.get_saver(experiment.model, collections=[
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.MOVING_AVERAGE_VARIABLES])
with tf.train.SingularMonitoredSession(hooks=[]) as sess:
logging.info('Restoring from checkpoint %s', checkpoint_path)
checkpoint.restore(sess, checkpoint_path)
logging.info('Writing output to %s', eval_config.output_path)
eval_begin_time = time.time()
_run_evaluation(sess=sess,
experiment=experiment,
eval_config=eval_config,
output_dir=eval_config.output_path,
min_range=network_config.min_range,
max_range=network_config.max_range,
num_bins=network_config.num_bins,
torsion_bins=network_config.torsion_bins)
logging.info('Finished eval %.1fs', (time.time() - eval_begin_time))
def _run_evaluation(
sess, experiment, eval_config, output_dir, min_range, max_range, num_bins,
torsion_bins):
"""Evaluate a contact map by aggregating crops.
Args:
sess: A tf.train.Session.
experiment: An experiment class.
eval_config: A config dict of eval parameters.
output_dir: Directory to save the predictions to.
min_range: The minimum range in Angstroms to consider in distograms.
max_range: The maximum range in Angstroms to consider in distograms, see
num_bins below for clarification.
num_bins: The number of bins in the distance histogram being predicted.
We divide the min_range--(min_range + max_range) Angstrom range into this
many bins.
torsion_bins: The number of bins the torsion angles are discretised into.
"""
tf.io.gfile.makedirs(os.path.join(output_dir, 'pickle_files'))
logging.info('Eval config is %s\nnum_bins: %d', eval_config, num_bins)
num_examples = 0
num_crops = 0
start_all_time = time.time()
# Either do the whole test set, or up to a specified limit.
max_examples = experiment.num_eval_examples
if eval_config.max_num_examples > 0:
max_examples = min(max_examples, eval_config.max_num_examples)
while num_examples < max_examples:
one_prediction = compute_one_prediction(
num_examples, experiment, sess, eval_config, num_bins, torsion_bins)
single_message = one_prediction.single_message
num_crops_local = one_prediction.num_crops_local
sequence = one_prediction.sequence
filebase = one_prediction.filebase
softmax_probs = one_prediction.softmax_probs
ss = one_prediction.ss
asa = one_prediction.asa
torsions = one_prediction.torsions
num_examples += 1
num_crops += num_crops_local
# Save the output files.
filename = os.path.join(output_dir,
'pickle_files', '%s.pickle' % filebase)
distogram_io.save_distance_histogram(
filename, softmax_probs, filebase, sequence,
min_range=min_range, max_range=max_range, num_bins=num_bins)
if experiment.model.torsion_multiplier > 0:
torsions_dir = os.path.join(output_dir, 'torsions')
tf.io.gfile.makedirs(torsions_dir)
distogram_io.save_torsions(torsions_dir, filebase, sequence, torsions)
if experiment.model.secstruct_multiplier > 0:
ss_dir = os.path.join(output_dir, 'secstruct')
tf.io.gfile.makedirs(ss_dir)
secstruct.save_secstructs(ss_dir, filebase, None, sequence, ss)
if experiment.model.asa_multiplier > 0:
asa_dir = os.path.join(output_dir, 'asa')
tf.io.gfile.makedirs(asa_dir)
secstruct.save_secstructs(asa_dir, filebase, None, sequence,
np.expand_dims(asa, 1), label='Deepmind 2D ASA')
time_spent = time.time() - start_all_time
logging.info(
'Evaluate %d examples, %d crops %.1f crops/ex. '
'Took %.1fs, %.3f s/example %.3f crops/s\n%s',
num_examples, num_crops, num_crops / float(num_examples), time_spent,
time_spent / num_examples, num_crops / time_spent, single_message)
logging.info('Tested on %d', num_examples)
def compute_one_prediction(
num_examples, experiment, sess, eval_config, num_bins, torsion_bins):
"""Find the contact map for a single domain."""
num_crops_local = 0
debug_steps = 0
start = time.time()
output_fetches = {'probs': experiment.eval_probs}
output_fetches['softmax_probs'] = experiment.eval_probs_softmax
# Add the auxiliary outputs if present.
experiment.model.update_crop_fetches(output_fetches)
# Get data.
batch = experiment.get_one_example(sess)
length = batch['sequence_lengths'][0]
batch_size = batch['sequence_lengths'].shape[0]
domain = batch['domain_name'][0][0].decode('utf-8')
chain = batch['chain_name'][0][0].decode('utf-8')
filebase = domain or chain
sequence = six.ensure_str(batch['sequences'][0][0])
  logging.info('Working on %d %s %s %d', num_examples, domain, chain, length)
inputs_1d = batch['inputs_1d']
if 'residue_index' in batch:
logging.info('Getting residue_index from features')
residue_index = np.squeeze(
batch['residue_index'], axis=2).astype(np.int32)
else:
logging.info('Generating residue_index')
residue_index = np.tile(np.expand_dims(
np.arange(length, dtype=np.int32), 0), [batch_size, 1])
assert batch_size == 1
num_examples += batch_size
# Crops.
prob_accum = np.zeros((length, length, 2))
ss_accum = np.zeros((length, 8))
torsions_accum = np.zeros((length, torsion_bins**2))
asa_accum = np.zeros((length,))
weights_1d_accum = np.zeros((length,))
softmax_prob_accum = np.zeros((length, length, num_bins), dtype=np.float32)
crop_size_x = experiment.crop_size_x
crop_step_x = crop_size_x // eval_config.crop_shingle_x
crop_size_y = experiment.crop_size_y
crop_step_y = crop_size_y // eval_config.crop_shingle_y
prob_weights = 1
if eval_config.pyramid_weights > 0:
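    # Weight each crop with a pyramid that peaks at the crop centre and is
    # capped at `pyramid_weights`, so that overlapping crops contribute less
    # near their edges when the predictions are averaged.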
sx = np.expand_dims(np.linspace(1.0 / crop_size_x, 1, crop_size_x), 1)
sy = np.expand_dims(np.linspace(1.0 / crop_size_y, 1, crop_size_y), 0)
prob_weights = np.minimum(np.minimum(sx, np.flipud(sx)),
np.minimum(sy, np.fliplr(sy)))
prob_weights /= np.max(prob_weights)
prob_weights = np.minimum(prob_weights, eval_config.pyramid_weights)
logging.log_first_n(logging.INFO, 'Crop: %dx%d step %d,%d pyr %.2f',
debug_steps,
crop_size_x, crop_size_y,
crop_step_x, crop_step_y, eval_config.pyramid_weights)
# Accumulate all crops, starting and ending half off the square.
for i in range(-crop_size_x // 2, length - crop_size_x // 2, crop_step_x):
for j in range(-crop_size_y // 2, length - crop_size_y // 2, crop_step_y):
# The ideal crop.
patch = compute_one_patch(
sess, experiment, output_fetches, inputs_1d, residue_index,
prob_weights, batch, length, i, j, crop_size_x, crop_size_y)
# Assemble the crops into a final complete prediction.
ic = max(0, i)
jc = max(0, j)
ic_to = ic + patch['prob'].shape[1]
jc_to = jc + patch['prob'].shape[0]
prob_accum[jc:jc_to, ic:ic_to, 0] += patch['prob'] * patch['weight']
prob_accum[jc:jc_to, ic:ic_to, 1] += patch['weight']
softmax_prob_accum[jc:jc_to, ic:ic_to, :] += (
patch['softmax'] * np.expand_dims(patch['weight'], 2))
weights_1d_accum[jc:jc_to] += 1
weights_1d_accum[ic:ic_to] += 1
if 'asa_x' in patch:
asa_accum[ic:ic + patch['asa_x'].shape[0]] += np.squeeze(
patch['asa_x'], axis=1)
asa_accum[jc:jc + patch['asa_y'].shape[0]] += np.squeeze(
patch['asa_y'], axis=1)
if 'ss_x' in patch:
ss_accum[ic:ic + patch['ss_x'].shape[0]] += patch['ss_x']
ss_accum[jc:jc + patch['ss_y'].shape[0]] += patch['ss_y']
if 'torsions_x' in patch:
torsions_accum[
ic:ic + patch['torsions_x'].shape[0]] += patch['torsions_x']
torsions_accum[
jc:jc + patch['torsions_y'].shape[0]] += patch['torsions_y']
num_crops_local += 1
single_message = (
'Constructed %s len %d from %d chunks [%d, %d x %d, %d] '
'in %5.1fs' % (
filebase, length, num_crops_local,
crop_size_x, crop_step_x, crop_size_y, crop_step_y,
time.time() - start))
logging.info(single_message)
logging.info('prob_accum[:, :, 1]: %s', prob_accum[:, :, 1])
assert (prob_accum[:, :, 1] > 0.0).all()
probs = prob_accum[:, :, 0] / prob_accum[:, :, 1]
softmax_probs = softmax_prob_accum[:, :, :] / prob_accum[:, :, 1:2]
asa_accum /= weights_1d_accum
ss_accum /= np.expand_dims(weights_1d_accum, 1)
torsions_accum /= np.expand_dims(weights_1d_accum, 1)
# The probs are symmetrical.
probs = (probs + probs.transpose()) / 2
if num_bins > 1:
softmax_probs = (softmax_probs + np.transpose(
softmax_probs, axes=[1, 0, 2])) / 2
return Prediction(
single_message=single_message,
num_crops_local=num_crops_local,
sequence=sequence,
filebase=filebase,
softmax_probs=softmax_probs,
ss=ss_accum,
asa=asa_accum,
torsions=torsions_accum)
def compute_one_patch(sess, experiment, output_fetches, inputs_1d,
residue_index, prob_weights, batch, length, i, j,
crop_size_x, crop_size_y):
"""Compute the output predictions for a single crop."""
# Note that these are allowed to go off the end of the protein.
end_x = i + crop_size_x
end_y = j + crop_size_y
crop_limits = np.array([[i, end_x, j, end_y]], dtype=np.int32)
ic = max(0, i)
jc = max(0, j)
end_x_cropped = min(length, end_x)
end_y_cropped = min(length, end_y)
prepad_x = max(0, -i)
prepad_y = max(0, -j)
postpad_x = end_x - end_x_cropped
postpad_y = end_y - end_y_cropped
# Precrop the 2D features:
inputs_2d = np.pad(batch['inputs_2d'][
:, jc:end_y, ic:end_x, :],
[[0, 0],
[prepad_y, postpad_y],
[prepad_x, postpad_x],
[0, 0]], mode='constant')
assert inputs_2d.shape[1] == crop_size_y
assert inputs_2d.shape[2] == crop_size_x
# Generate the corresponding crop, but it might be truncated.
cxx = batch['inputs_2d'][:, ic:end_x, ic:end_x, :]
cyy = batch['inputs_2d'][:, jc:end_y, jc:end_y, :]
if cxx.shape[1] < inputs_2d.shape[1]:
cxx = np.pad(cxx, [[0, 0],
[prepad_x, max(0, i + crop_size_y - length)],
[prepad_x, postpad_x],
[0, 0]], mode='constant')
assert cxx.shape[1] == crop_size_y
assert cxx.shape[2] == crop_size_x
if cyy.shape[2] < inputs_2d.shape[2]:
cyy = np.pad(cyy, [[0, 0],
[prepad_y, postpad_y],
[prepad_y, max(0, j + crop_size_x - length)],
[0, 0]], mode='constant')
assert cyy.shape[1] == crop_size_y
assert cyy.shape[2] == crop_size_x
inputs_2d = np.concatenate([inputs_2d, cxx, cyy], 3)
output_results = sess.run(output_fetches, feed_dict={
experiment.inputs_1d_placeholder: inputs_1d,
experiment.residue_index_placeholder: residue_index,
experiment.inputs_2d_placeholder: inputs_2d,
experiment.crop_placeholder: crop_limits,
})
# Crop out the "live" region of the probs.
prob_patch = output_results['probs'][
0, prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
weight_patch = prob_weights[prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
patch = {'prob': prob_patch, 'weight': weight_patch}
if 'softmax_probs' in output_results:
patch['softmax'] = output_results['softmax_probs'][
0, prepad_y:crop_size_y - postpad_y,
prepad_x:crop_size_x - postpad_x]
if 'secstruct_probs' in output_results:
patch['ss_x'] = output_results['secstruct_probs'][
0, prepad_x:crop_size_x - postpad_x]
patch['ss_y'] = output_results['secstruct_probs'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
if 'torsion_probs' in output_results:
patch['torsions_x'] = output_results['torsion_probs'][
0, prepad_x:crop_size_x - postpad_x]
patch['torsions_y'] = output_results['torsion_probs'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
if 'asa_output' in output_results:
patch['asa_x'] = output_results['asa_output'][
0, prepad_x:crop_size_x - postpad_x]
patch['asa_y'] = output_results['asa_output'][
0, crop_size_x + prepad_y:crop_size_x + crop_size_y - postpad_y]
return patch
def main(argv):
del argv # Unused.
logging.info('Loading a JSON config from: %s', FLAGS.config_path)
with tf.io.gfile.GFile(FLAGS.config_path, 'r') as f:
config = config_dict.ConfigDict.from_json(f.read())
# Redefine the relevant output fields.
if FLAGS.eval_sstable:
config.eval_config.eval_sstable = FLAGS.eval_sstable
if FLAGS.stats_file:
config.eval_config.stats_file = FLAGS.stats_file
if FLAGS.output_path:
config.eval_config.output_path = FLAGS.output_path
with tf.device('/cpu:0' if FLAGS.cpu else None):
evaluate(checkpoint_path=FLAGS.checkpoint_path, **config)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | alphafold_casp13/contacts.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer for modelling and scoring secondary structure."""
import os
from absl import logging
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
# 8-class classes (Q8)
SECONDARY_STRUCTURES = '-HETSGBI'
# Equivalence classes for 3-class (Q3) from Li & Yu 2016.
# See http://www.cmbi.ru.nl/dssp.html for letter explanations.
Q3_MAP = ['-TSGIB', 'H', 'E']
def make_q3_matrices():
"""Generate mapping matrices for secstruct Q8:Q3 equivalence classes."""
dimension = len(SECONDARY_STRUCTURES)
q3_map_matrix = np.zeros((dimension, len(Q3_MAP)))
q3_lookup = np.zeros((dimension,), dtype=np.int32)
for i, eclass in enumerate(Q3_MAP): # equivalence classes
for m in eclass: # Members of the class.
ss_type = SECONDARY_STRUCTURES.index(m)
q3_map_matrix[ss_type, i] = 1.0
q3_lookup[ss_type] = i
return q3_map_matrix, q3_lookup
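# Illustrative use (not part of the original module): given an (L, 8) array
# `q8_probs` of Q8 probabilities, the corresponding Q3 probabilities are
# `q8_probs.dot(q3_map_matrix)` (as done with tf.matmul in Secstruct below),
# and `q3_lookup[q8_index]` gives the Q3 class of a single Q8 class index.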
class Secstruct(object):
"""Make a layer that computes hierarchical secstruct."""
# Build static, shared structures:
q3_map_matrix, q3_lookup = make_q3_matrices()
static_dimension = len(SECONDARY_STRUCTURES)
def __init__(self, name='secstruct'):
self.name = name
self._dimension = Secstruct.static_dimension
def make_layer_new(self, activations):
"""Make the layer."""
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
logging.info('Creating secstruct %s', activations)
self.logits = tf.contrib.layers.linear(activations, self._dimension)
self.ss_q8_probs = tf.nn.softmax(self.logits)
self.ss_q3_probs = tf.matmul(
self.ss_q8_probs, tf.constant(self.q3_map_matrix, dtype=tf.float32))
def get_q8_probs(self):
return self.ss_q8_probs
def save_secstructs(dump_dir_path, name, index, sequence, probs,
label='Deepmind secstruct'):
"""Write secstruct prob distributions to an ss2 file.
Can be overloaded to write out asa values too.
Args:
dump_dir_path: directory where to write files.
name: name of domain
index: index number of multiple samples. (or None for no index)
sequence: string of L residue labels
probs: L x D matrix of probabilities. L is length of sequence,
D is probability dimension (usually 3).
label: A label for the file.
"""
filename = os.path.join(dump_dir_path, '%s.ss2' % name)
if index is not None:
filename = os.path.join(dump_dir_path, '%s_%04d.ss2' % (name, index))
with tf.io.gfile.GFile(filename, 'w') as gf:
logging.info('Saving secstruct to %s', filename)
gf.write('# %s CLASSES [%s] %s sample %s\n\n' % (
label, ''.join(SECONDARY_STRUCTURES[:probs.shape[1]]), name, index))
for l in range(probs.shape[0]):
ss = SECONDARY_STRUCTURES[np.argmax(probs[l, :])]
gf.write('%4d %1s %1s %s\n' % (l + 1, sequence[l], ss, ''.join(
[('%6.3f' % p) for p in probs[l, :]])))
| deepmind-research-master | alphafold_casp13/secstruct.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for storing configuration flags."""
import json
class ConfigDict(dict):
"""Configuration dictionary with convenient dot element access."""
def __init__(self, *args, **kwargs):
super(ConfigDict, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for key, value in arg.items():
self._add(key, value)
for key, value in kwargs.items():
self._add(key, value)
def _add(self, key, value):
if isinstance(value, dict):
self[key] = ConfigDict(value)
else:
self[key] = value
def __getattr__(self, attr):
try:
return self[attr]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(ConfigDict, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(ConfigDict, self).__delitem__(key)
del self.__dict__[key]
def to_json(self):
return json.dumps(self)
@classmethod
def from_json(cls, json_string):
return cls(json.loads(json_string))
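# Minimal usage sketch (illustrative, not part of the original module):
# nested dicts become attribute-accessible and survive a JSON round trip.
def _demo_config_dict():
  cfg = ConfigDict({'eval_config': {'output_path': '/tmp/out', 'cpu': False}})
  assert cfg.eval_config.output_path == '/tmp/out'
  return ConfigDict.from_json(cfg.to_json())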
| deepmind-research-master | alphafold_casp13/config_dict.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for predicting Accessible Surface Area."""
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
class ASAOutputLayer(object):
"""An output layer to predict Accessible Surface Area."""
def __init__(self, name='asa'):
self.name = name
def compute_asa_output(self, activations):
"""Just compute the logits and outputs given activations."""
asa_logits = tf.contrib.layers.linear(
activations, 1,
weights_initializer=tf.random_uniform_initializer(-0.01, 0.01),
scope='ASALogits')
self.asa_output = tf.nn.relu(asa_logits, name='ASA_output_relu')
return asa_logits
| deepmind-research-master | alphafold_casp13/asa_output.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""2D Resnet."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import two_dim_convnet
def make_sep_res_layer(
input_node,
in_channels,
out_channels,
layer_name,
filter_size,
filter_size_2=None,
batch_norm=False,
is_training=True,
divide_channels_by=2,
atrou_rate=1,
channel_multiplier=0,
data_format='NHWC',
stddev=0.01,
dropout_keep_prob=1.0):
"""A separable resnet block."""
with tf.name_scope(layer_name):
input_times_almost_1 = input_node
h_conv = input_times_almost_1
if batch_norm:
h_conv = two_dim_convnet.batch_norm_layer(
h_conv, layer_name=layer_name, is_training=is_training,
data_format=data_format)
h_conv = tf.nn.elu(h_conv)
if filter_size_2 is None:
filter_size_2 = filter_size
# 1x1 with half size
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
in_channels=in_channels,
out_channels=in_channels / divide_channels_by,
layer_name=layer_name + '_1x1h',
filter_size=1,
filter_size_2=1,
non_linearity=True,
batch_norm=batch_norm,
is_training=is_training,
data_format=data_format,
stddev=stddev)
# 3x3 with half size
if channel_multiplier == 0:
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
in_channels=in_channels / divide_channels_by,
out_channels=in_channels / divide_channels_by,
layer_name=layer_name + '_%dx%dh' % (filter_size, filter_size_2),
filter_size=filter_size,
filter_size_2=filter_size_2,
non_linearity=True,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=atrou_rate,
data_format=data_format,
stddev=stddev)
else:
# We use separable convolution for 3x3
h_conv = two_dim_convnet.make_conv_sep2d_layer(
h_conv,
in_channels=in_channels / divide_channels_by,
channel_multiplier=channel_multiplier,
out_channels=in_channels / divide_channels_by,
layer_name=layer_name + '_sep%dx%dh' % (filter_size, filter_size_2),
filter_size=filter_size,
filter_size_2=filter_size_2,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=atrou_rate,
data_format=data_format,
stddev=stddev)
# 1x1 back to normal size without relu
h_conv = two_dim_convnet.make_conv_layer(
h_conv,
in_channels=in_channels / divide_channels_by,
out_channels=out_channels,
layer_name=layer_name + '_1x1',
filter_size=1,
filter_size_2=1,
non_linearity=False,
batch_norm=False,
is_training=is_training,
data_format=data_format,
stddev=stddev)
if dropout_keep_prob < 1.0:
logging.info('dropout keep prob %f', dropout_keep_prob)
h_conv = tf.nn.dropout(h_conv, keep_prob=dropout_keep_prob)
return h_conv + input_times_almost_1
def make_two_dim_resnet(
input_node,
num_residues=50,
num_features=40,
num_predictions=1,
num_channels=32,
num_layers=2,
filter_size=3,
filter_size_2=None,
final_non_linearity=False,
name_prefix='',
fancy=True,
batch_norm=False,
is_training=False,
atrou_rates=None,
channel_multiplier=0,
divide_channels_by=2,
resize_features_with_1x1=False,
data_format='NHWC',
stddev=0.01,
dropout_keep_prob=1.0):
"""Two dim resnet towers."""
del num_residues # Unused.
if atrou_rates is None:
atrou_rates = [1]
if not fancy:
raise ValueError('non fancy deprecated')
logging.info('atrou rates %s', atrou_rates)
logging.info('name prefix %s', name_prefix)
x_image = input_node
previous_layer = x_image
non_linearity = True
for i_layer in range(num_layers):
in_channels = num_channels
out_channels = num_channels
curr_atrou_rate = atrou_rates[i_layer % len(atrou_rates)]
if i_layer == 0:
in_channels = num_features
if i_layer == num_layers - 1:
out_channels = num_predictions
non_linearity = final_non_linearity
if i_layer == 0 or i_layer == num_layers - 1:
layer_name = name_prefix + 'conv%d' % (i_layer + 1)
initial_filter_size = filter_size
if resize_features_with_1x1:
initial_filter_size = 1
previous_layer = two_dim_convnet.make_conv_layer(
input_node=previous_layer,
in_channels=in_channels,
out_channels=out_channels,
layer_name=layer_name,
filter_size=initial_filter_size,
filter_size_2=filter_size_2,
non_linearity=non_linearity,
atrou_rate=curr_atrou_rate,
data_format=data_format,
stddev=stddev)
else:
layer_name = name_prefix + 'res%d' % (i_layer + 1)
previous_layer = make_sep_res_layer(
input_node=previous_layer,
in_channels=in_channels,
out_channels=out_channels,
layer_name=layer_name,
filter_size=filter_size,
filter_size_2=filter_size_2,
batch_norm=batch_norm,
is_training=is_training,
atrou_rate=curr_atrou_rate,
channel_multiplier=channel_multiplier,
divide_channels_by=divide_channels_by,
data_format=data_format,
stddev=stddev,
dropout_keep_prob=dropout_keep_prob)
y = previous_layer
return y
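# Minimal usage sketch (illustrative, not part of the original module),
# assuming a TF1.x graph-mode setting: a small four-layer tower on a dummy
# NHWC input.
def _demo_resnet_tower():
  x = tf.placeholder(tf.float32, shape=[1, 64, 64, 40], name='demo_input')
  return make_two_dim_resnet(
      x, num_features=40, num_predictions=1, num_channels=32, num_layers=4,
      filter_size=3, name_prefix='demo_')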
| deepmind-research-master | alphafold_casp13/two_dim_resnet.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Form a weighted average of several distograms.
The inputs are distance histogram pickle files, which must all have identical
hyperparameters.
"""
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import distogram_io
from alphafold_casp13 import parsers
flags.DEFINE_list(
'pickle_dirs', [],
'Comma separated list of directories with pickle files to ensemble.')
flags.DEFINE_list(
'weights', [],
'Comma separated list of weights for the pickle files from different dirs.')
flags.DEFINE_string(
'output_dir', None, 'Directory where to save results of the evaluation.')
FLAGS = flags.FLAGS
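# Example invocation (paths and weights below are illustrative):
#   python -m alphafold_casp13.ensemble_contact_maps \
#     --pickle_dirs=/path/run_a,/path/run_b \
#     --weights=1.0,2.0 \
#     --output_dir=/path/ensembled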
def ensemble_distance_histograms(pickle_dirs, weights, output_dir):
"""Find all the contact maps in the first dir, then ensemble across dirs."""
if len(pickle_dirs) <= 1:
logging.warning('Pointless to ensemble %d pickle_dirs %s',
len(pickle_dirs), pickle_dirs)
# Carry on if there's one dir, otherwise do nothing.
if not pickle_dirs:
return
tf.io.gfile.makedirs(output_dir)
one_dir_pickle_files = tf.io.gfile.glob(
os.path.join(pickle_dirs[0], '*.pickle'))
assert one_dir_pickle_files, pickle_dirs[0]
original_files = len(one_dir_pickle_files)
logging.info('Found %d files %d in first of %d dirs',
original_files, len(one_dir_pickle_files), len(pickle_dirs))
targets = [os.path.splitext(os.path.basename(f))[0]
for f in one_dir_pickle_files]
skipped = 0
wrote = 0
for t in targets:
dump_file = os.path.join(output_dir, t + '.pickle')
pickle_files = [os.path.join(pickle_dir, t + '.pickle')
for pickle_dir in pickle_dirs]
_, new_dict = ensemble_one_distance_histogram(pickle_files, weights)
if new_dict is not None:
wrote += 1
distogram_io.save_distance_histogram_from_dict(dump_file, new_dict)
msg = 'Distograms Wrote %s %d / %d Skipped %d %s' % (
t, wrote, len(one_dir_pickle_files), skipped, dump_file)
logging.info(msg)
def ensemble_one_distance_histogram(pickle_files, weights):
"""Average the given pickle_files and dump."""
dicts = []
sequence = None
max_dim = None
for picklefile in pickle_files:
if not tf.io.gfile.exists(picklefile):
logging.warning('missing %s', picklefile)
break
logging.info('loading pickle file %s', picklefile)
distance_histogram_dict = parsers.parse_distance_histogram_dict(picklefile)
if sequence is None:
sequence = distance_histogram_dict['sequence']
else:
assert sequence == distance_histogram_dict['sequence'], '%s vs %s' % (
sequence, distance_histogram_dict['sequence'])
dicts.append(distance_histogram_dict)
assert dicts[-1]['probs'].shape[0] == dicts[-1]['probs'].shape[1], (
'%d vs %d' % (dicts[-1]['probs'].shape[0], dicts[-1]['probs'].shape[1]))
assert (dicts[0]['probs'].shape[0:2] == dicts[-1]['probs'].shape[0:2]
), ('%d vs %d' % (dicts[0]['probs'].shape, dicts[-1]['probs'].shape))
if max_dim is None or max_dim < dicts[-1]['probs'].shape[2]:
max_dim = dicts[-1]['probs'].shape[2]
if len(dicts) != len(pickle_files):
logging.warning('length mismatch\n%s\nVS\n%s', dicts, pickle_files)
return sequence, None
ensemble_hist = (
sum(w * c['probs'] for w, c in zip(weights, dicts)) / sum(weights))
new_dict = dict(dicts[0])
new_dict['probs'] = ensemble_hist
return sequence, new_dict
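# The ensembling above is a plain weighted mean over the per-directory 'probs'
# arrays. The helper below is a minimal illustrative sketch of that formula;
# its name and the example weights are assumptions and nothing in the pipeline
# calls it. With weights [2.0, 1.0], each bin becomes (2 * p1 + p2) / 3.
def _example_weighted_average(probs_list, weights):
  """Hypothetical sketch of the weighted mean used above."""
  return sum(w * p for w, p in zip(weights, probs_list)) / sum(weights)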
def main(argv):
del argv # Unused.
num_dirs = len(FLAGS.pickle_dirs)
if FLAGS.weights:
assert len(FLAGS.weights) == num_dirs, (
'Supply as many weights as pickle_dirs, or no weights')
weights = [float(w) for w in FLAGS.weights]
else:
weights = [1.0 for w in range(num_dirs)]
ensemble_distance_histograms(
pickle_dirs=FLAGS.pickle_dirs,
weights=weights,
output_dir=FLAGS.output_dir)
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | alphafold_casp13/ensemble_contact_maps.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parsers for various standard biology or AlphaFold-specific formats."""
import pickle
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
def distance_histogram_dict(f):
"""Parses distance histogram dict pickle.
Distance histograms are stored as pickles of dicts.
Write one of these with contacts/write_rr_file.write_pickle_file()
Args:
f: File-like handle to distance histogram dict pickle.
Returns:
Dict with fields:
probs: (an L x L x num_bins) histogram.
num_bins: number of bins for each residue pair
min_range: left hand edge of the distance histogram
max_range: the extent of the histogram NOT the right hand edge.
"""
contact_dict = pickle.load(f, encoding='latin1')
num_res = len(contact_dict['sequence'])
if not all(key in contact_dict.keys()
for key in ['probs', 'num_bins', 'min_range', 'max_range']):
raise ValueError('The pickled contact dict doesn\'t contain all required '
'keys: probs, num_bins, min_range, max_range but %s.' %
contact_dict.keys())
if contact_dict['probs'].ndim != 3:
raise ValueError(
'Probs is not rank 3 but %d' % contact_dict['probs'].ndim)
if contact_dict['num_bins'] != contact_dict['probs'].shape[2]:
raise ValueError(
'The probs shape doesn\'t match num_bins in the third dimension. '
'Expected %d got %d.' % (contact_dict['num_bins'],
contact_dict['probs'].shape[2]))
if contact_dict['probs'].shape[:2] != (num_res, num_res):
raise ValueError(
'The first two probs dims (%i, %i) aren\'t equal to len(sequence) %i'
% (contact_dict['probs'].shape[0], contact_dict['probs'].shape[1],
num_res))
return contact_dict
def parse_distance_histogram_dict(filepath):
"""Parses distance histogram piclkle from filepath."""
with tf.io.gfile.GFile(filepath, 'rb') as f:
return distance_histogram_dict(f)
| deepmind-research-master | alphafold_casp13/parsers.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contact prediction convnet experiment example."""
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import contacts_dataset
from alphafold_casp13 import contacts_network
def _int_ph(shape, name):
return tf.placeholder(
dtype=tf.int32, shape=shape, name=('%s_placeholder' % name))
def _float_ph(shape, name):
return tf.placeholder(
dtype=tf.float32, shape=shape, name=('%s_placeholder' % name))
class Contacts(object):
"""Contact prediction experiment."""
def __init__(
self, tfrecord, stats_file, network_config, crop_size_x, crop_size_y,
feature_normalization, normalization_exclusion):
"""Builds the TensorFlow graph."""
self.network_config = network_config
self.crop_size_x = crop_size_x
self.crop_size_y = crop_size_y
self._feature_normalization = feature_normalization
self._normalization_exclusion = normalization_exclusion
self._model = contacts_network.ContactsNet(**network_config)
self._features = network_config.features
self._scalars = network_config.scalars
self._targets = network_config.targets
# Add extra targets we need.
required_targets = ['domain_name', 'resolution', 'chain_name']
if self.model.torsion_multiplier > 0:
required_targets.extend([
'phi_angles', 'phi_mask', 'psi_angles', 'psi_mask'])
if self.model.secstruct_multiplier > 0:
required_targets.extend(['sec_structure', 'sec_structure_mask'])
if self.model.asa_multiplier > 0:
required_targets.extend(['solv_surf', 'solv_surf_mask'])
extra_targets = [t for t in required_targets if t not in self._targets]
if extra_targets:
targets = list(self._targets)
targets.extend(extra_targets)
self._targets = tuple(targets)
logging.info('Targets %s %s extra %s',
type(self._targets), self._targets, extra_targets)
logging.info('Evaluating on %s, stats: %s', tfrecord, stats_file)
self._build_evaluation_graph(tfrecord=tfrecord, stats_file=stats_file)
@property
def model(self):
return self._model
def _get_feature_normalization(self, features):
return {key: self._feature_normalization
for key in features
if key not in list(self._normalization_exclusion)}
def _build_evaluation_graph(self, tfrecord, stats_file):
"""Constructs the graph in pieces so it can be fed."""
with tf.name_scope('competitionsep'):
# Construct the dataset and mapping ops.
dataset = contacts_dataset.create_tf_dataset(
tf_record_filename=tfrecord,
features=tuple(self._features) + tuple(
self._scalars) + tuple(self._targets))
def normalize(data):
return contacts_dataset.normalize_from_stats_file(
features=data,
stats_file_path=stats_file,
feature_normalization=self._get_feature_normalization(
self._features),
copy_unnormalized=list(set(self._features) & set(self._targets)))
def convert_to_legacy(features):
return contacts_dataset.convert_to_legacy_proteins_dataset_format(
features, self._features, self._scalars, self._targets)
dataset = dataset.map(normalize)
dataset = dataset.map(convert_to_legacy)
dataset = dataset.batch(1)
# Get a batch of tensors in the legacy ProteinsDataset format.
iterator = tf.data.make_one_shot_iterator(dataset)
self._input_batch = iterator.get_next()
self.num_eval_examples = sum(
1 for _ in tf.python_io.tf_record_iterator(tfrecord))
logging.info('Eval batch:\n%s', self._input_batch)
feature_dim_1d = self._input_batch.inputs_1d.shape.as_list()[-1]
feature_dim_2d = self._input_batch.inputs_2d.shape.as_list()[-1]
feature_dim_2d *= 3 # The diagonals will be stacked before feeding.
# Now placeholders for the graph to compute the outputs for one crop.
self.inputs_1d_placeholder = _float_ph(
shape=[None, None, feature_dim_1d], name='inputs_1d')
self.residue_index_placeholder = _int_ph(
shape=[None, None], name='residue_index')
self.inputs_2d_placeholder = _float_ph(
shape=[None, None, None, feature_dim_2d], name='inputs_2d')
# 4 ints: x_start, x_end, y_start, y_end.
self.crop_placeholder = _int_ph(shape=[None, 4], name='crop')
# Finally placeholders for the graph to score the complete contact map.
self.probs_placeholder = _float_ph(shape=[None, None, None], name='probs')
self.softmax_probs_placeholder = _float_ph(
shape=[None, None, None, self.network_config.num_bins],
name='softmax_probs')
self.cb_placeholder = _float_ph(shape=[None, None, 3], name='cb')
self.cb_mask_placeholder = _float_ph(shape=[None, None], name='cb_mask')
self.lengths_placeholder = _int_ph(shape=[None], name='lengths')
if self.model.secstruct_multiplier > 0:
self.sec_structure_placeholder = _float_ph(
shape=[None, None, 8], name='sec_structure')
self.sec_structure_logits_placeholder = _float_ph(
shape=[None, None, 8], name='sec_structure_logits')
self.sec_structure_mask_placeholder = _float_ph(
shape=[None, None, 1], name='sec_structure_mask')
if self.model.asa_multiplier > 0:
self.solv_surf_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf')
self.solv_surf_logits_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf_logits')
self.solv_surf_mask_placeholder = _float_ph(
shape=[None, None, 1], name='solv_surf_mask')
if self.model.torsion_multiplier > 0:
self.torsions_truth_placeholder = _float_ph(
shape=[None, None, 2], name='torsions_truth')
self.torsions_mask_placeholder = _float_ph(
shape=[None, None, 1], name='torsions_mask')
self.torsion_logits_placeholder = _float_ph(
shape=[None, None, self.network_config.torsion_bins ** 2],
name='torsion_logits')
# Build a dict to pass all the placeholders into build.
placeholders = {
'inputs_1d_placeholder': self.inputs_1d_placeholder,
'residue_index_placeholder': self.residue_index_placeholder,
'inputs_2d_placeholder': self.inputs_2d_placeholder,
'crop_placeholder': self.crop_placeholder,
'probs_placeholder': self.probs_placeholder,
'softmax_probs_placeholder': self.softmax_probs_placeholder,
'cb_placeholder': self.cb_placeholder,
'cb_mask_placeholder': self.cb_mask_placeholder,
'lengths_placeholder': self.lengths_placeholder,
}
if self.model.secstruct_multiplier > 0:
placeholders.update({
'sec_structure': self.sec_structure_placeholder,
'sec_structure_logits_placeholder':
self.sec_structure_logits_placeholder,
'sec_structure_mask': self.sec_structure_mask_placeholder,})
if self.model.asa_multiplier > 0:
placeholders.update({
'solv_surf': self.solv_surf_placeholder,
'solv_surf_logits_placeholder': self.solv_surf_logits_placeholder,
'solv_surf_mask': self.solv_surf_mask_placeholder,})
if self.model.torsion_multiplier > 0:
placeholders.update({
'torsions_truth': self.torsions_truth_placeholder,
'torsion_logits_placeholder': self.torsion_logits_placeholder,
'torsions_truth_mask': self.torsions_mask_placeholder,})
activations = self._model(
crop_size_x=self.crop_size_x,
crop_size_y=self.crop_size_y,
placeholders=placeholders)
self.eval_probs_softmax = tf.nn.softmax(
activations[:, :, :, :self.network_config.num_bins])
self.eval_probs = tf.reduce_sum(
self.eval_probs_softmax[:, :, :, :self._model.quant_threshold()],
axis=3)
def get_one_example(self, sess):
"""Pull one example off the queue so we can feed it for evaluation."""
request_dict = {
'inputs_1d': self._input_batch.inputs_1d,
'inputs_2d': self._input_batch.inputs_2d,
'sequence_lengths': self._input_batch.sequence_lengths,
'beta_positions': self._input_batch.targets.beta_positions,
'beta_mask': self._input_batch.targets.beta_mask,
'domain_name': self._input_batch.targets.domain_name,
'chain_name': self._input_batch.targets.chain_name,
'sequences': self._input_batch.sequences,
}
if hasattr(self._input_batch.targets, 'residue_index'):
request_dict.update(
{'residue_index': self._input_batch.targets.residue_index})
if hasattr(self._input_batch.targets, 'phi_angles'):
request_dict.update(
{'phi_angles': self._input_batch.targets.phi_angles,
'psi_angles': self._input_batch.targets.psi_angles,
'phi_mask': self._input_batch.targets.phi_mask,
'psi_mask': self._input_batch.targets.psi_mask})
if hasattr(self._input_batch.targets, 'sec_structure'):
request_dict.update(
{'sec_structure': self._input_batch.targets.sec_structure,
'sec_structure_mask': self._input_batch.targets.sec_structure_mask,})
if hasattr(self._input_batch.targets, 'solv_surf'):
request_dict.update(
{'solv_surf': self._input_batch.targets.solv_surf,
'solv_surf_mask': self._input_batch.targets.solv_surf_mask,})
if hasattr(self._input_batch.targets, 'alpha_positions'):
request_dict.update(
{'alpha_positions': self._input_batch.targets.alpha_positions,
'alpha_mask': self._input_batch.targets.alpha_mask,})
batch = sess.run(request_dict)
return batch
| deepmind-research-master | alphafold_casp13/contacts_experiment.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Write contact map predictions to a tf.io.gfile.
Either write a binary contact map as an RR format text file, or a
histogram prediction as a pickle of a dict containing a numpy array.
"""
import os
import numpy as np
import six.moves.cPickle as pickle
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
RR_FORMAT = """PFRMAT RR
TARGET {}
AUTHOR DM-ORIGAMI-TEAM
METHOD {}
MODEL 1
{}
"""
def save_rr_file(filename, probs, domain, sequence,
method='dm-contacts-resnet'):
"""Save a contact probability matrix as an RR file."""
assert len(sequence) == probs.shape[0]
assert len(sequence) == probs.shape[1]
with tf.io.gfile.GFile(filename, 'w') as f:
f.write(RR_FORMAT.format(domain, method, sequence))
for i in range(probs.shape[0]):
for j in range(i + 1, probs.shape[1]):
f.write('{:d} {:d} {:d} {:d} {:f}\n'.format(
i + 1, j + 1, 0, 8, probs[j, i]))
f.write('END\n')
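# Illustrative sketch of the RR file written above, assuming a hypothetical
# 3-residue domain 'T0000' with sequence 'ACD' and the default method string;
# each data row is '<i> <j> 0 8 <probs[j - 1, i - 1]>' for j > i:
#   PFRMAT RR
#   TARGET T0000
#   AUTHOR DM-ORIGAMI-TEAM
#   METHOD dm-contacts-resnet
#   MODEL 1
#   ACD
#   1 2 0 8 <probs[1, 0]>
#   1 3 0 8 <probs[2, 0]>
#   2 3 0 8 <probs[2, 1]>
#   END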
def save_torsions(torsions_dir, filebase, sequence, torsions_probs):
"""Save Torsions to a file as pickle of a dict."""
filename = os.path.join(torsions_dir, filebase + '.torsions')
t_dict = dict(probs=torsions_probs, sequence=sequence)
  with tf.io.gfile.GFile(filename, 'wb') as fh:
pickle.dump(t_dict, fh, protocol=2)
def save_distance_histogram(
filename, probs, domain, sequence, min_range, max_range, num_bins):
"""Save a distance histogram prediction matrix as a pickle file."""
dh_dict = {
'min_range': min_range,
'max_range': max_range,
'num_bins': num_bins,
'domain': domain,
'sequence': sequence,
'probs': probs.astype(np.float32)}
save_distance_histogram_from_dict(filename, dh_dict)
def save_distance_histogram_from_dict(filename, dh_dict):
"""Save a distance histogram prediction matrix as a pickle file."""
fields = ['min_range', 'max_range', 'num_bins', 'domain', 'sequence', 'probs']
missing_fields = [f for f in fields if f not in dh_dict]
assert not missing_fields, 'Fields {} missing from dictionary'.format(
missing_fields)
assert len(dh_dict['sequence']) == dh_dict['probs'].shape[0]
assert len(dh_dict['sequence']) == dh_dict['probs'].shape[1]
assert dh_dict['num_bins'] == dh_dict['probs'].shape[2]
assert dh_dict['min_range'] >= 0.0
assert dh_dict['max_range'] > 0.0
with tf.io.gfile.GFile(filename, 'wb') as fw:
pickle.dump(dh_dict, fw, protocol=2)
def contact_map_from_distogram(distogram_dict):
"""Split the boundary bin."""
num_bins = distogram_dict['probs'].shape[-1]
bin_size_angstrom = distogram_dict['max_range'] / num_bins
threshold_cts = (8.0 - distogram_dict['min_range']) / bin_size_angstrom
threshold_bin = int(threshold_cts) # Round down
pred_contacts = np.sum(distogram_dict['probs'][:, :, :threshold_bin], axis=-1)
if threshold_bin < threshold_cts: # Add on the fraction of the boundary bin.
pred_contacts += distogram_dict['probs'][:, :, threshold_bin] * (
threshold_cts - threshold_bin)
return pred_contacts
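# Worked example of the boundary-bin split above. The histogram parameters are
# assumptions chosen for round numbers: with min_range=2.0, max_range=20.0 and
# num_bins=64, each bin spans 20.0 / 64 = 0.3125 A, the 8 A threshold lands at
# (8.0 - 2.0) / 0.3125 = 19.2, so bins 0..18 are summed in full and bin 19
# contributes a 0.2 fraction. The helper is illustrative and never called.
def _example_threshold_split(min_range=2.0, max_range=20.0, num_bins=64):
  """Hypothetical sketch of the 8 A threshold arithmetic used above."""
  bin_size_angstrom = max_range / num_bins
  threshold_cts = (8.0 - min_range) / bin_size_angstrom
  threshold_bin = int(threshold_cts)  # 19 with the assumed parameters.
  return threshold_bin, threshold_cts - threshold_bin  # Fraction ~0.2 here.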
| deepmind-research-master | alphafold_casp13/distogram_io.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF wrapper for protein tf.Example datasets."""
import collections
import enum
import json
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
_ProteinDescription = collections.namedtuple(
'_ProteinDescription', (
'sequence_lengths', 'key', 'sequences', 'inputs_1d', 'inputs_2d',
'inputs_2d_diagonal', 'crops', 'scalars', 'targets'))
class FeatureType(enum.Enum):
ZERO_DIM = 0 # Shape [x]
ONE_DIM = 1 # Shape [num_res, x]
TWO_DIM = 2 # Shape [num_res, num_res, x]
# Placeholder values that will be replaced with their true value at runtime.
NUM_RES = 'num residues placeholder'
# Sizes of the protein features. NUM_RES is allowed as a placeholder to be
# replaced with the number of residues.
FEATURES = {
'aatype': (tf.float32, [NUM_RES, 21]),
'alpha_mask': (tf.int64, [NUM_RES, 1]),
'alpha_positions': (tf.float32, [NUM_RES, 3]),
'beta_mask': (tf.int64, [NUM_RES, 1]),
'beta_positions': (tf.float32, [NUM_RES, 3]),
'between_segment_residues': (tf.int64, [NUM_RES, 1]),
'chain_name': (tf.string, [1]),
'deletion_probability': (tf.float32, [NUM_RES, 1]),
'domain_name': (tf.string, [1]),
'gap_matrix': (tf.float32, [NUM_RES, NUM_RES, 1]),
'hhblits_profile': (tf.float32, [NUM_RES, 22]),
'hmm_profile': (tf.float32, [NUM_RES, 30]),
'key': (tf.string, [1]),
'mutual_information': (tf.float32, [NUM_RES, NUM_RES, 1]),
'non_gapped_profile': (tf.float32, [NUM_RES, 21]),
'num_alignments': (tf.int64, [NUM_RES, 1]),
'num_effective_alignments': (tf.float32, [1]),
'phi_angles': (tf.float32, [NUM_RES, 1]),
'phi_mask': (tf.int64, [NUM_RES, 1]),
'profile': (tf.float32, [NUM_RES, 21]),
'profile_with_prior': (tf.float32, [NUM_RES, 22]),
'profile_with_prior_without_gaps': (tf.float32, [NUM_RES, 21]),
'pseudo_bias': (tf.float32, [NUM_RES, 22]),
'pseudo_frob': (tf.float32, [NUM_RES, NUM_RES, 1]),
'pseudolikelihood': (tf.float32, [NUM_RES, NUM_RES, 484]),
'psi_angles': (tf.float32, [NUM_RES, 1]),
'psi_mask': (tf.int64, [NUM_RES, 1]),
'residue_index': (tf.int64, [NUM_RES, 1]),
'resolution': (tf.float32, [1]),
'reweighted_profile': (tf.float32, [NUM_RES, 22]),
'sec_structure': (tf.int64, [NUM_RES, 8]),
'sec_structure_mask': (tf.int64, [NUM_RES, 1]),
'seq_length': (tf.int64, [NUM_RES, 1]),
'sequence': (tf.string, [1]),
'solv_surf': (tf.float32, [NUM_RES, 1]),
'solv_surf_mask': (tf.int64, [NUM_RES, 1]),
'superfamily': (tf.string, [1]),
}
FEATURE_TYPES = {k: v[0] for k, v in FEATURES.items()}
FEATURE_SIZES = {k: v[1] for k, v in FEATURES.items()}
def shape(feature_name, num_residues, features=None):
"""Get the shape for the given feature name.
Args:
feature_name: String identifier for the feature. If the feature name ends
with "_unnormalized", theis suffix is stripped off.
num_residues: The number of residues in the current domain - some elements
of the shape can be dynamic and will be replaced by this value.
features: A feature_name to (tf_dtype, shape) lookup; defaults to FEATURES.
Returns:
    List of ints representing the tensor size.
"""
features = features or FEATURES
if feature_name.endswith('_unnormalized'):
feature_name = feature_name[:-13]
unused_dtype, raw_sizes = features[feature_name]
replacements = {NUM_RES: num_residues}
sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes]
return sizes
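# Illustrative usage sketch for shape(). The helper below and the 120-residue
# domain are assumptions added for clarity; nothing in the library calls it.
def _example_shapes():
  """Hypothetical examples of shape() with num_residues=120."""
  assert shape('profile', num_residues=120) == [120, 21]
  assert shape('gap_matrix', num_residues=120) == [120, 120, 1]
  # The '_unnormalized' suffix is stripped, so this matches 'profile' above.
  assert shape('profile_unnormalized', num_residues=120) == [120, 21]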
def dim(feature_name):
"""Determine the type of feature.
Args:
feature_name: String identifier for the feature to lookup. If the feature
name ends with "_unnormalized", theis suffix is stripped off.
Returns:
A FeatureType enum describing whether the feature is of size num_res or
num_res * num_res.
Raises:
ValueError: If the feature is of an unknown type.
"""
if feature_name.endswith('_unnormalized'):
feature_name = feature_name[:-13]
num_dims = len(FEATURE_SIZES[feature_name])
if num_dims == 1:
return FeatureType.ZERO_DIM
elif num_dims == 2 and FEATURE_SIZES[feature_name][0] == NUM_RES:
return FeatureType.ONE_DIM
elif num_dims == 3 and FEATURE_SIZES[feature_name][0] == NUM_RES:
return FeatureType.TWO_DIM
else:
raise ValueError('Expect feature sizes to be 2 or 3, got %i' %
len(FEATURE_SIZES[feature_name]))
def _concat_or_zeros(tensor_list, axis, tensor_shape, name):
"""Concatenates the tensors if given, otherwise returns a tensor of zeros."""
if tensor_list:
return tf.concat(tensor_list, axis=axis, name=name)
return tf.zeros(tensor_shape, name=name + '_zeros')
def parse_tfexample(raw_data, features):
"""Read a single TF Example proto and return a subset of its features.
Args:
raw_data: A serialized tf.Example proto.
features: A dictionary of features, mapping string feature names to a tuple
(dtype, shape). This dictionary should be a subset of
protein_features.FEATURES (or the dictionary itself for all features).
Returns:
A dictionary of features mapping feature names to features. Only the given
features are returned, all other ones are filtered out.
"""
feature_map = {
k: tf.io.FixedLenSequenceFeature(shape=(), dtype=v[0], allow_missing=True)
for k, v in features.items()
}
parsed_features = tf.io.parse_single_example(raw_data, feature_map)
# Find out what is the number of sequences and the number of alignments.
num_residues = tf.cast(parsed_features['seq_length'][0], dtype=tf.int32)
# Reshape the tensors according to the sequence length and num alignments.
for k, v in parsed_features.items():
new_shape = shape(feature_name=k, num_residues=num_residues)
# Make sure the feature we are reshaping is not empty.
assert_non_empty = tf.assert_greater(
tf.size(v), 0, name='assert_%s_non_empty' % k,
message='The feature %s is not set in the tf.Example. Either do not '
'request the feature or use a tf.Example that has the feature set.' % k)
with tf.control_dependencies([assert_non_empty]):
parsed_features[k] = tf.reshape(v, new_shape, name='reshape_%s' % k)
return parsed_features
def create_tf_dataset(tf_record_filename, features):
"""Creates an instance of tf.data.Dataset backed by a protein dataset SSTable.
Args:
tf_record_filename: A string with filename of the TFRecord file.
features: A list of strings of feature names to be returned in the dataset.
Returns:
A tf.data.Dataset object. Its items are dictionaries from feature names to
feature values.
"""
# Make sure these features are always read.
required_features = ['aatype', 'sequence', 'seq_length']
features = list(set(features) | set(required_features))
features = {name: FEATURES[name] for name in features}
tf_dataset = tf.data.TFRecordDataset(filenames=[tf_record_filename])
tf_dataset = tf_dataset.map(lambda raw: parse_tfexample(raw, features))
return tf_dataset
def normalize_from_stats_file(
features, stats_file_path, feature_normalization, copy_unnormalized=None):
"""Normalizes the features set in the feature_normalization by the norm stats.
Args:
features: A dictionary mapping feature names to feature tensors.
stats_file_path: A string with the path of the statistics JSON file.
feature_normalization: A dictionary specifying the normalization type for
each input feature. Acceptable values are 'std' and 'none'. If not
specified default to 'none'. Any extra features that are not present in
features will be ignored.
copy_unnormalized: A list of features whose unnormalized copy should be
added. For any feature F in this list a feature F + "_unnormalized" will
be added in the output dictionary containing the unnormalized feature.
This is useful if you have a feature you want to have both in
desired_features (normalized) and also in desired_targets (unnormalized).
See convert_to_legacy_proteins_dataset_format for more details.
Returns:
A dictionary mapping features names to feature tensors. The ones that were
specified in feature_normalization will be normalized.
Raises:
ValueError: If an unknown normalization mode is used.
"""
with tf.io.gfile.GFile(stats_file_path, 'r') as f:
norm_stats = json.loads(f.read())
if not copy_unnormalized:
copy_unnormalized = []
# We need this unnormalized in convert_to_legacy_proteins_dataset_format.
copy_unnormalized.append('num_alignments')
for feature in copy_unnormalized:
if feature in features:
features[feature + '_unnormalized'] = features[feature]
range_epsilon = 1e-12
for key, value in features.items():
if key not in feature_normalization or feature_normalization[key] == 'none':
pass
elif feature_normalization[key] == 'std':
value = tf.cast(value, dtype=tf.float32)
train_mean = tf.cast(norm_stats['mean'][key], dtype=tf.float32)
train_range = tf.sqrt(tf.cast(norm_stats['var'][key], dtype=tf.float32))
value -= train_mean
value = tf.where(
train_range > range_epsilon, value / train_range, value)
features[key] = value
else:
raise ValueError('Unknown normalization mode %s for feature %s.'
% (feature_normalization[key], key))
return features
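# Worked example of the 'std' normalization above, using assumed statistics
# rather than values from a real stats file: with mean 0.2 and var 0.04, a
# feature value of 0.5 becomes (0.5 - 0.2) / sqrt(0.04) = 1.5. Features whose
# training std is below the 1e-12 epsilon are only mean-centred.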
def convert_to_legacy_proteins_dataset_format(
features, desired_features, desired_scalars, desired_targets):
"""Converts the output of tf.Dataset to the legacy format.
Args:
features: A dictionary mapping feature names to feature tensors.
desired_features: A list with the names of the desired features. These will
be filtered out of features and returned in one of the inputs_1d or
inputs_2d. The features concatenated in `inputs_1d`, `inputs_2d` will be
concatenated in the same order as they were given in `desired_features`.
desired_scalars: A list naming the desired scalars. These will
be filtered out of features and returned in scalars. If features contain
an unnormalized version of a desired scalar, it will be used.
desired_targets: A list naming the desired targets. These will
be filtered out of features and returned in targets. If features contain
an unnormalized version of a desired target, it will be used.
Returns:
A _ProteinDescription namedtuple consisting of:
      sequence_lengths: A scalar int32 tensor with the sequence length.
      key: A string tensor with the sequence key, or empty if not set in
        features.
sequences: A string tensor with the protein sequence.
inputs_1d: All 1D features in a single tensor of shape
[num_res, 1d_channels].
inputs_2d: All 2D features in a single tensor of shape
[num_res, num_res, 2d_channels].
inputs_2d_diagonal: All 2D diagonal features in a single tensor of shape
[num_res, num_res, 2d_diagonal_channels]. If no diagonal features found
in features, the tensor will be set to inputs_2d.
      crops: An int32 tensor with the crop positions. If not set in features,
it will be set to [0, num_res, 0, num_res].
scalars: All requested scalar tensors in a list.
targets: All requested target tensors in a list.
Raises:
ValueError: If the feature size is invalid.
"""
tensors_1d = []
tensors_2d = []
tensors_2d_diagonal = []
for key in desired_features:
# Determine if the feature is 1D or 2D.
feature_dim = dim(key)
if feature_dim == FeatureType.ONE_DIM:
tensors_1d.append(tf.cast(features[key], dtype=tf.float32))
elif feature_dim == FeatureType.TWO_DIM:
if key not in features:
if not(key + '_cropped' in features and key + '_diagonal' in features):
raise ValueError(
'The 2D feature %s is not in the features dictionary and neither '
'are its cropped and diagonal versions.' % key)
else:
tensors_2d.append(
tf.cast(features[key + '_cropped'], dtype=tf.float32))
tensors_2d_diagonal.append(
tf.cast(features[key + '_diagonal'], dtype=tf.float32))
else:
tensors_2d.append(tf.cast(features[key], dtype=tf.float32))
else:
raise ValueError('Unexpected FeatureType returned: %s' % str(feature_dim))
# Determine num_res from the sequence as seq_length was possibly normalized.
num_res = tf.strings.length(features['sequence'])[0]
# Concatenate feature tensors into a single tensor
inputs_1d = _concat_or_zeros(
tensors_1d, axis=1, tensor_shape=[num_res, 0],
name='inputs_1d_concat')
inputs_2d = _concat_or_zeros(
tensors_2d, axis=2, tensor_shape=[num_res, num_res, 0],
name='inputs_2d_concat')
if tensors_2d_diagonal:
# The legacy dataset outputs the two diagonal crops stacked
    # A1, B1, C1, A2, B2, C2, so convert from A1, A2, B1, B2, C1, C2 format.
diagonal_crops1 = [t[:, :, :(t.shape[2] // 2)] for t in tensors_2d_diagonal]
diagonal_crops2 = [t[:, :, (t.shape[2] // 2):] for t in tensors_2d_diagonal]
inputs_2d_diagonal = tf.concat(diagonal_crops1 + diagonal_crops2, axis=2)
else:
inputs_2d_diagonal = inputs_2d
sequence = features['sequence']
sequence_key = features.get('key', tf.constant(['']))[0]
if 'crops' in features:
crops = features['crops']
else:
    crops = tf.stack([0, num_res, 0, num_res])
scalar_tensors = []
for key in desired_scalars:
scalar_tensors.append(features.get(key + '_unnormalized', features[key]))
target_tensors = []
for key in desired_targets:
target_tensors.append(features.get(key + '_unnormalized', features[key]))
scalar_class = collections.namedtuple('_ScalarClass', desired_scalars)
target_class = collections.namedtuple('_TargetClass', desired_targets)
return _ProteinDescription(
sequence_lengths=num_res,
key=sequence_key,
sequences=sequence,
inputs_1d=inputs_1d,
inputs_2d=inputs_2d,
inputs_2d_diagonal=inputs_2d_diagonal,
crops=crops,
scalars=scalar_class(*scalar_tensors),
targets=target_class(*target_tensors))
| deepmind-research-master | alphafold_casp13/contacts_dataset.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Combines predictions by pasting."""
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import six
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import distogram_io
from alphafold_casp13 import parsers
flags.DEFINE_string("pickle_input_dir", None,
"Directory to read pickle distance histogram files from.")
flags.DEFINE_string("output_dir", None, "Directory to write chain RR files to.")
flags.DEFINE_string("tfrecord_path", "",
"If provided, construct the average weighted by number of "
"alignments.")
flags.DEFINE_string("crop_sizes", "64,128,256", "The crop sizes to use.")
flags.DEFINE_integer("crop_step", 32, "The step size for cropping.")
FLAGS = flags.FLAGS
def generate_domains(target, sequence, crop_sizes, crop_step):
"""Take fasta files and generate a domain definition for data generation."""
logging.info("Generating crop domains for target %s", target)
windows = [int(x) for x in crop_sizes.split(",")]
num_residues = len(sequence)
domains = []
domains.append({"name": target, "description": (1, num_residues)})
for window in windows:
starts = list(range(0, num_residues - window, crop_step))
# Append a last crop to ensure we get all the way to the end of the
# sequence, even when num_residues - window is not divisible by crop_step.
if num_residues >= window:
starts += [num_residues - window]
for start in starts:
name = "%s-l%i_s%i" % (target, window, start)
domains.append({"name": name, "description": (start + 1, start + window)})
return domains
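# Illustrative sketch of the crop layout produced above; the target name,
# sequence length and single 64-residue window are assumptions, and nothing
# in this script calls the helper.
def _example_generate_domains():
  """Hypothetical call showing the crops for a 150-residue target."""
  domains = generate_domains(
      target="T0000", sequence="A" * 150, crop_sizes="64", crop_step=32)
  # The full chain plus 64-residue crops every 32 residues, with a final crop
  # pinned to the end: [(1, 150), (1, 64), (33, 96), (65, 128), (87, 150)].
  return [d["description"] for d in domains]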
def get_weights(path):
"""Fetch all the weights from a TFRecord."""
if not path:
return {}
logging.info("Getting weights from %s", path)
weights = {}
record_iterator = tf.python_io.tf_record_iterator(path=path)
for serialized_tfexample in record_iterator:
example = tf.train.Example()
example.ParseFromString(serialized_tfexample)
domain_name = six.ensure_str(
example.features.feature["domain_name"].bytes_list.value[0])
weights[domain_name] = float(
example.features.feature["num_alignments"].int64_list.value[0])
logging.info("Weight %s: %d", domain_name, weights[domain_name])
logging.info("Loaded %d weights", len(weights))
return weights
def paste_distance_histograms(
input_dir, output_dir, weights, crop_sizes, crop_step):
"""Paste together distograms for given domains of given targets and write.
  Domain distance histograms are 'pasted', meaning they are substituted
directly into the contact map. The order is determined by the order in the
domain definition file.
Args:
input_dir: String, path to directory containing chain and domain-level
distogram files.
    output_dir: String, path to directory to write out chain-level distogram
files.
weights: A dictionary with weights.
crop_sizes: The crop sizes.
crop_step: The step size for cropping.
Raises:
ValueError: if histogram parameters don't match.
"""
tf.io.gfile.makedirs(output_dir)
targets = tf.io.gfile.glob(os.path.join(input_dir, "*.pickle"))
targets = [os.path.splitext(os.path.basename(t))[0] for t in targets]
targets = set([t.split("-")[0] for t in targets])
logging.info("Pasting distance histograms for %d targets", len(targets))
for target in sorted(targets):
logging.info("%s as chain", target)
chain_pickle_path = os.path.join(input_dir, "%s.pickle" % target)
distance_histogram_dict = parsers.parse_distance_histogram_dict(
chain_pickle_path)
combined_cmap = np.array(distance_histogram_dict["probs"])
# Make the counter map 1-deep but still rank 3.
counter_map = np.ones_like(combined_cmap[:, :, 0:1])
sequence = distance_histogram_dict["sequence"]
target_domains = generate_domains(
target=target, sequence=sequence, crop_sizes=crop_sizes,
crop_step=crop_step)
# Paste in each domain.
for domain in sorted(target_domains, key=lambda x: x["name"]):
if domain["name"] == target:
logging.info("Skipping %s as domain", target)
continue
if "," in domain["description"]:
logging.info("Skipping multisegment domain %s",
domain["name"])
continue
crop_start, crop_end = domain["description"]
domain_pickle_path = os.path.join(input_dir, "%s.pickle" % domain["name"])
weight = weights.get(domain["name"], 1e9)
logging.info("Pasting %s: %d-%d. weight: %f", domain_pickle_path,
crop_start, crop_end, weight)
domain_distance_histogram_dict = parsers.parse_distance_histogram_dict(
domain_pickle_path)
for field in ["num_bins", "min_range", "max_range"]:
if domain_distance_histogram_dict[field] != distance_histogram_dict[
field]:
raise ValueError("Field {} does not match {} {}".format(
field,
domain_distance_histogram_dict[field],
distance_histogram_dict[field]))
weight_matrix_size = crop_end - crop_start + 1
weight_matrix = np.ones(
(weight_matrix_size, weight_matrix_size), dtype=np.float32) * weight
combined_cmap[crop_start - 1:crop_end, crop_start - 1:crop_end, :] += (
domain_distance_histogram_dict["probs"] *
np.expand_dims(weight_matrix, 2))
counter_map[crop_start - 1:crop_end,
crop_start - 1:crop_end, 0] += weight_matrix
# Broadcast across the histogram bins.
combined_cmap /= counter_map
# Write out full-chain cmap for folding.
output_chain_pickle_path = os.path.join(output_dir,
"{}.pickle".format(target))
logging.info("Writing to %s", output_chain_pickle_path)
distance_histogram_dict["probs"] = combined_cmap
distance_histogram_dict["target"] = target
# Save the distogram pickle file.
distogram_io.save_distance_histogram_from_dict(
output_chain_pickle_path, distance_histogram_dict)
# Compute the contact map and save it as an RR file.
contact_probs = distogram_io.contact_map_from_distogram(
distance_histogram_dict)
rr_path = os.path.join(output_dir, "%s.rr" % target)
distogram_io.save_rr_file(
filename=rr_path,
probs=contact_probs,
domain=target,
sequence=distance_histogram_dict["sequence"])
def main(argv):
del argv # Unused.
flags.mark_flag_as_required("pickle_input_dir")
weights = get_weights(FLAGS.tfrecord_path)
paste_distance_histograms(
FLAGS.pickle_input_dir, FLAGS.output_dir, weights, FLAGS.crop_sizes,
FLAGS.crop_step)
if __name__ == "__main__":
app.run(main)
| deepmind-research-master | alphafold_casp13/paste_contact_maps.py |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network for predicting C-beta contacts."""
from absl import logging
import sonnet
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from alphafold_casp13 import asa_output
from alphafold_casp13 import secstruct
from alphafold_casp13 import two_dim_convnet
from alphafold_casp13 import two_dim_resnet
def call_on_tuple(f):
"""Unpacks a tuple input parameter into arguments for a function f.
Mimics tuple unpacking in lambdas, which existed in Python 2 but has been
removed in Python 3.
Args:
f: A function taking multiple arguments.
Returns:
A function equivalent to f accepting a tuple, which is then unpacked.
"""
return lambda args: f(*args)
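# Minimal illustrative sketch of call_on_tuple; the lambda is an assumption,
# not library code: call_on_tuple(lambda a, b: a + b)((1, 2)) returns 3, which
# is why it can wrap the two-argument lambdas passed to tf.map_fn over tuples
# of tensors later in this file.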
class ContactsNet(sonnet.AbstractModule):
"""A network to go from sequence to distance histograms."""
def __init__(self,
binary_code_bits,
data_format,
distance_multiplier,
features,
features_forward,
max_range,
min_range,
num_bins,
reshape_layer,
resolution_noise_scale,
scalars,
targets,
network_2d_deep,
torsion_bins=None,
skip_connect=0,
position_specific_bias_size=0,
filters_1d=(),
collapsed_batch_norm=False,
is_ca_feature=False,
asa_multiplier=0.0,
secstruct_multiplier=0.0,
torsion_multiplier=0.0,
name='contacts_net'):
"""Construct position prediction network."""
super(ContactsNet, self).__init__(name=name)
self._filters_1d = filters_1d
self._collapsed_batch_norm = collapsed_batch_norm
self._is_ca_feature = is_ca_feature
self._binary_code_bits = binary_code_bits
self._data_format = data_format
self._distance_multiplier = distance_multiplier
self._features = features
self._features_forward = features_forward
self._max_range = max_range
self._min_range = min_range
self._num_bins = num_bins
self._position_specific_bias_size = position_specific_bias_size
self._reshape_layer = reshape_layer
self._resolution_noise_scale = resolution_noise_scale
self._scalars = scalars
self._torsion_bins = torsion_bins
self._skip_connect = skip_connect
self._targets = targets
self._network_2d_deep = network_2d_deep
self.asa_multiplier = asa_multiplier
self.secstruct_multiplier = secstruct_multiplier
self.torsion_multiplier = torsion_multiplier
with self._enter_variable_scope():
if self.secstruct_multiplier > 0:
self._secstruct = secstruct.Secstruct()
if self.asa_multiplier > 0:
self._asa = asa_output.ASAOutputLayer()
if self._position_specific_bias_size:
self._position_specific_bias = tf.get_variable(
'position_specific_bias',
[self._position_specific_bias_size, self._num_bins or 1],
initializer=tf.zeros_initializer())
def quant_threshold(self, threshold=8.0):
"""Find the bin that is 8A+: we sum mass below this bin gives contact prob.
Args:
threshold: The distance threshold.
Returns:
Index of bin.
"""
# Note that this misuses the max_range as the range.
return int(
(threshold - self._min_range) * self._num_bins / float(self._max_range))
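  # Worked example of the threshold arithmetic above, with assumed values for
  # illustration: using min_range=2.0, max_range=22.0 (treated as the range,
  # per the note above) and num_bins=64, the default 8 A threshold maps to
  # int((8.0 - 2.0) * 64 / 22.0) = int(17.45...) = bin 17.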
def _build(self, crop_size_x=0, crop_size_y=0, placeholders=None):
"""Puts the network into the graph.
Args:
crop_size_x: Crop a chunk out in one dimension. 0 means no cropping.
crop_size_y: Crop a chunk out in one dimension. 0 means no cropping.
placeholders: A dict containing the placeholders needed.
Returns:
      An NHWC Tensor of logits with shape
        [batch_size, crop_size_y, crop_size_x, output_channels].
"""
crop_placeholder = placeholders['crop_placeholder']
inputs_1d = placeholders['inputs_1d_placeholder']
if self._is_ca_feature and 'aatype' in self._features:
logging.info('Collapsing aatype to is_ca_feature %s',
inputs_1d.shape.as_list()[-1])
assert inputs_1d.shape.as_list()[-1] <= 21 + (
1 if 'seq_length' in self._features else 0)
inputs_1d = inputs_1d[:, :, 7:8]
logits = self.compute_outputs(
inputs_1d=inputs_1d,
residue_index=placeholders['residue_index_placeholder'],
inputs_2d=placeholders['inputs_2d_placeholder'],
crop_x=crop_placeholder[:, 0:2],
crop_y=crop_placeholder[:, 2:4],
use_on_the_fly_stats=True,
crop_size_x=crop_size_x,
crop_size_y=crop_size_y,
data_format='NHWC', # Force NHWC for evals.
)
return logits
def compute_outputs(self, inputs_1d, residue_index, inputs_2d, crop_x, crop_y,
use_on_the_fly_stats, crop_size_x, crop_size_y,
data_format='NHWC'):
"""Given the inputs for a block, compute the network outputs."""
hidden_1d = inputs_1d
hidden_1d_list = [hidden_1d]
if len(hidden_1d_list) != 1:
hidden_1d = tf.concat(hidden_1d_list, 2)
output_dimension = self._num_bins or 1
if self._distance_multiplier > 0:
output_dimension += 1
logits, activations = self._build_2d_embedding(
hidden_1d=hidden_1d,
residue_index=residue_index,
inputs_2d=inputs_2d,
output_dimension=output_dimension,
use_on_the_fly_stats=use_on_the_fly_stats,
crop_x=crop_x,
crop_y=crop_y,
crop_size_x=crop_size_x, crop_size_y=crop_size_y,
data_format=data_format)
logits = tf.debugging.check_numerics(
logits, 'NaN in resnet activations', name='resnet_activations')
if (self.secstruct_multiplier > 0 or
self.asa_multiplier > 0 or
self.torsion_multiplier > 0):
# Make a 1d embedding by reducing the 2D activations.
# We do this in the x direction and the y direction separately.
collapse_dim = 1
join_dim = -1
embedding_1d = tf.concat(
# First targets are crop_x (axis 2) which we must reduce on axis 1
[tf.concat([tf.reduce_max(activations, axis=collapse_dim),
tf.reduce_mean(activations, axis=collapse_dim)],
axis=join_dim),
# Next targets are crop_y (axis 1) which we must reduce on axis 2
tf.concat([tf.reduce_max(activations, axis=collapse_dim+1),
tf.reduce_mean(activations, axis=collapse_dim+1)],
axis=join_dim)],
axis=collapse_dim) # Join the two crops together.
if self._collapsed_batch_norm:
embedding_1d = tf.contrib.layers.batch_norm(
embedding_1d, is_training=use_on_the_fly_stats,
fused=True, decay=0.999, scope='collapsed_batch_norm',
data_format='NHWC')
for i, nfil in enumerate(self._filters_1d):
embedding_1d = tf.contrib.layers.fully_connected(
embedding_1d,
num_outputs=nfil,
normalizer_fn=(
tf.contrib.layers.batch_norm if self._collapsed_batch_norm
else None),
normalizer_params={'is_training': use_on_the_fly_stats,
'updates_collections': None},
scope='collapsed_embed_%d' % i)
if self.torsion_multiplier > 0:
self.torsion_logits = tf.contrib.layers.fully_connected(
embedding_1d,
num_outputs=self._torsion_bins * self._torsion_bins,
activation_fn=None,
scope='torsion_logits')
self.torsion_output = tf.nn.softmax(self.torsion_logits)
if self.secstruct_multiplier > 0:
self._secstruct.make_layer_new(embedding_1d)
if self.asa_multiplier > 0:
self.asa_logits = self._asa.compute_asa_output(embedding_1d)
return logits
@staticmethod
def _concatenate_2d(hidden_1d, residue_index, hidden_2d, crop_x, crop_y,
binary_code_bits, crop_size_x, crop_size_y):
# Form the pairwise expansion of the 1D embedding
# And the residue offsets and (one) absolute position.
with tf.name_scope('Features2D'):
range_scale = 100.0 # Crude normalization factor.
n = tf.shape(hidden_1d)[1]
# pylint: disable=g-long-lambda
hidden_1d_cropped_y = tf.map_fn(
call_on_tuple(lambda c, h: tf.pad(
h[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_y -(n - c[0]))], [0, 0]])),
elems=(crop_y, hidden_1d), dtype=tf.float32,
back_prop=True)
range_n_y = tf.map_fn(
call_on_tuple(lambda ri, c: tf.pad(
ri[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_y -(n - c[0]))]])),
elems=(residue_index, crop_y), dtype=tf.int32,
back_prop=False)
hidden_1d_cropped_x = tf.map_fn(
call_on_tuple(lambda c, h: tf.pad(
h[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_x -(n - c[0]))], [0, 0]])),
elems=(crop_x, hidden_1d), dtype=tf.float32,
back_prop=True)
range_n_x = tf.map_fn(
call_on_tuple(lambda ri, c: tf.pad(
ri[tf.maximum(0, c[0]):c[1]],
[[tf.maximum(0, -c[0]),
tf.maximum(0, crop_size_x -(n - c[0]))]])),
elems=(residue_index, crop_x), dtype=tf.int32,
back_prop=False)
# pylint: enable=g-long-lambda
n_x = crop_size_x
n_y = crop_size_y
offset = (tf.expand_dims(tf.cast(range_n_x, tf.float32), 1) -
tf.expand_dims(tf.cast(range_n_y, tf.float32), 2)) / range_scale
position_features = [
tf.tile(
tf.reshape(
(tf.cast(range_n_y, tf.float32) - range_scale) / range_scale,
[-1, n_y, 1, 1]), [1, 1, n_x, 1],
name='TileRange'),
tf.tile(
tf.reshape(offset, [-1, n_y, n_x, 1]), [1, 1, 1, 1],
name='TileOffset')
]
channels = 2
if binary_code_bits:
# Binary coding of position.
exp_range_n_y = tf.expand_dims(range_n_y, 2)
bin_y = tf.stop_gradient(
tf.concat([tf.math.floormod(exp_range_n_y // (1 << i), 2)
for i in range(binary_code_bits)], 2))
exp_range_n_x = tf.expand_dims(range_n_x, 2)
bin_x = tf.stop_gradient(
tf.concat([tf.math.floormod(exp_range_n_x // (1 << i), 2)
for i in range(binary_code_bits)], 2))
position_features += [
tf.tile(
tf.expand_dims(tf.cast(bin_y, tf.float32), 2), [1, 1, n_x, 1],
name='TileBinRangey'),
tf.tile(
tf.expand_dims(tf.cast(bin_x, tf.float32), 1), [1, n_y, 1, 1],
name='TileBinRangex')
]
channels += 2 * binary_code_bits
augmentation_features = position_features + [
tf.tile(tf.expand_dims(hidden_1d_cropped_x, 1),
[1, n_y, 1, 1], name='Tile1Dx'),
tf.tile(tf.expand_dims(hidden_1d_cropped_y, 2),
[1, 1, n_x, 1], name='Tile1Dy')]
channels += 2 * hidden_1d.shape.as_list()[-1]
channels += hidden_2d.shape.as_list()[-1]
hidden_2d = tf.concat(
[hidden_2d] + augmentation_features, 3, name='Stack2Dfeatures')
logging.info('2d stacked features are depth %d %s', channels, hidden_2d)
hidden_2d.set_shape([None, None, None, channels])
return hidden_2d
def _build_2d_embedding(self, hidden_1d, residue_index, inputs_2d,
output_dimension, use_on_the_fly_stats, crop_x,
crop_y, crop_size_x, crop_size_y, data_format):
"""Returns NHWC logits and NHWC preactivations."""
logging.info('2d %s %s', inputs_2d, data_format)
# Stack with diagonal has already happened.
inputs_2d_cropped = inputs_2d
features_forward = None
hidden_2d = inputs_2d_cropped
hidden_2d = self._concatenate_2d(
hidden_1d, residue_index, hidden_2d, crop_x, crop_y,
self._binary_code_bits, crop_size_x, crop_size_y)
config_2d_deep = self._network_2d_deep
num_features = hidden_2d.shape.as_list()[3]
if data_format == 'NCHW':
logging.info('NCHW shape deep pre %s', hidden_2d)
hidden_2d = tf.transpose(hidden_2d, perm=[0, 3, 1, 2])
hidden_2d.set_shape([None, num_features, None, None])
logging.info('NCHW shape deep post %s', hidden_2d)
layers_forward = None
if config_2d_deep.extra_blocks:
# Optionally put some extra double-size blocks at the beginning.
with tf.variable_scope('Deep2DExtra'):
hidden_2d = two_dim_resnet.make_two_dim_resnet(
input_node=hidden_2d,
num_residues=None, # Unused
num_features=num_features,
num_predictions=2 * config_2d_deep.num_filters,
num_channels=2 * config_2d_deep.num_filters,
num_layers=config_2d_deep.extra_blocks *
config_2d_deep.num_layers_per_block,
filter_size=3,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
fancy=True,
final_non_linearity=True,
atrou_rates=[1, 2, 4, 8],
data_format=data_format,
dropout_keep_prob=1.0
)
num_features = 2 * config_2d_deep.num_filters
if self._skip_connect:
layers_forward = hidden_2d
if features_forward is not None:
hidden_2d = tf.concat([hidden_2d, features_forward], 1
if data_format == 'NCHW' else 3)
with tf.variable_scope('Deep2D'):
logging.info('2d hidden shape is %s', str(hidden_2d.shape.as_list()))
contact_pre_logits = two_dim_resnet.make_two_dim_resnet(
input_node=hidden_2d,
num_residues=None, # Unused
num_features=num_features,
num_predictions=(config_2d_deep.num_filters
if self._reshape_layer else output_dimension),
num_channels=config_2d_deep.num_filters,
num_layers=config_2d_deep.num_blocks *
config_2d_deep.num_layers_per_block,
filter_size=3,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
fancy=True,
final_non_linearity=self._reshape_layer,
atrou_rates=[1, 2, 4, 8],
data_format=data_format,
dropout_keep_prob=1.0
)
contact_logits = self._output_from_pre_logits(
contact_pre_logits, features_forward, layers_forward,
output_dimension, data_format, crop_x, crop_y, use_on_the_fly_stats)
if data_format == 'NCHW':
contact_pre_logits = tf.transpose(contact_pre_logits, perm=[0, 2, 3, 1])
# Both of these will be NHWC
return contact_logits, contact_pre_logits
def _output_from_pre_logits(self, contact_pre_logits, features_forward,
layers_forward, output_dimension, data_format,
crop_x, crop_y, use_on_the_fly_stats):
"""Given pre-logits, compute the final distogram/contact activations."""
config_2d_deep = self._network_2d_deep
if self._reshape_layer:
in_channels = config_2d_deep.num_filters
concat_features = [contact_pre_logits]
if features_forward is not None:
concat_features.append(features_forward)
in_channels += self._features_forward
if layers_forward is not None:
concat_features.append(layers_forward)
in_channels += 2 * config_2d_deep.num_filters
if len(concat_features) > 1:
contact_pre_logits = tf.concat(concat_features,
1 if data_format == 'NCHW' else 3)
contact_logits = two_dim_convnet.make_conv_layer(
contact_pre_logits,
in_channels=in_channels,
out_channels=output_dimension,
layer_name='output_reshape_1x1h',
filter_size=1,
filter_size_2=1,
non_linearity=False,
batch_norm=config_2d_deep.use_batch_norm,
is_training=use_on_the_fly_stats,
data_format=data_format)
else:
contact_logits = contact_pre_logits
if data_format == 'NCHW':
contact_logits = tf.transpose(contact_logits, perm=[0, 2, 3, 1])
if self._position_specific_bias_size:
# Make 2D pos-specific biases: NHWC.
biases = build_crops_biases(
self._position_specific_bias_size,
self._position_specific_bias, crop_x, crop_y, back_prop=True)
contact_logits += biases
# Will be NHWC.
return contact_logits
def update_crop_fetches(self, fetches):
"""Add auxiliary outputs for a crop to the fetches."""
if self.secstruct_multiplier > 0:
fetches['secstruct_probs'] = self._secstruct.get_q8_probs()
if self.asa_multiplier > 0:
fetches['asa_output'] = self._asa.asa_output
if self.torsion_multiplier > 0:
fetches['torsion_probs'] = self.torsion_output
def build_crops_biases(bias_size, raw_biases, crop_x, crop_y, back_prop):
"""Take the offset-specific biases and reshape them to match current crops.
Args:
bias_size: how many bias variables we're storing.
raw_biases: the bias variable
crop_x: B x 2 array of start/end for the batch
crop_y: B x 2 array of start/end for the batch
back_prop: whether to backprop through the map_fn.
Returns:
Reshaped biases.
"""
# First pad the biases with a copy of the final value to the maximum length.
max_off_diag = tf.reduce_max(
tf.maximum(tf.abs(crop_x[:, 1] - crop_y[:, 0]),
tf.abs(crop_y[:, 1] - crop_x[:, 0])))
padded_bias_size = tf.maximum(bias_size, max_off_diag)
biases = tf.concat(
[raw_biases,
tf.tile(raw_biases[-1:, :],
[padded_bias_size - bias_size, 1])], axis=0)
# Now prepend a mirror image (excluding 0th elt) for below-diagonal.
biases = tf.concat([tf.reverse(biases[1:, :], axis=[0]), biases], axis=0)
# Which diagonal of the full matrix each crop starts on (top left):
start_diag = crop_x[:, 0:1] - crop_y[:, 0:1] # B x 1
crop_size_x = tf.reduce_max(crop_x[:, 1] - crop_x[:, 0])
crop_size_y = tf.reduce_max(crop_y[:, 1] - crop_y[:, 0])
# Relative offset of each row within a crop:
# (off-diagonal decreases as y increases)
increment = tf.expand_dims(-tf.range(0, crop_size_y), 0) # 1 x crop_size_y
# Index of diagonal of first element of each row, flattened.
row_offsets = tf.reshape(start_diag + increment, [-1]) # B*crop_size_y
logging.info('row_offsets %s', row_offsets)
# Make it relative to the start of the biases array. (0-th diagonal is in
# the middle at position padded_bias_size - 1)
row_offsets += padded_bias_size - 1
# Map_fn to build the individual rows.
# B*cropsizey x cropsizex x num_bins
cropped_biases = tf.map_fn(lambda i: biases[i:i+crop_size_x, :],
elems=row_offsets, dtype=tf.float32,
back_prop=back_prop)
logging.info('cropped_biases %s', cropped_biases)
return tf.reshape(
cropped_biases, [-1, crop_size_y, crop_size_x, tf.shape(raw_biases)[-1]])
| deepmind-research-master | alphafold_casp13/contacts_network.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `bernoulli.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
from gated_linear_networks import bernoulli
def _get_dataset(input_size, batch_size=None):
"""Get mock dataset."""
if batch_size:
inputs = jnp.ones([batch_size, input_size])
side_info = jnp.ones([batch_size, input_size])
targets = jnp.ones([batch_size])
else:
inputs = jnp.ones([input_size])
side_info = jnp.ones([input_size])
targets = jnp.ones([])
return inputs, side_info, targets
class GatedLinearNetworkTest(parameterized.TestCase):
# TODO(b/170843789): Factor out common test utilities.
def setUp(self):
super(GatedLinearNetworkTest, self).setUp()
self._name = "test_network"
self._rng = hk.PRNGSequence(jax.random.PRNGKey(42))
self._output_sizes = (4, 5, 6)
self._context_dim = 2
def gln_factory():
return bernoulli.GatedLinearNetwork(
output_sizes=self._output_sizes,
context_dim=self._context_dim,
name=self._name)
def inference_fn(inputs, side_info):
return gln_factory().inference(inputs, side_info)
def batch_inference_fn(inputs, side_info):
return jax.vmap(inference_fn, in_axes=(0, 0))(inputs, side_info)
def update_fn(inputs, side_info, label, learning_rate):
params, predictions, unused_loss = gln_factory().update(
inputs, side_info, label, learning_rate)
return predictions, params
def batch_update_fn(inputs, side_info, label, learning_rate):
predictions, params = jax.vmap(
update_fn, in_axes=(0, 0, 0, None))(inputs, side_info, label,
learning_rate)
avg_params = tree.map_structure(lambda x: jnp.mean(x, axis=0), params)
return predictions, avg_params
# Haiku transform functions.
self._init_fn, inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
self._batch_init_fn, batch_inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_inference_fn))
_, update_fn_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
_, batch_update_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_update_fn))
self._inference_fn = jax.jit(inference_fn_)
self._batch_inference_fn = jax.jit(batch_inference_fn_)
self._update_fn = jax.jit(update_fn_)
self._batch_update_fn = jax.jit(batch_update_fn_)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_shapes(self, batch_size):
"""Test shapes in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
input_size = 10
inputs, side_info, _ = _get_dataset(input_size, batch_size)
input_size = inputs.shape[-1]
# Initialize network.
gln_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Test shapes of parameters layer-wise.
layer_input_size = input_size
for layer_idx, output_size in enumerate(self._output_sizes):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
weights = gln_params[name]["weights"]
expected_shape = (output_size, 2**self._context_dim, layer_input_size + 1)
self.assertEqual(weights.shape, expected_shape)
layer_input_size = output_size
# Test shape of output.
output_size = sum(self._output_sizes)
predictions, _ = inference_fn(gln_params, gln_state, inputs, side_info)
expected_shape = (batch_size, output_size) if batch_size else (output_size,)
self.assertEqual(predictions.shape, expected_shape)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_update(self, batch_size):
"""Test network updates in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
update_fn = self._update_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
update_fn = self._batch_update_fn
input_size = 10
inputs, side_info, targets = _get_dataset(input_size, batch_size)
# Initialize network.
initial_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Initial predictions.
initial_predictions, _ = inference_fn(initial_params, gln_state, inputs,
side_info)
# Test that params remain valid after consecutive updates.
gln_params = initial_params
for _ in range(3):
(_, gln_params), gln_state = update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-4)
# Check updated weights layer-wise.
for layer_idx in range(len(self._output_sizes)):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
initial_weights = initial_params[name]["weights"]
new_weights = gln_params[name]["weights"]
# Shape consistency.
self.assertEqual(new_weights.shape, initial_weights.shape)
# Check that different weights yield different predictions.
new_predictions, _ = inference_fn(gln_params, gln_state, inputs,
side_info)
self.assertFalse(np.array_equal(new_predictions, initial_predictions))
def test_batch_consistency(self):
"""Test consistency between online and batch updates."""
input_size = 10
batch_size = 3
inputs, side_info, targets = _get_dataset(input_size, batch_size)
# Initialize network.
gln_params, gln_state = self._batch_init_fn(
next(self._rng), inputs, side_info)
test_layer = "{}/~/{}_layer_0".format(self._name, self._name)
for _ in range(10):
# Update on full batch.
(expected_predictions, expected_params), _ = self._batch_update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-3)
# Average updates across batch and check equivalence.
accum_predictions = []
accum_weights = []
for inputs_, side_info_, targets_ in zip(inputs, side_info, targets):
(predictions, params), _ = self._update_fn(
gln_params,
gln_state,
inputs_,
side_info_,
targets_,
learning_rate=1e-3)
accum_predictions.append(predictions)
accum_weights.append(params[test_layer]["weights"])
# Check prediction equivalence.
actual_predictions = np.stack(accum_predictions, axis=0)
np.testing.assert_array_almost_equal(actual_predictions,
expected_predictions)
# Check weight equivalence.
actual_weights = np.mean(np.stack(accum_weights, axis=0), axis=0)
expected_weights = expected_params[test_layer]["weights"]
np.testing.assert_array_almost_equal(actual_weights, expected_weights)
gln_params = expected_params
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | gated_linear_networks/bernoulli_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bernoulli Gated Linear Network."""
from typing import List, Text, Tuple
import chex
import jax
import jax.numpy as jnp
import rlax
import tensorflow_probability as tfp
from gated_linear_networks import base
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
Array = chex.Array
GLN_EPS = 0.01
MAX_WEIGHT = 200.
class GatedLinearNetwork(base.GatedLinearNetwork):
"""Bernoulli Gated Linear Network."""
def __init__(self,
output_sizes: List[int],
context_dim: int,
name: Text = "bernoulli_gln"):
"""Initialize a Bernoulli GLN."""
super(GatedLinearNetwork, self).__init__(
output_sizes,
context_dim,
inference_fn=GatedLinearNetwork._inference_fn,
update_fn=GatedLinearNetwork._update_fn,
init=jnp.zeros,
dtype=jnp.float32,
name=name)
def _add_bias(self, inputs):
return jnp.append(inputs, rlax.sigmoid(1.))
@staticmethod
def _inference_fn(
inputs: Array, # [input_size]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, input_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
) -> Array:
"""Inference step for a single Beurnolli neuron."""
weight_index = GatedLinearNetwork._compute_context(side_info, hyperplanes,
hyperplane_bias)
used_weights = weights[weight_index]
inputs = rlax.logit(jnp.clip(inputs, GLN_EPS, 1. - GLN_EPS))
prediction = rlax.sigmoid(jnp.dot(used_weights, inputs))
return prediction
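  # Note: the inference above implements geometric mixing. Informally,
  #   prediction = sigmoid(sum_i w_i * logit(p_i)),
  # i.e. a weighted geometric mixture of the input Bernoulli probabilities,
  # where the weight vector w is selected by the side-information context.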
@staticmethod
def _update_fn(
inputs: Array, # [input_size]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, num_features]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
target: Array, # []
learning_rate: float,
) -> Tuple[Array, Array, Array]:
"""Update step for a single Bernoulli neuron."""
def log_loss_fn(inputs, side_info, weights, hyperplanes, hyperplane_bias,
target):
"""Log loss for a single Bernoulli neuron."""
prediction = GatedLinearNetwork._inference_fn(inputs, side_info, weights,
hyperplanes,
hyperplane_bias)
prediction = jnp.clip(prediction, GLN_EPS, 1. - GLN_EPS)
return rlax.log_loss(prediction, target), prediction
grad_log_loss = jax.value_and_grad(log_loss_fn, argnums=2, has_aux=True)
((log_loss, prediction),
dloss_dweights) = grad_log_loss(inputs, side_info, weights, hyperplanes,
hyperplane_bias, target)
delta_weights = learning_rate * dloss_dweights
new_weights = jnp.clip(weights - delta_weights, -MAX_WEIGHT, MAX_WEIGHT)
return new_weights, prediction, log_loss
class LastNeuronAggregator(base.LastNeuronAggregator):
"""Bernoulli last neuron aggregator, implemented by the super class."""
pass
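# Minimal usage sketch (mirrors the pattern used in bernoulli_test.py; assumes
# Haiku is available; shapes and sizes below are arbitrary example values):
#
#   import haiku as hk
#
#   def inference_fn(inputs, side_info):
#     gln = GatedLinearNetwork(output_sizes=(4, 5, 1), context_dim=2)
#     return gln.inference(inputs, side_info)
#
#   init_fn, apply_fn = hk.without_apply_rng(
#       hk.transform_with_state(inference_fn))
#   inputs = 0.5 * jnp.ones([10])  # input probabilities in (0, 1)
#   side_info = jnp.ones([10])     # side information used for gating
#   params, state = init_fn(jax.random.PRNGKey(0), inputs, side_info)
#   predictions, _ = apply_fn(params, state, inputs, side_info)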
| deepmind-research-master | gated_linear_networks/bernoulli.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `gaussian.py`."""
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import tree
from gated_linear_networks import gaussian
def _get_dataset(input_size, batch_size=None):
"""Get mock dataset."""
if batch_size:
inputs = jnp.ones([batch_size, input_size, 2])
side_info = jnp.ones([batch_size, input_size])
targets = 0.8 * jnp.ones([batch_size])
else:
inputs = jnp.ones([input_size, 2])
side_info = jnp.ones([input_size])
targets = jnp.ones([])
return inputs, side_info, targets
class UtilsTest(absltest.TestCase):
def test_packing_identity(self):
mu = jnp.array([1., 2., 3., 4., 5.])
sigma_sq = jnp.array([6., 7., 8., 9., 10.])
mu_2, sigma_sq_2 = gaussian._unpack_inputs(
gaussian._pack_inputs(mu, sigma_sq))
np.testing.assert_array_equal(mu, mu_2)
np.testing.assert_array_equal(sigma_sq, sigma_sq_2)
class GatedLinearNetworkTest(parameterized.TestCase):
# TODO(b/170843789): Factor out common test utilities.
def setUp(self):
super(GatedLinearNetworkTest, self).setUp()
self._name = "test_network"
self._rng = hk.PRNGSequence(jax.random.PRNGKey(42))
self._output_sizes = (4, 5, 6)
self._context_dim = 2
self._bias_len = 3
def gln_factory():
return gaussian.GatedLinearNetwork(
output_sizes=self._output_sizes,
context_dim=self._context_dim,
bias_len=self._bias_len,
name=self._name,
)
def inference_fn(inputs, side_info):
return gln_factory().inference(inputs, side_info, 0.5)
def batch_inference_fn(inputs, side_info):
return jax.vmap(inference_fn, in_axes=(0, 0))(inputs, side_info)
def update_fn(inputs, side_info, label, learning_rate):
params, predictions, unused_loss = gln_factory().update(
inputs, side_info, label, learning_rate, 0.5)
return predictions, params
def batch_update_fn(inputs, side_info, label, learning_rate):
predictions, params = jax.vmap(
update_fn, in_axes=(0, 0, 0, None))(
inputs,
side_info,
label,
learning_rate)
avg_params = tree.map_structure(lambda x: jnp.mean(x, axis=0), params)
return predictions, avg_params
# Haiku transform functions.
self._init_fn, inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
self._batch_init_fn, batch_inference_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_inference_fn))
_, update_fn_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
_, batch_update_fn_ = hk.without_apply_rng(
hk.transform_with_state(batch_update_fn))
self._inference_fn = jax.jit(inference_fn_)
self._batch_inference_fn = jax.jit(batch_inference_fn_)
self._update_fn = jax.jit(update_fn_)
self._batch_update_fn = jax.jit(batch_update_fn_)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_shapes(self, batch_size):
"""Test shapes in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
input_size = 10
inputs, side_info, _ = _get_dataset(input_size, batch_size)
# Initialize network.
gln_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Test shapes of parameters layer-wise.
layer_input_size = input_size
for layer_idx, output_size in enumerate(self._output_sizes):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
weights = gln_params[name]["weights"]
expected_shape = (output_size, 2**self._context_dim,
layer_input_size + self._bias_len)
self.assertEqual(weights.shape, expected_shape)
layer_input_size = output_size
# Test shape of output.
output_size = sum(self._output_sizes)
predictions, _ = inference_fn(gln_params, gln_state, inputs, side_info)
expected_shape = (batch_size, output_size,
2) if batch_size else (output_size, 2)
self.assertEqual(predictions.shape, expected_shape)
@parameterized.named_parameters(("Online mode", None), ("Batch mode", 3))
def test_update(self, batch_size):
"""Test network updates in online and batch regimes."""
if batch_size is None:
init_fn = self._init_fn
inference_fn = self._inference_fn
update_fn = self._update_fn
else:
init_fn = self._batch_init_fn
inference_fn = self._batch_inference_fn
update_fn = self._batch_update_fn
inputs, side_info, targets = _get_dataset(10, batch_size)
# Initialize network.
initial_params, gln_state = init_fn(next(self._rng), inputs, side_info)
# Initial predictions.
initial_predictions, _ = inference_fn(initial_params, gln_state, inputs,
side_info)
# Test that params remain valid after consecutive updates.
gln_params = initial_params
for _ in range(3):
(_, gln_params), _ = update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-4)
# Check updated weights layer-wise.
for layer_idx in range(len(self._output_sizes)):
name = "{}/~/{}_layer_{}".format(self._name, self._name, layer_idx)
initial_weights = initial_params[name]["weights"]
new_weights = gln_params[name]["weights"]
# Shape consistency.
self.assertEqual(new_weights.shape, initial_weights.shape)
# Check that different weights yield different predictions.
new_predictions, _ = inference_fn(gln_params, gln_state, inputs,
side_info)
self.assertFalse(np.array_equal(new_predictions, initial_predictions))
def test_batch_consistency(self):
"""Test consistency between online and batch updates."""
batch_size = 3
inputs, side_info, targets = _get_dataset(10, batch_size)
# Initialize network.
gln_params, gln_state = self._batch_init_fn(
next(self._rng), inputs, side_info)
test_layer = "{}/~/{}_layer_0".format(self._name, self._name)
for _ in range(10):
# Update on full batch.
(expected_predictions, expected_params), _ = self._batch_update_fn(
gln_params, gln_state, inputs, side_info, targets, learning_rate=1e-3)
# Average updates across batch and check equivalence.
accum_predictions = []
accum_weights = []
for inputs_, side_info_, targets_ in zip(inputs, side_info, targets):
(predictions, params), _ = self._update_fn(
gln_params,
gln_state,
inputs_,
side_info_,
targets_,
learning_rate=1e-3)
accum_predictions.append(predictions)
accum_weights.append(params[test_layer]["weights"])
# Check prediction equivalence.
actual_predictions = np.stack(accum_predictions, axis=0)
np.testing.assert_array_almost_equal(actual_predictions,
expected_predictions)
# Check weight equivalence.
actual_weights = np.mean(np.stack(accum_weights, axis=0), axis=0)
expected_weights = expected_params[test_layer]["weights"]
np.testing.assert_array_almost_equal(actual_weights, expected_weights)
gln_params = expected_params
if __name__ == "__main__":
absltest.main()
| deepmind-research-master | gated_linear_networks/gaussian_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for Gated Linear Networks."""
import abc
import collections
import functools
import inspect
from typing import Any, Callable, Optional, Sequence, Tuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
Array = chex.Array
DType = Any
Initializer = hk.initializers.Initializer
Shape = Sequence[int]
EPS = 1e-12
MIN_ALPHA = 1e-5
def _l2_normalize(x: Array, axis: int) -> Array:
return x / jnp.sqrt(jnp.maximum(jnp.sum(x**2, axis, keepdims=True), EPS))
def _wrapped_fn_argnames(fun):
"""Returns list of argnames of a (possibly wrapped) function."""
return tuple(inspect.signature(fun).parameters)
def _vmap(fun, in_axes=0, out_axes=0, parameters=None):
"""JAX vmap with human-friendly axes."""
def _axes(fun, d):
"""Maps dict {kwarg_i, : val_i} to [None, ..., val_i, ..., None]."""
argnames = _wrapped_fn_argnames(fun) if not parameters else parameters
for key in d:
if key not in argnames:
raise ValueError(f"{key} is not a valid axis.")
return tuple(d.get(key, None) for key in argnames)
in_axes = _axes(fun, in_axes) if isinstance(in_axes, dict) else in_axes
return jax.vmap(fun, in_axes, out_axes)
# Map a neuron-level function across a layer.
_layer_vmap = functools.partial(
_vmap,
in_axes=({
"weights": 0,
"hyperplanes": 0,
"hyperplane_bias": 0,
}))
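# For example, when _layer_vmap wraps an `_inference_fn(inputs, side_info,
# weights, hyperplanes, hyperplane_bias, ...)`, the dict above expands to the
# positional in_axes (None, None, 0, 0, 0, ...): per-neuron parameters are
# mapped over their leading (neuron) axis, while inputs and side_info are
# broadcast to every neuron in the layer.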
class NormalizedRandomNormal(hk.initializers.RandomNormal):
"""Random normal initializer with l2-normalization."""
def __init__(self,
stddev: float = 1.,
mean: float = 0.,
normalize_axis: int = 0):
super(NormalizedRandomNormal, self).__init__(stddev, mean)
self._normalize_axis = normalize_axis
def __call__(self, shape: Shape, dtype: DType) -> Array:
if self._normalize_axis >= len(shape):
raise ValueError("Cannot normalize axis {} for ndim = {}.".format(
self._normalize_axis, len(shape)))
weights = super(NormalizedRandomNormal, self).__call__(shape, dtype)
return _l2_normalize(weights, axis=self._normalize_axis)
class ShapeScaledConstant(hk.initializers.Initializer):
"""Initializes with a constant dependent on last dimension of input shape."""
def __call__(self, shape: Shape, dtype: DType) -> jnp.ndarray:
constant = 1. / shape[-1]
return jnp.broadcast_to(constant, shape).astype(dtype)
class LocalUpdateModule(hk.Module):
"""Abstract base class for GLN variants and utils."""
def __init__(self, name: Optional[str] = None):
if hasattr(self, "__call__"):
raise ValueError("Do not implement `__call__` for a LocalUpdateModule." +
" Implement `inference` and `update` instead.")
super(LocalUpdateModule, self).__init__(name)
@abc.abstractmethod
def inference(self, *args, **kwargs):
"""Module inference step."""
@abc.abstractmethod
def update(self, *args, **kwargs):
"""Module update step."""
@property
@abc.abstractmethod
def output_sizes(self) -> Shape:
"""Returns network output sizes."""
class GatedLinearNetwork(LocalUpdateModule):
"""Abstract base class for a multi-layer Gated Linear Network."""
def __init__(self,
output_sizes: Shape,
context_dim: int,
inference_fn: Callable[..., Array],
update_fn: Callable[..., Array],
init: Initializer,
hyp_w_init: Optional[Initializer] = None,
hyp_b_init: Optional[Initializer] = None,
dtype: DType = jnp.float32,
name: str = "gated_linear_network"):
"""Initialize a GatedLinearNetwork as a sequence of GatedLinearLayers."""
super(GatedLinearNetwork, self).__init__(name=name)
self._layers = []
self._output_sizes = output_sizes
for i, output_size in enumerate(self._output_sizes):
layer = _GatedLinearLayer(
output_size=output_size,
context_dim=context_dim,
update_fn=update_fn,
inference_fn=inference_fn,
init=init,
hyp_w_init=hyp_w_init,
hyp_b_init=hyp_b_init,
dtype=dtype,
name=name + "_layer_{}".format(i))
self._layers.append(layer)
self._name = name
@abc.abstractmethod
def _add_bias(self, inputs):
pass
def inference(self, inputs: Array, side_info: Array, *args,
**kwargs) -> Array:
"""GatedLinearNetwork inference."""
predictions_per_layer = []
predictions = inputs
for layer in self._layers:
predictions = self._add_bias(predictions)
predictions = layer.inference(predictions, side_info, *args, **kwargs)
predictions_per_layer.append(predictions)
return jnp.concatenate(predictions_per_layer, axis=0)
def update(self, inputs, side_info, target, learning_rate, *args, **kwargs):
"""GatedLinearNetwork update."""
all_params = []
all_predictions = []
all_losses = []
predictions = inputs
for layer in self._layers:
predictions = self._add_bias(predictions)
# Note: This is correct because returned predictions are pre-update.
params, predictions, log_loss = layer.update(predictions, side_info,
target, learning_rate, *args,
**kwargs)
all_params.append(params)
all_predictions.append(predictions)
all_losses.append(log_loss)
new_params = dict(collections.ChainMap(*all_params))
predictions = jnp.concatenate(all_predictions, axis=0)
log_loss = jnp.concatenate(all_losses, axis=0)
return new_params, predictions, log_loss
@property
def output_sizes(self):
return self._output_sizes
@staticmethod
def _compute_context(
side_info: Array, # [side_info_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
) -> Array:
# Index weights by side information.
context_dim = hyperplane_bias.shape[0]
proj = jnp.dot(hyperplanes, side_info)
bits = (proj > hyperplane_bias).astype(jnp.int32)
weight_index = jnp.sum(
bits *
jnp.array([2**i for i in range(context_dim)])) if context_dim else 0
return weight_index
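  # Worked example (illustrative numbers): with context_dim = 2,
  # proj = hyperplanes.dot(side_info) = [0.3, -1.2] and bias = [0., 0.],
  # bits = [1, 0] and weight_index = 1 * 2**0 + 0 * 2**1 = 1, i.e. the side
  # information selects one of the 2**context_dim weight vectors per neuron.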
class _GatedLinearLayer(LocalUpdateModule):
"""A single layer of a Gated Linear Network."""
def __init__(self,
output_size: int,
context_dim: int,
inference_fn: Callable[..., Array],
update_fn: Callable[..., Array],
init: Initializer,
hyp_w_init: Optional[Initializer] = None,
hyp_b_init: Optional[Initializer] = None,
dtype: DType = jnp.float32,
name: str = "gated_linear_layer"):
"""Initialize a GatedLinearLayer."""
super(_GatedLinearLayer, self).__init__(name=name)
self._output_size = output_size
self._context_dim = context_dim
self._inference_fn = inference_fn
self._update_fn = update_fn
self._init = init
self._hyp_w_init = hyp_w_init
self._hyp_b_init = hyp_b_init
self._dtype = dtype
self._name = name
def _get_weights(self, input_size):
"""Get (or initialize) weight parameters."""
weights = hk.get_parameter(
"weights",
shape=(self._output_size, 2**self._context_dim, input_size),
dtype=self._dtype,
init=self._init,
)
return weights
def _get_hyperplanes(self, side_info_size):
"""Get (or initialize) hyperplane weights and bias."""
hyp_w_init = self._hyp_w_init or NormalizedRandomNormal(
stddev=1., normalize_axis=1)
hyperplanes = hk.get_state(
"hyperplanes",
shape=(self._output_size, self._context_dim, side_info_size),
init=hyp_w_init)
hyp_b_init = self._hyp_b_init or hk.initializers.RandomNormal(stddev=0.05)
hyperplane_bias = hk.get_state(
"hyperplane_bias",
shape=(self._output_size, self._context_dim),
init=hyp_b_init)
return hyperplanes, hyperplane_bias
def inference(self, inputs: Array, side_info: Array, *args,
**kwargs) -> Array:
"""GatedLinearLayer inference."""
# Initialize layer weights.
weights = self._get_weights(inputs.shape[0])
# Initialize fixed random hyperplanes.
side_info_size = side_info.shape[0]
hyperplanes, hyperplane_bias = self._get_hyperplanes(side_info_size)
# Perform layer-wise inference by mapping along output_size (num_neurons).
layer_inference = _layer_vmap(self._inference_fn)
predictions = layer_inference(inputs, side_info, weights, hyperplanes,
hyperplane_bias, *args, **kwargs)
return predictions
def update(self, inputs: Array, side_info: Array, target: Array,
learning_rate: float, *args,
**kwargs) -> Tuple[Array, Array, Array]:
"""GatedLinearLayer update."""
# Fetch layer weights.
weights = self._get_weights(inputs.shape[0])
# Fetch fixed random hyperplanes.
side_info_size = side_info.shape[0]
hyperplanes, hyperplane_bias = self._get_hyperplanes(side_info_size)
# Perform layer-wise update by mapping along output_size (num_neurons).
layer_update = _layer_vmap(self._update_fn)
new_weights, predictions, log_loss = layer_update(inputs, side_info,
weights, hyperplanes,
hyperplane_bias, target,
learning_rate, *args,
**kwargs)
assert new_weights.shape == weights.shape
params = {self.module_name: {"weights": new_weights}}
return params, predictions, log_loss
@property
def output_sizes(self):
return self._output_size
class Mutator(LocalUpdateModule):
"""Abstract base class for GLN Mutators."""
def __init__(
self,
network_factory: Callable[..., LocalUpdateModule],
name: str,
):
super(Mutator, self).__init__(name=name)
self._network = network_factory()
self._name = name
@property
def output_sizes(self):
return self._network.output_sizes
class LastNeuronAggregator(Mutator):
"""Last neuron aggregator: network output is read from the last neuron."""
def __init__(
self,
network_factory: Callable[..., LocalUpdateModule],
name: str = "last_neuron",
):
super(LastNeuronAggregator, self).__init__(network_factory, name)
if self._network.output_sizes[-1] != 1:
raise ValueError(
"LastNeuronAggregator requires the last GLN layer to have"
" output_size = 1.")
def inference(self, *args, **kwargs) -> Array:
predictions = self._network.inference(*args, **kwargs)
return predictions[-1]
def update(self, *args, **kwargs) -> Tuple[Array, Array, Array]:
params_t, predictions_tm1, loss_tm1 = self._network.update(*args, **kwargs)
return params_t, predictions_tm1[-1], loss_tm1[-1]
| deepmind-research-master | gated_linear_networks/base.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gaussian Gated Linear Network."""
from typing import Callable, List, Text, Tuple
import chex
import jax
import jax.numpy as jnp
import tensorflow_probability as tfp
from gated_linear_networks import base
tfp = tfp.experimental.substrates.jax
tfd = tfp.distributions
Array = chex.Array
MIN_SIGMA_SQ_AGGREGATOR = 0.5
MAX_SIGMA_SQ = 1e5
MAX_WEIGHT = 1e3
MIN_WEIGHT = -1e3
def _unpack_inputs(inputs: Array) -> Tuple[Array, Array]:
inputs = jnp.atleast_2d(inputs)
chex.assert_rank(inputs, 2)
(mu, sigma_sq) = [jnp.squeeze(x, 1) for x in jnp.hsplit(inputs, 2)]
return mu, sigma_sq
def _pack_inputs(mu: Array, sigma_sq: Array) -> Array:
mu = jnp.atleast_1d(mu)
sigma_sq = jnp.atleast_1d(sigma_sq)
chex.assert_rank([mu, sigma_sq], 1)
return jnp.vstack([mu, sigma_sq]).T
class GatedLinearNetwork(base.GatedLinearNetwork):
"""Gaussian Gated Linear Network."""
def __init__(
self,
output_sizes: List[int],
context_dim: int,
bias_len: int = 3,
bias_max_mu: float = 1.,
bias_sigma_sq: float = 1.,
name: Text = "gaussian_gln"):
"""Initialize a Gaussian GLN."""
super(GatedLinearNetwork, self).__init__(
output_sizes,
context_dim,
inference_fn=GatedLinearNetwork._inference_fn,
update_fn=GatedLinearNetwork._update_fn,
init=base.ShapeScaledConstant(),
dtype=jnp.float64,
name=name)
self._bias_len = bias_len
self._bias_max_mu = bias_max_mu
self._bias_sigma_sq = bias_sigma_sq
def _add_bias(self, inputs):
mu = jnp.linspace(-1. * self._bias_max_mu, self._bias_max_mu,
self._bias_len)
sigma_sq = self._bias_sigma_sq * jnp.ones_like(mu)
bias = _pack_inputs(mu, sigma_sq)
return jnp.concatenate([inputs, bias], axis=0)
@staticmethod
def _inference_fn(
inputs: Array, # [input_size, 2]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, input_size]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
min_sigma_sq: float,
) -> Array:
"""Inference step for a single Gaussian neuron."""
mu_in, sigma_sq_in = _unpack_inputs(inputs)
weight_index = GatedLinearNetwork._compute_context(side_info, hyperplanes,
hyperplane_bias)
used_weights = weights[weight_index]
# This projection operation is differentiable and affects the gradients.
used_weights = GatedLinearNetwork._project_weights(inputs, used_weights,
min_sigma_sq)
sigma_sq_out = 1. / jnp.sum(used_weights / sigma_sq_in)
mu_out = sigma_sq_out * jnp.sum((used_weights * mu_in) / sigma_sq_in)
prediction = jnp.hstack((mu_out, sigma_sq_out))
return prediction
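  # The step above is a weighted product of Gaussians: with precisions
  # lambda_i = 1 / sigma_sq_in_i and context-selected weights w_i,
  #   sigma_sq_out = 1 / sum_i(w_i * lambda_i)
  #   mu_out = sigma_sq_out * sum_i(w_i * lambda_i * mu_in_i)
  # so each neuron outputs a precision-weighted combination of its inputs.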
@staticmethod
def _project_weights(inputs: Array, # [input_size]
weights: Array, # [2**context_dim, num_features]
min_sigma_sq: float) -> Array:
"""Implements hard projection."""
# This projection should be performed before the sigma related ones.
weights = jnp.minimum(jnp.maximum(MIN_WEIGHT, weights), MAX_WEIGHT)
_, sigma_sq_in = _unpack_inputs(inputs)
lambda_in = 1. / sigma_sq_in
sigma_sq_out = 1. / weights.dot(lambda_in)
    # If sigma_sq_out < min_sigma_sq, linearly project w so that
    # sigma_sq_out == min_sigma_sq.
weights = jnp.where(
sigma_sq_out < min_sigma_sq, weights - lambda_in *
(1. / sigma_sq_out - 1. / min_sigma_sq) / jnp.sum(lambda_in**2),
weights)
    # If sigma_sq_out > MAX_SIGMA_SQ, linearly project w so that
    # sigma_sq_out == MAX_SIGMA_SQ.
weights = jnp.where(
sigma_sq_out > MAX_SIGMA_SQ, weights - lambda_in *
(1. / sigma_sq_out - 1. / MAX_SIGMA_SQ) / jnp.sum(lambda_in**2),
weights)
return weights
@staticmethod
def _update_fn(
inputs: Array, # [input_size]
side_info: Array, # [side_info_size]
weights: Array, # [2**context_dim, num_features]
hyperplanes: Array, # [context_dim, side_info_size]
hyperplane_bias: Array, # [context_dim]
target: Array, # []
learning_rate: float,
min_sigma_sq: float, # needed for inference (weight projection)
) -> Tuple[Array, Array, Array]:
"""Update step for a single Gaussian neuron."""
def log_loss_fn(inputs, side_info, weights, hyperplanes, hyperplane_bias,
target):
"""Log loss for a single Gaussian neuron."""
prediction = GatedLinearNetwork._inference_fn(inputs, side_info, weights,
hyperplanes,
hyperplane_bias,
min_sigma_sq)
mu, sigma_sq = prediction.T
loss = -tfd.Normal(mu, jnp.sqrt(sigma_sq)).log_prob(target)
return loss, prediction
grad_log_loss = jax.value_and_grad(log_loss_fn, argnums=2, has_aux=True)
(log_loss,
prediction), dloss_dweights = grad_log_loss(inputs, side_info, weights,
hyperplanes, hyperplane_bias,
target)
delta_weights = learning_rate * dloss_dweights
return weights - delta_weights, prediction, log_loss
class ConstantInputSigma(base.Mutator):
"""Input pre-processing by concatenating a constant sigma^2."""
def __init__(
self,
network_factory: Callable[..., GatedLinearNetwork],
input_sigma_sq: float,
name: Text = "constant_input_sigma",
):
super(ConstantInputSigma, self).__init__(network_factory, name)
self._input_sigma_sq = input_sigma_sq
def inference(self, inputs, *args, **kwargs):
"""ConstantInputSigma inference."""
chex.assert_rank(inputs, 1)
sigma_sq = self._input_sigma_sq * jnp.ones_like(inputs)
return self._network.inference(_pack_inputs(inputs, sigma_sq), *args,
**kwargs)
def update(self, inputs, *args, **kwargs):
"""ConstantInputSigma update."""
chex.assert_rank(inputs, 1)
sigma_sq = self._input_sigma_sq * jnp.ones_like(inputs)
return self._network.update(_pack_inputs(inputs, sigma_sq), *args, **kwargs)
class LastNeuronAggregator(base.LastNeuronAggregator):
"""Gaussian last neuron aggregator, implemented by the super class."""
pass
| deepmind-research-master | gated_linear_networks/gaussian.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for `utils.py`."""
from absl.testing import absltest
import haiku as hk
import jax
import numpy as np
from gated_linear_networks.examples import utils
class MeanStdEstimator(absltest.TestCase):
def test_statistics(self):
num_features = 100
feature_size = 3
samples = np.random.normal(
loc=5., scale=2., size=(num_features, feature_size))
true_mean = np.mean(samples, axis=0)
true_std = np.std(samples, axis=0)
def tick_(sample):
return utils.MeanStdEstimator()(sample)
init_fn, apply_fn = hk.without_apply_rng(hk.transform_with_state(tick_))
tick = jax.jit(apply_fn)
params, state = init_fn(rng=None, sample=samples[0])
for sample in samples:
(mean, std), state = tick(params, state, sample)
np.testing.assert_array_almost_equal(mean, true_mean, decimal=5)
np.testing.assert_array_almost_equal(std, true_std, decimal=5)
if __name__ == '__main__':
absltest.main()
| deepmind-research-master | gated_linear_networks/examples/utils_test.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Online MNIST classification example with Bernoulli GLN."""
from absl import app
from absl import flags
import haiku as hk
import jax
import jax.numpy as jnp
import rlax
from gated_linear_networks import bernoulli
from gated_linear_networks.examples import utils
MAX_TRAIN_STEPS = flags.DEFINE_integer(
name='max_train_steps',
default=None,
help='Maximum number of training steps to perform (None=no limit)',
)
# Small example network, achieves ~95% test set accuracy =======================
# Network parameters.
NUM_LAYERS = flags.DEFINE_integer(
name='num_layers',
default=2,
help='Number of network layers',
)
NEURONS_PER_LAYER = flags.DEFINE_integer(
name='neurons_per_layer',
default=100,
help='Number of neurons per layer',
)
CONTEXT_DIM = flags.DEFINE_integer(
name='context_dim',
default=1,
help='Context vector size',
)
# Learning rate schedule.
MAX_LR = flags.DEFINE_float(
name='max_lr',
default=0.003,
help='Maximum learning rate',
)
LR_CONSTANT = flags.DEFINE_float(
name='lr_constant',
default=1.0,
help='Learning rate constant parameter',
)
LR_DECAY = flags.DEFINE_float(
name='lr_decay',
default=0.1,
help='Learning rate decay parameter',
)
# Logging parameters.
EVALUATE_EVERY = flags.DEFINE_integer(
name='evaluate_every',
default=1000,
help='Number of training steps per evaluation epoch',
)
def main(unused_argv):
# Load MNIST dataset =========================================================
mnist_data, info = utils.load_deskewed_mnist(
name='mnist', batch_size=-1, with_info=True)
num_classes = info.features['label'].num_classes
(train_images, train_labels) = (mnist_data['train']['image'],
mnist_data['train']['label'])
(test_images, test_labels) = (mnist_data['test']['image'],
mnist_data['test']['label'])
# Build a (binary) GLN classifier ============================================
def network_factory():
def gln_factory():
output_sizes = [NEURONS_PER_LAYER.value] * NUM_LAYERS.value + [1]
return bernoulli.GatedLinearNetwork(
output_sizes=output_sizes, context_dim=CONTEXT_DIM.value)
return bernoulli.LastNeuronAggregator(gln_factory)
def extract_features(image):
mean, stddev = utils.MeanStdEstimator()(image)
standardized_img = (image - mean) / (stddev + 1.)
inputs = rlax.sigmoid(standardized_img)
side_info = standardized_img
return inputs, side_info
def inference_fn(image, *args, **kwargs):
inputs, side_info = extract_features(image)
return network_factory().inference(inputs, side_info, *args, **kwargs)
def update_fn(image, *args, **kwargs):
inputs, side_info = extract_features(image)
return network_factory().update(inputs, side_info, *args, **kwargs)
init_, inference_ = hk.without_apply_rng(
hk.transform_with_state(inference_fn))
_, update_ = hk.without_apply_rng(hk.transform_with_state(update_fn))
# Map along class dimension to create a one-vs-all classifier ================
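  # Each class gets its own binary GLN: parameters and state carry a leading
  # axis of size `num_classes`, and jax.vmap maps the per-class init/inference/
  # update functions over that axis. At evaluation time the predicted class is
  # the argmax over the per-class Bernoulli outputs.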
@jax.jit
def init(dummy_image, key):
"""One-vs-all classifier init fn."""
dummy_images = jnp.stack([dummy_image] * num_classes, axis=0)
keys = jax.random.split(key, num_classes)
return jax.vmap(init_, in_axes=(0, 0))(keys, dummy_images)
@jax.jit
def accuracy(params, state, image, label):
"""One-vs-all classifier inference fn."""
fn = jax.vmap(inference_, in_axes=(0, 0, None))
predictions, unused_state = fn(params, state, image)
return (jnp.argmax(predictions) == label).astype(jnp.float32)
@jax.jit
def update(params, state, step, image, label):
"""One-vs-all classifier update fn."""
# Learning rate schedules.
learning_rate = jnp.minimum(
MAX_LR.value, LR_CONSTANT.value / (1. + LR_DECAY.value * step))
# Update weights and report log-loss.
targets = hk.one_hot(jnp.asarray(label), num_classes)
fn = jax.vmap(update_, in_axes=(0, 0, None, 0, None))
out = fn(params, state, image, targets, learning_rate)
(params, unused_predictions, log_loss), state = out
return (jnp.mean(log_loss), params), state
# Train on train split =======================================================
dummy_image = train_images[0]
params, state = init(dummy_image, jax.random.PRNGKey(42))
for step, (image, label) in enumerate(zip(train_images, train_labels), 1):
(unused_loss, params), state = update(
params,
state,
step,
image,
label,
)
# Evaluate on test split ===================================================
if not step % EVALUATE_EVERY.value:
batch_accuracy = jax.vmap(accuracy, in_axes=(None, None, 0, 0))
accuracies = batch_accuracy(params, state, test_images, test_labels)
total_accuracy = float(jnp.mean(accuracies))
# Report statistics.
print({
'step': step,
'accuracy': float(total_accuracy),
})
if MAX_TRAIN_STEPS.value is not None and step >= MAX_TRAIN_STEPS.value:
return
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | gated_linear_networks/examples/bernoulli_mnist.py |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku modules for feature processing."""
import copy
from typing import Tuple
import chex
import haiku as hk
import jax.numpy as jnp
import numpy as np
from scipy.ndimage import interpolation
import tensorflow_datasets as tfds
Array = chex.Array
def _moments(image):
"""Compute the first and second moments of a given image."""
c0, c1 = np.mgrid[:image.shape[0], :image.shape[1]]
total_image = np.sum(image)
m0 = np.sum(c0 * image) / total_image
m1 = np.sum(c1 * image) / total_image
m00 = np.sum((c0 - m0)**2 * image) / total_image
m11 = np.sum((c1 - m1)**2 * image) / total_image
m01 = np.sum((c0 - m0) * (c1 - m1) * image) / total_image
mu_vector = np.array([m0, m1])
covariance_matrix = np.array([[m00, m01], [m01, m11]])
return mu_vector, covariance_matrix
def _deskew(image):
"""Image deskew."""
c, v = _moments(image)
alpha = v[0, 1] / v[0, 0]
affine = np.array([[1, 0], [alpha, 1]])
ocenter = np.array(image.shape) / 2.0
offset = c - np.dot(affine, ocenter)
return interpolation.affine_transform(image, affine, offset=offset)
def _deskew_dataset(dataset):
"""Dataset deskew."""
deskewed = copy.deepcopy(dataset)
for k, before in dataset.items():
images = before["image"]
num_images = images.shape[0]
after = np.stack([_deskew(i) for i in np.squeeze(images, axis=-1)], axis=0)
deskewed[k]["image"] = np.reshape(after, (num_images, -1))
return deskewed
def load_deskewed_mnist(*a, **k):
"""Returns deskewed MNIST numpy dataset."""
mnist_data, info = tfds.load(*a, **k)
mnist_data = tfds.as_numpy(mnist_data)
deskewed_data = _deskew_dataset(mnist_data)
return deskewed_data, info
class MeanStdEstimator(hk.Module):
"""Online mean and standard deviation estimator using Welford's algorithm."""
def __call__(self, sample: jnp.DeviceArray) -> Tuple[Array, Array]:
if len(sample.shape) > 1:
raise ValueError("sample must be a rank 0 or 1 DeviceArray.")
count = hk.get_state("count", shape=(), dtype=jnp.int32, init=jnp.zeros)
mean = hk.get_state(
"mean", shape=sample.shape, dtype=jnp.float32, init=jnp.zeros)
m2 = hk.get_state(
"m2", shape=sample.shape, dtype=jnp.float32, init=jnp.zeros)
count += 1
delta = sample - mean
mean += delta / count
delta_2 = sample - mean
m2 += delta * delta_2
hk.set_state("count", count)
hk.set_state("mean", mean)
hk.set_state("m2", m2)
stddev = jnp.sqrt(m2 / count)
return mean, stddev
| deepmind-research-master | gated_linear_networks/examples/utils.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
import collections
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class GAN(object):
"""Standard generative adversarial network setup.
The aim of the generator is to generate samples which fool a discriminator.
Does not make any assumptions about the discriminator and generator loss
functions.
Trained module components:
* discriminator
* generator
For the standard GAN algorithm, generator_inputs is a vector of noise (either
Gaussian or uniform).
"""
def __init__(self, discriminator, generator,
num_z_iters=None, z_step_size=None,
z_project_method=None, optimisation_cost_weight=None):
"""Constructs the module.
Args:
discriminator: The discriminator network. A sonnet module. See `nets.py`.
generator: The generator network. A sonnet module. For examples, see
`nets.py`.
num_z_iters: an integer, the number of latent optimisation steps.
      z_step_size: a float, latent optimisation step size.
z_project_method: the method for projecting latent after optimisation,
a string from {'norm', 'clip'}.
optimisation_cost_weight: a float, how much to penalise the distance of z
moved by latent optimisation.
"""
self._discriminator = discriminator
self.generator = generator
self.num_z_iters = num_z_iters
self.z_project_method = z_project_method
if z_step_size:
self._log_step_size_module = snt.TrainableVariable(
[],
initializers={'w': tf.constant_initializer(math.log(z_step_size))})
self.z_step_size = tf.exp(self._log_step_size_module())
self._optimisation_cost_weight = optimisation_cost_weight
def connect(self, data, generator_inputs):
"""Connects the components and returns the losses, outputs and debug ops.
Args:
      data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on
        the rank of this tensor, but it has to be compatible with the shapes
        expected by the discriminator.
generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
        have to have the same batch size as the `data` tensor. There are no
constraints on the rank of this tensor, but it has to be compatible
with the shapes the generator network supports as inputs.
Returns:
      A `ModelOutputs` instance.
"""
samples, optimised_z = utils.optimise_and_sample(
generator_inputs, self, data, is_training=True)
optimisation_cost = utils.get_optimisation_cost(generator_inputs,
optimised_z)
    # Compute discriminator logits on real data and on generated samples.
disc_data_logits = self._discriminator(data)
disc_sample_logits = self._discriminator(samples)
disc_data_loss = utils.cross_entropy_loss(
disc_data_logits,
tf.ones(tf.shape(disc_data_logits[:, 0]), dtype=tf.int32))
disc_sample_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.zeros(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
disc_loss = disc_data_loss + disc_sample_loss
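    # The generator uses the non-saturating loss: rather than minimising
    # log(1 - D(G(z))), it minimises -log D(G(z)) by labelling its own
    # samples as real (ones) in the cross-entropy below.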
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
optimization_components = self._build_optimization_components(
discriminator_loss=disc_loss, generator_loss=generator_loss,
optimisation_cost=optimisation_cost)
debug_ops = {}
debug_ops['disc_data_loss'] = disc_data_loss
debug_ops['disc_sample_loss'] = disc_sample_loss
debug_ops['disc_loss'] = disc_loss
debug_ops['gen_loss'] = generator_loss
debug_ops['opt_cost'] = optimisation_cost
if hasattr(self, 'z_step_size'):
debug_ops['z_step_size'] = self.z_step_size
return utils.ModelOutputs(
optimization_components, debug_ops)
def gen_loss_fn(self, data, samples):
"""Generator loss as latent optimisation's error function."""
del data
disc_sample_logits = self._discriminator(samples)
generator_loss = utils.cross_entropy_loss(
disc_sample_logits,
tf.ones(tf.shape(disc_sample_logits[:, 0]), dtype=tf.int32))
return generator_loss
def _build_optimization_components(
self, generator_loss=None, discriminator_loss=None,
optimisation_cost=None):
"""Create the optimization components for this module."""
discriminator_vars = _get_and_check_variables(self._discriminator)
generator_vars = _get_and_check_variables(self.generator)
if hasattr(self, '_log_step_size_module'):
step_vars = _get_and_check_variables(self._log_step_size_module)
generator_vars += step_vars
optimization_components = collections.OrderedDict()
optimization_components['disc'] = utils.OptimizationComponent(
discriminator_loss, discriminator_vars)
if self._optimisation_cost_weight:
generator_loss += self._optimisation_cost_weight * optimisation_cost
optimization_components['gen'] = utils.OptimizationComponent(
generator_loss, generator_vars)
return optimization_components
def get_variables(self):
disc_vars = _get_and_check_variables(self._discriminator)
gen_vars = _get_and_check_variables(self.generator)
return disc_vars, gen_vars
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
| deepmind-research-master | cs_gan/gan.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cs_gan import cs
from cs_gan import file_utils
from cs_gan import utils
tfd = tfp.distributions
flags.DEFINE_string(
'mode', 'recons', 'Model mode.')
flags.DEFINE_integer(
'num_training_iterations', 10000000,
'Number of training iterations.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_integer(
'num_measurements', 25, 'The number of measurements')
flags.DEFINE_integer(
'num_latents', 100, 'The number of latents')
flags.DEFINE_integer(
'num_z_iters', 3, 'The number of latent optimisation steps.')
flags.DEFINE_float(
'z_step_size', 0.01, 'Step size for latent optimisation.')
flags.DEFINE_string(
'z_project_method', 'norm', 'The method to project z.')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
flags.DEFINE_string(
    'dataset', 'mnist', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_float('learning_rate', 1e-4, 'Learning rate.')
flags.DEFINE_string(
'output_dir', '/tmp/cs_gan/cs', 'Location where to save output files.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
FLAGS.batch_size)
  logging.info('Learning rate: %g', FLAGS.learning_rate)
# Construct optimizers.
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, FLAGS.num_measurements)
model = cs.CS(metric_net, generator,
FLAGS.num_z_iters, FLAGS.z_step_size, FLAGS.z_project_method)
prior = utils.make_prior(FLAGS.num_latents)
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
debug_ops = model_output.debug_ops
reconstructions, _ = utils.optimise_and_sample(
generator_inputs, model, images, is_training=False)
global_step = tf.train.get_or_create_global_step()
update_op = optimizer.minimize(
optimization_components.loss,
var_list=optimization_components.vars,
global_step=global_step)
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'reconstructions'))
# Hooks.
debug_ops['it'] = global_step
# Abort training on Nans.
nan_hook = tf.train.NanTensorHook(optimization_components.loss)
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
  hooks = [checkpoint_saver_hook, nan_hook, step_counter_hook,
loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for i in range(FLAGS.num_training_iterations):
sess.run(update_op)
if i % FLAGS.export_every == 0:
reconstructions_np, data_np = sess.run([reconstructions, images])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
reconstructions_np = data_processor.postprocess(reconstructions_np)
sample_exporter.save(reconstructions_np, 'reconstructions')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | cs_gan/main_cs.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
flags.DEFINE_integer(
'num_training_iterations', 1200000,
'Number of training iterations.')
flags.DEFINE_string(
'ode_mode', 'rk4', 'Integration method.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_float(
    'grad_reg_weight', 0.02, 'Discriminator gradient regularisation weight.')
flags.DEFINE_string(
'opt_name', 'gd', 'Name of the optimiser (gd|adam).')
flags.DEFINE_bool(
    'schedule_lr', True, 'Whether to use a learning rate schedule.')
flags.DEFINE_bool(
'reg_first_grad_only', True, 'Whether only to regularise the first grad.')
flags.DEFINE_integer(
'num_latents', 128, 'The number of latents')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'image_metrics_every_step', 1000,
'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
# Use 50k to reproduce scores from the paper. Default to 10k here to avoid the
# runtime error caused by too large graph with 50k samples on some machines.
flags.DEFINE_integer(
'num_eval_samples', 10000,
'The number of samples used to evaluate FID/IS.')
flags.DEFINE_string(
'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_string(
'output_dir', '/tmp/ode_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 4e-2, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 4e-2, 'Generator Learning rate.')
flags.DEFINE_bool(
'run_real_data_metrics', False,
'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
'run_sample_metrics', True,
'Whether or not to run image metrics on samples.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def _copy_vars(v_list):
"""Copy variables in v_list."""
t_list = []
for v in v_list:
t_list.append(tf.identity(v))
return t_list
def _restore_vars(v_list, t_list):
"""Restore variables in v_list from t_list."""
ops = []
for v, t in zip(v_list, t_list):
ops.append(v.assign(t))
return ops
def _scale_vars(s, v_list):
"""Scale all variables in v_list by s."""
return [s * v for v in v_list]
def _acc_grads(g_sum, g_w, g):
"""Accumulate gradients in g, weighted by g_w."""
return [g_sum_i + g_w * g_i for g_sum_i, g_i in zip(g_sum, g)]
def _compute_reg_grads(gen_grads, disc_vars):
"""Compute gradients norm (this is an upper-bpund of the full-batch norm)."""
gen_norm = tf.accumulate_n([tf.reduce_sum(u * u) for u in gen_grads])
disc_reg_grads = tf.gradients(gen_norm, disc_vars)
return disc_reg_grads
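# In other words, the regulariser is R = ||d L_gen / d theta_gen||^2 evaluated
# on the current batch, and its gradient with respect to the discriminator
# variables is later added to the discriminator update in run_model(), scaled
# by `disc_reg_weight` (set from the `grad_reg_weight` flag).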
def run_model(prior, images, model, disc_reg_weight):
"""Run the model with new data and samples.
Args:
prior: the noise source as the generator input.
images: images sampled from dataset.
model: a GAN model defined in gan.py.
    disc_reg_weight: regularisation weight for discriminator gradients.
Returns:
    debug_ops: statistics from the model, see gan.py for more details.
disc_grads: discriminator gradients.
gen_grads: generator gradients.
"""
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
disc_grads = tf.gradients(
optimization_components['disc'].loss,
optimization_components['disc'].vars)
gen_grads = tf.gradients(
optimization_components['gen'].loss,
optimization_components['gen'].vars)
if disc_reg_weight > 0.0:
reg_grads = _compute_reg_grads(gen_grads,
optimization_components['disc'].vars)
disc_grads = _acc_grads(disc_grads, disc_reg_weight, reg_grads)
debug_ops = model_output.debug_ops
return debug_ops, disc_grads, gen_grads
def update_model(model, disc_grads, gen_grads, disc_opt, gen_opt,
global_step, update_scale):
"""Update model with gradients."""
disc_vars, gen_vars = model.get_variables()
with tf.control_dependencies(gen_grads + disc_grads):
disc_update_op = disc_opt.apply_gradients(
zip(_scale_vars(update_scale, disc_grads),
disc_vars))
gen_update_op = gen_opt.apply_gradients(
zip(_scale_vars(update_scale, gen_grads),
gen_vars),
global_step=global_step)
update_op = tf.group([disc_update_op, gen_update_op])
return update_op
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
# Compute the batch-size multiplier
if FLAGS.ode_mode == 'rk2':
batch_mul = 2
elif FLAGS.ode_mode == 'rk4':
batch_mul = 4
else:
batch_mul = 1
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
int(FLAGS.batch_size * batch_mul))
image_splits = tf.split(images, batch_mul)
  logging.info('Generator learning rate: %g', FLAGS.gen_lr)
  logging.info('Discriminator learning rate: %g', FLAGS.disc_lr)
global_step = tf.train.get_or_create_global_step()
# Construct optimizers.
if FLAGS.opt_name == 'adam':
disc_opt = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_opt = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
elif FLAGS.opt_name == 'gd':
if FLAGS.schedule_lr:
gd_disc_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.disc_lr / 4., FLAGS.disc_lr, FLAGS.disc_lr / 2.],
boundaries=[500, 400000])
gd_gen_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.gen_lr / 4., FLAGS.gen_lr, FLAGS.gen_lr / 2.],
boundaries=[500, 400000])
else:
gd_disc_lr = FLAGS.disc_lr
gd_gen_lr = FLAGS.gen_lr
disc_opt = tf.train.GradientDescentOptimizer(gd_disc_lr)
gen_opt = tf.train.GradientDescentOptimizer(gd_gen_lr)
else:
    raise ValueError('Unknown optimiser name!')
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, use_sn=False)
model = gan.GAN(metric_net, generator)
prior = utils.make_prior(FLAGS.num_latents)
# Setup ODE parameters.
if FLAGS.ode_mode == 'rk2':
ode_grad_weights = [0.5, 0.5]
step_scale = [1.0]
elif FLAGS.ode_mode == 'rk4':
ode_grad_weights = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
step_scale = [0.5, 0.5, 1.]
elif FLAGS.ode_mode == 'euler':
# Euler update
ode_grad_weights = [1.0]
step_scale = []
else:
raise ValueError('Unknown ODE mode!')
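  # These are the classical Runge-Kutta coefficients: `ode_grad_weights` are
  # the weights used to average the gradient evaluations (RK4 uses
  # [1/6, 1/3, 1/3, 1/6]), and `step_scale` gives the fraction of a full step
  # taken before each intermediate evaluation (RK4 evaluates at 0, h/2, h/2
  # and h).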
# Extra steps for RK updates.
num_extra_steps = len(step_scale)
if FLAGS.reg_first_grad_only:
first_reg_weight = FLAGS.grad_reg_weight / ode_grad_weights[0]
other_reg_weight = 0.0
else:
first_reg_weight = FLAGS.grad_reg_weight
other_reg_weight = FLAGS.grad_reg_weight
debug_ops, disc_grads, gen_grads = run_model(prior, image_splits[0],
model, first_reg_weight)
disc_vars, gen_vars = model.get_variables()
final_disc_grads = _scale_vars(ode_grad_weights[0], disc_grads)
final_gen_grads = _scale_vars(ode_grad_weights[0], gen_grads)
restore_ops = []
# Preparing for further RK steps.
if num_extra_steps > 0:
# copy the variables before they are changed by update_op
saved_disc_vars = _copy_vars(disc_vars)
saved_gen_vars = _copy_vars(gen_vars)
# Enter RK loop.
with tf.control_dependencies(saved_disc_vars + saved_gen_vars):
step_deps = []
for i_step in range(num_extra_steps):
with tf.control_dependencies(step_deps):
# Compute gradient steps for intermediate updates.
update_op = update_model(
model, disc_grads, gen_grads, disc_opt, gen_opt,
None, step_scale[i_step])
with tf.control_dependencies([update_op]):
_, disc_grads, gen_grads = run_model(
prior, image_splits[i_step + 1], model, other_reg_weight)
            # Accumulate gradients for final update.
final_disc_grads = _acc_grads(final_disc_grads,
ode_grad_weights[i_step + 1],
disc_grads)
final_gen_grads = _acc_grads(final_gen_grads,
ode_grad_weights[i_step + 1],
gen_grads)
# Make new restore_op for each step.
restore_ops = []
restore_ops += _restore_vars(disc_vars, saved_disc_vars)
restore_ops += _restore_vars(gen_vars, saved_gen_vars)
step_deps = restore_ops
with tf.control_dependencies(restore_ops):
update_op = update_model(
model, final_disc_grads, final_gen_grads, disc_opt, gen_opt,
global_step, 1.0)
samples = generator(prior.sample(FLAGS.batch_size), is_training=False)
# Get data needed to compute FID. We also compute metrics on
# real data as a sanity check and as a reference point.
eval_real_data = utils.get_real_data_for_eval(FLAGS.num_eval_samples,
FLAGS.dataset,
split='train')
def sample_fn(x):
return utils.optimise_and_sample(x, module=model,
data=None, is_training=False)[0]
if FLAGS.run_sample_metrics:
sample_metrics = image_metrics.get_image_metrics_for_samples(
eval_real_data, sample_fn,
prior, data_processor,
num_eval_samples=FLAGS.num_eval_samples)
else:
sample_metrics = {}
if FLAGS.run_real_data_metrics:
data_metrics = image_metrics.get_image_metrics(
eval_real_data, eval_real_data)
else:
data_metrics = {}
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'samples'))
# Hooks.
debug_ops['it'] = global_step
  # Abort training on NaNs.
nan_disc_hook = tf.train.NanTensorHook(debug_ops['disc_loss'])
nan_gen_hook = tf.train.NanTensorHook(debug_ops['gen_loss'])
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
metrics_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.image_metrics_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(sample_metrics))
  hooks = [checkpoint_saver_hook, metrics_summary_saver_hook,
           nan_disc_hook, nan_gen_hook, step_counter_hook,
           loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for key, value in sess.run(data_metrics).items():
      logging.info('%s: %f', key, value)
for i in range(FLAGS.num_training_iterations):
sess.run(update_op)
if i % FLAGS.export_every == 0:
samples_np, data_np = sess.run([samples, image_splits[0]])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
samples_np = data_processor.postprocess(samples_np)
sample_exporter.save(samples_np, 'samples')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
tf.enable_resource_variables()
app.run(main)
| deepmind-research-master | cs_gan/main_ode.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import utils
class CS(object):
"""Compressed Sensing Module."""
def __init__(self, metric_net, generator,
num_z_iters, z_step_size, z_project_method):
"""Constructs the module.
Args:
metric_net: the measurement network.
generator: The generator network. A sonnet module. For examples, see
`nets.py`.
num_z_iters: an integer, the number of latent optimisation steps.
z_step_size: an integer, latent optimisation step size.
z_project_method: the method for projecting latent after optimisation,
a string from {'norm', 'clip'}.
"""
self._measure = metric_net
self.generator = generator
self.num_z_iters = num_z_iters
self.z_project_method = z_project_method
self._log_step_size_module = snt.TrainableVariable(
[],
initializers={'w': tf.constant_initializer(math.log(z_step_size))})
self.z_step_size = tf.exp(self._log_step_size_module())
def connect(self, data, generator_inputs):
"""Connects the components and returns the losses, outputs and debug ops.
Args:
data: a `tf.Tensor`: `[batch_size, ...]`. There are no constraints on the
        rank of this tensor, but it has to be compatible with the shapes
        expected by the discriminator.
generator_inputs: a `tf.Tensor`: `[g_in_batch_size, ...]`. It does not
        have to have the same batch size as the `data` tensor. There are no
        constraints on the rank of this tensor, but it has to be compatible
with the shapes the generator network supports as inputs.
Returns:
An `ModelOutputs` instance.
"""
samples, optimised_z = utils.optimise_and_sample(
generator_inputs, self, data, is_training=True)
optimisation_cost = utils.get_optimisation_cost(generator_inputs,
optimised_z)
debug_ops = {}
initial_samples = self.generator(generator_inputs, is_training=True)
generator_loss = tf.reduce_mean(self.gen_loss_fn(data, samples))
# compute the RIP loss
# (\sqrt{F(x_1 - x_2)^2} - \sqrt{(x_1 - x_2)^2})^2
# as a triplet loss for 3 pairs of images.
r1 = self._get_rip_loss(samples, initial_samples)
r2 = self._get_rip_loss(samples, data)
r3 = self._get_rip_loss(initial_samples, data)
rip_loss = tf.reduce_mean((r1 + r2 + r3) / 3.0)
total_loss = generator_loss + rip_loss
optimization_components = self._build_optimization_components(
generator_loss=total_loss)
debug_ops['rip_loss'] = rip_loss
debug_ops['recons_loss'] = tf.reduce_mean(
tf.norm(snt.BatchFlatten()(samples)
- snt.BatchFlatten()(data), axis=-1))
debug_ops['z_step_size'] = self.z_step_size
debug_ops['opt_cost'] = optimisation_cost
debug_ops['gen_loss'] = generator_loss
return utils.ModelOutputs(
optimization_components, debug_ops)
def _get_rip_loss(self, img1, img2):
r"""Compute the RIP loss from two images.
The RIP loss: (\sqrt{F(x_1 - x_2)^2} - \sqrt{(x_1 - x_2)^2})^2
Args:
img1: an image (x_1), 4D tensor of shape [batch_size, W, H, C].
img2: an other image (x_2), 4D tensor of shape [batch_size, W, H, C].
"""
m1 = self._measure(img1)
m2 = self._measure(img2)
img_diff_norm = tf.norm(snt.BatchFlatten()(img1)
- snt.BatchFlatten()(img2), axis=-1)
m_diff_norm = tf.norm(m1 - m2, axis=-1)
return tf.square(img_diff_norm - m_diff_norm)
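  # Illustrative note (added): for a linear measurement operator F, m1 - m2
  # equals F(img1 - img2), so this matches the (||F(x1 - x2)|| - ||x1 - x2||)^2
  # form in the docstring; the returned tensor has shape [batch_size].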
def _get_measurement_error(self, target_img, sample_img):
"""Compute the measurement error of sample images given the targets."""
m_targets = self._measure(target_img)
m_samples = self._measure(sample_img)
return tf.reduce_sum(tf.square(m_targets - m_samples), -1)
def gen_loss_fn(self, data, samples):
"""Generator loss as latent optimisation's error function."""
return self._get_measurement_error(data, samples)
def _build_optimization_components(
self, generator_loss=None, discriminator_loss=None):
"""Create the optimization components for this module."""
metric_vars = _get_and_check_variables(self._measure)
generator_vars = _get_and_check_variables(self.generator)
step_vars = _get_and_check_variables(self._log_step_size_module)
assert discriminator_loss is None
optimization_components = utils.OptimizationComponent(
generator_loss, generator_vars + metric_vars + step_vars)
return optimization_components
def _get_and_check_variables(module):
module_variables = module.get_all_variables()
if not module_variables:
raise ValueError(
'Module {} has no variables! Variables needed for training.'.format(
module.module_name))
# TensorFlow optimizers require lists to be passed in.
return list(module_variables)
| deepmind-research-master | cs_gan/cs.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for latent optimisation."""
import collections
import os
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from cs_gan import nets
tfd = tfp.distributions
class ModelOutputs(
collections.namedtuple('AdversarialModelOutputs',
['optimization_components', 'debug_ops'])):
"""All the information produced by the adversarial module.
Fields:
* `optimization_components`: A dictionary. Each entry in this dictionary
corresponds to a module to train using their own optimizer. The keys are
names of the components, and the values are `common.OptimizationComponent`
instances. The keys of this dict can be made keys of the configuration
used by the main train loop, to define the configuration of the
optimization details for each module.
* `debug_ops`: A dictionary, from string to a scalar `tf.Tensor`. Quantities
used for tracking training.
"""
class OptimizationComponent(
collections.namedtuple('OptimizationComponent', ['loss', 'vars'])):
"""Information needed by the optimizer to train modules.
Usage:
`optimizer.minimize(
        opt_component.loss, var_list=opt_component.vars)`
Fields:
* `loss`: A `tf.Tensor` the loss of the module.
* `vars`: A list of variables, the ones which will be used to minimize the
loss.
"""
def cross_entropy_loss(logits, expected):
"""The cross entropy classification loss between logits and expected values.
The loss proposed by the original GAN paper: https://arxiv.org/abs/1406.2661.
Args:
logits: a `tf.Tensor`, the model produced logits.
expected: a `tf.Tensor`, the expected output.
Returns:
A scalar `tf.Tensor`, the average loss obtained on the given inputs.
Raises:
ValueError: if the logits do not have shape [batch_size, 2].
"""
num_logits = logits.get_shape()[1]
if num_logits != 2:
raise ValueError(('Invalid number of logits for cross_entropy_loss! '
'cross_entropy_loss supports only 2 output logits!'))
return tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=expected))
def optimise_and_sample(init_z, module, data, is_training):
"""Optimising generator latent variables and sample."""
if module.num_z_iters is None or module.num_z_iters == 0:
z_final = init_z
else:
init_loop_vars = (0, _project_z(init_z, module.z_project_method))
loop_cond = lambda i, _: i < module.num_z_iters
def loop_body(i, z):
loop_samples = module.generator(z, is_training)
gen_loss = module.gen_loss_fn(data, loop_samples)
z_grad = tf.gradients(gen_loss, z)[0]
z -= module.z_step_size * z_grad
z = _project_z(z, module.z_project_method)
return i + 1, z
# Use the following static loop for debugging
# z = init_z
# for _ in xrange(num_z_iters):
# _, z = loop_body(0, z)
# z_final = z
_, z_final = tf.while_loop(loop_cond,
loop_body,
init_loop_vars)
return module.generator(z_final, is_training), z_final
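# Minimal usage sketch (added), assuming `module` exposes `generator`,
# `gen_loss_fn`, `num_z_iters`, `z_step_size` and `z_project_method`:
#   z_init = prior.sample(batch_size)
#   samples, z_opt = optimise_and_sample(z_init, module, data, is_training=True)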
def get_optimisation_cost(initial_z, optimised_z):
optimisation_cost = tf.reduce_mean(
tf.reduce_sum((optimised_z - initial_z)**2, -1))
return optimisation_cost
def _project_z(z, project_method='clip'):
"""To be used for projected gradient descent over z."""
if project_method == 'norm':
z_p = tf.nn.l2_normalize(z, axis=-1)
elif project_method == 'clip':
z_p = tf.clip_by_value(z, -1, 1)
else:
raise ValueError('Unknown project_method: {}'.format(project_method))
return z_p
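# Example (illustrative): with project_method='clip', a latent [[2.0, -3.0]] is
# mapped to [[1.0, -1.0]]; with 'norm', each latent vector is rescaled to unit
# L2 norm along the last axis.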
class DataProcessor(object):
def preprocess(self, x):
return x * 2 - 1
def postprocess(self, x):
return (x + 1) / 2.
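# Note (added): DataProcessor.preprocess maps images from [0, 1] to [-1, 1]
# (matching the tanh outputs of the generators in nets.py), and postprocess
# inverts this for export.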
def _get_np_data(data_processor, dataset, split='train'):
"""Get the dataset as numpy arrays."""
index = 0 if split == 'train' else 1
if dataset == 'mnist':
# Construct the dataset.
x, _ = tf.keras.datasets.mnist.load_data()[index]
    # Note: the images are loaded as uint8 in [0, 255], so convert to float.
x = x.astype(np.float32)
x = x / 255.
x = x.reshape((-1, 28, 28, 1))
if dataset == 'cifar':
x, _ = tf.keras.datasets.cifar10.load_data()[index]
x = x.astype(np.float32)
x = x / 255.
if data_processor:
# Normalize data if a processor is given.
x = data_processor.preprocess(x)
return x
def make_output_dir(output_dir):
logging.info('Creating output dir %s', output_dir)
if not tf.gfile.IsDirectory(output_dir):
tf.gfile.MakeDirs(output_dir)
def get_ckpt_dir(output_dir):
ckpt_dir = os.path.join(output_dir, 'ckpt')
if not tf.gfile.IsDirectory(ckpt_dir):
tf.gfile.MakeDirs(ckpt_dir)
return ckpt_dir
def get_real_data_for_eval(num_eval_samples, dataset, split='valid'):
data = _get_np_data(data_processor=None, dataset=dataset, split=split)
data = data[:num_eval_samples]
return tf.constant(data)
def get_summaries(ops):
summaries = []
for name, op in ops.items():
    # Log the op values before writing them to the summary.
    # We do this instead of a hook to ensure IS/FID are never computed twice.
print_op = tf.print(name, [op], output_stream=tf.logging.info)
with tf.control_dependencies([print_op]):
summary = tf.summary.scalar(name, op)
summaries.append(summary)
return summaries
def get_train_dataset(data_processor, dataset, batch_size):
"""Creates the training data tensors."""
x_train = _get_np_data(data_processor, dataset, split='train')
# Create the TF dataset.
dataset = tf.data.Dataset.from_tensor_slices(x_train)
# Shuffle and repeat the dataset for training.
# This is required because we want to do multiple passes through the entire
# dataset when training.
dataset = dataset.shuffle(100000).repeat()
# Batch the data and return the data batch.
one_shot_iterator = dataset.batch(batch_size).make_one_shot_iterator()
data_batch = one_shot_iterator.get_next()
return data_batch
def get_generator(dataset):
if dataset == 'mnist':
return nets.MLPGeneratorNet()
if dataset == 'cifar':
return nets.ConvGenNet()
def get_metric_net(dataset, num_outputs=2, use_sn=True):
if dataset == 'mnist':
return nets.MLPMetricNet(num_outputs)
if dataset == 'cifar':
return nets.ConvMetricNet(num_outputs, use_sn)
def make_prior(num_latents):
# Zero mean, unit variance prior.
prior_mean = tf.zeros(shape=(num_latents), dtype=tf.float32)
prior_scale = tf.ones(shape=(num_latents), dtype=tf.float32)
return tfd.Normal(loc=prior_mean, scale=prior_scale)
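# Example (illustrative): make_prior(128).sample(64) returns a [64, 128] tensor
# of standard-normal latents, since a tfd.Normal with vector parameters is a
# batch of independent univariate normals.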
| deepmind-research-master | cs_gan/utils.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute image metrics: IS, FID."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow_gan as tfgan
def get_image_metrics_for_samples(
real_images, generator, prior, data_processor, num_eval_samples):
"""Compute inception score and FID."""
max_classifier_batch = 10
num_batches = num_eval_samples // max_classifier_batch
def sample_fn(arg):
del arg
samples = generator(prior.sample(max_classifier_batch))
# Samples must be in [-1, 1], as expected by TFGAN.
# Resizing to appropriate size is done by TFGAN.
return samples
fake_outputs = tfgan.eval.sample_and_run_inception(
sample_fn,
sample_inputs=[1.0] * num_batches) # Dummy inputs.
fake_logits = fake_outputs['logits']
inception_score = tfgan.eval.classifier_score_from_logits(fake_logits)
real_outputs = tfgan.eval.run_inception(
data_processor.preprocess(real_images), num_batches=num_batches)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_outputs['pool_3'], fake_outputs['pool_3'])
return {
'inception_score': inception_score,
'fid': fid}
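# Note (added): with max_classifier_batch = 10, the metrics above are computed
# from num_eval_samples // 10 batches of generated samples, so num_eval_samples
# is effectively rounded down to a multiple of 10.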
| deepmind-research-master | cs_gan/image_metrics.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File utilities."""
import math
import os
import numpy as np
from PIL import Image
class FileExporter(object):
"""File exporter utilities."""
def __init__(self, path, grid_height=None, zoom=1):
"""Constructor.
Arguments:
path: The directory to save data to.
grid_height: How many data elements tall to make the grid, if appropriate.
The width will be chosen based on height. If None, automatically
determined.
zoom: How much to zoom in each data element by, if appropriate.
"""
if not os.path.exists(path):
os.makedirs(path)
self._path = path
self._zoom = zoom
self._grid_height = grid_height
def _reshape(self, data):
"""Reshape given data into image format."""
batch_size, height, width, n_channels = data.shape
if self._grid_height:
grid_height = self._grid_height
else:
grid_height = int(math.floor(math.sqrt(batch_size)))
grid_width = int(math.ceil(batch_size/grid_height))
if n_channels == 1:
data = np.tile(data, (1, 1, 1, 3))
n_channels = 3
if n_channels != 3:
raise ValueError('Image batch must have either 1 or 3 channels, but '
'was {}'.format(n_channels))
shape = (height * grid_height, width * grid_width, n_channels)
buf = np.full(shape, 255, dtype=np.uint8)
multiplier = 1 if data.dtype in (np.int32, np.int64) else 255
for k in range(batch_size):
i = k // grid_width
j = k % grid_width
arr = data[k]
x, y = i * height, j * width
buf[x:x + height, y:y + width, :] = np.clip(
multiplier * arr, 0, 255).astype(np.uint8)
if self._zoom > 1:
buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)
return buf
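  # Example (illustrative): a batch of 64 RGB images of size 32x32 with the
  # default settings is tiled into an 8x8 grid, i.e. a (256, 256, 3) uint8
  # array.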
def save(self, data, name):
data = self._reshape(data)
relative_name = '{}_last.png'.format(name)
target_file = os.path.join(self._path, relative_name)
img = Image.fromarray(data)
img.save(target_file, format='PNG')
| deepmind-research-master | cs_gan/file_utils.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
flags.DEFINE_integer(
'num_training_iterations', 200000,
'Number of training iterations.')
flags.DEFINE_integer(
'batch_size', 64, 'Training batch size.')
flags.DEFINE_integer(
'num_latents', 128, 'The number of latents')
flags.DEFINE_integer(
'summary_every_step', 1000,
'The interval at which to log debug ops.')
flags.DEFINE_integer(
'image_metrics_every_step', 2000,
'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
'export_every', 10,
'The interval at which to export samples.')
flags.DEFINE_integer(
'num_eval_samples', 10000,
'The number of samples used to evaluate FID/IS')
flags.DEFINE_string(
    'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_float(
'optimisation_cost_weight', 3., 'weight for latent optimisation cost.')
flags.DEFINE_integer(
    'num_z_iters', 3, 'The number of latent optimisation steps. '
'It falls back to vanilla GAN when num_z_iters is set to 0.')
flags.DEFINE_float(
'z_step_size', 0.01, 'Step size for latent optimisation.')
flags.DEFINE_string(
'z_project_method', 'norm', 'The method to project z.')
flags.DEFINE_string(
'output_dir', '/tmp/cs_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 2e-4, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 2e-4, 'Generator Learning rate.')
flags.DEFINE_bool(
'run_real_data_metrics', False,
'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
'run_sample_metrics', True,
'Whether or not to run image metrics on samples.')
FLAGS = flags.FLAGS
# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
FLAGS.batch_size)
  logging.info('Generator learning rate: %g', FLAGS.gen_lr)
  logging.info('Discriminator learning rate: %g', FLAGS.disc_lr)
# Construct optimizers.
disc_optimizer = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_optimizer = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset)
model = gan.GAN(metric_net, generator,
FLAGS.num_z_iters, FLAGS.z_step_size,
FLAGS.z_project_method, FLAGS.optimisation_cost_weight)
prior = utils.make_prior(FLAGS.num_latents)
generator_inputs = prior.sample(FLAGS.batch_size)
model_output = model.connect(images, generator_inputs)
optimization_components = model_output.optimization_components
debug_ops = model_output.debug_ops
samples = generator(generator_inputs, is_training=False)
global_step = tf.train.get_or_create_global_step()
# We pass the global step both to the disc and generator update ops.
# This means that the global step will not be the same as the number of
# iterations, but ensures that hooks which rely on global step work correctly.
disc_update_op = disc_optimizer.minimize(
optimization_components['disc'].loss,
var_list=optimization_components['disc'].vars,
global_step=global_step)
gen_update_op = gen_optimizer.minimize(
optimization_components['gen'].loss,
var_list=optimization_components['gen'].vars,
global_step=global_step)
# Get data needed to compute FID. We also compute metrics on
# real data as a sanity check and as a reference point.
eval_real_data = utils.get_real_data_for_eval(FLAGS.num_eval_samples,
FLAGS.dataset,
split='train')
def sample_fn(x):
return utils.optimise_and_sample(x, module=model,
data=None, is_training=False)[0]
if FLAGS.run_sample_metrics:
sample_metrics = image_metrics.get_image_metrics_for_samples(
eval_real_data, sample_fn,
prior, data_processor,
num_eval_samples=FLAGS.num_eval_samples)
else:
sample_metrics = {}
if FLAGS.run_real_data_metrics:
data_metrics = image_metrics.get_image_metrics(
eval_real_data, eval_real_data)
else:
data_metrics = {}
sample_exporter = file_utils.FileExporter(
os.path.join(FLAGS.output_dir, 'samples'))
# Hooks.
debug_ops['it'] = global_step
  # Abort training on NaNs.
nan_disc_hook = tf.train.NanTensorHook(optimization_components['disc'].loss)
nan_gen_hook = tf.train.NanTensorHook(optimization_components['gen'].loss)
# Step counter.
  step_counter_hook = tf.train.StepCounterHook()
checkpoint_saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)
loss_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.summary_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(debug_ops))
metrics_summary_saver_hook = tf.train.SummarySaverHook(
save_steps=FLAGS.image_metrics_every_step,
output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
summary_op=utils.get_summaries(sample_metrics))
  hooks = [checkpoint_saver_hook, metrics_summary_saver_hook,
           nan_disc_hook, nan_gen_hook, step_counter_hook,
           loss_summary_saver_hook]
# Start training.
with tf.train.MonitoredSession(hooks=hooks) as sess:
logging.info('starting training')
for key, value in sess.run(data_metrics).items():
      logging.info('%s: %f', key, value)
for i in range(FLAGS.num_training_iterations):
sess.run(disc_update_op)
sess.run(gen_update_op)
if i % FLAGS.export_every == 0:
samples_np, data_np = sess.run([samples, images])
# Create an object which gets data and does the processing.
data_np = data_processor.postprocess(data_np)
samples_np = data_processor.postprocess(samples_np)
sample_exporter.save(samples_np, 'samples')
sample_exporter.save(data_np, 'data')
if __name__ == '__main__':
app.run(main)
| deepmind-research-master | cs_gan/main.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network utilities."""
import functools
import re
import numpy as np
import sonnet as snt
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
def _sn_custom_getter():
def name_filter(name):
match = re.match(r'.*w(_.*)?$', name)
return match is not None
return tfgan.features.spectral_normalization_custom_getter(
name_filter=name_filter)
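# Note (added): the name filter matches variable names ending in 'w' (or in
# 'w_' plus a suffix), i.e. Sonnet weight variables, so spectral normalisation
# is applied to weights but not to biases.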
class ConvGenNet(snt.AbstractModule):
"""As in the SN paper."""
def __init__(self, name='conv_gen'):
super(ConvGenNet, self).__init__(name=name)
def _build(self, inputs, is_training):
batch_size = inputs.get_shape().as_list()[0]
first_shape = [4, 4, 512]
norm_ctor = snt.BatchNormV2
norm_ctor_config = {'scale': True}
up_tensor = snt.Linear(np.prod(first_shape))(inputs)
first_tensor = tf.reshape(up_tensor, shape=[batch_size] + first_shape)
net = snt.nets.ConvNet2DTranspose(
output_channels=[256, 128, 64, 3],
output_shapes=[(8, 8), (16, 16), (32, 32), (32, 32)],
kernel_shapes=[(4, 4), (4, 4), (4, 4), (3, 3)],
strides=[2, 2, 2, 1],
normalization_ctor=norm_ctor,
normalization_kwargs=norm_ctor_config,
normalize_final=False,
paddings=[snt.SAME], activate_final=False, activation=tf.nn.relu)
output = net(first_tensor, is_training=is_training)
return tf.nn.tanh(output)
class ConvMetricNet(snt.AbstractModule):
"""Convolutional discriminator (metric) architecture."""
def __init__(self, num_outputs=2, use_sn=True, name='sn_metric'):
super(ConvMetricNet, self).__init__(name=name)
self._num_outputs = num_outputs
self._use_sn = use_sn
def _build(self, inputs):
def build_net():
net = snt.nets.ConvNet2D(
output_channels=[64, 64, 128, 128, 256, 256, 512],
kernel_shapes=[
(3, 3), (4, 4), (3, 3), (4, 4), (3, 3), (4, 4), (3, 3)],
strides=[1, 2, 1, 2, 1, 2, 1],
paddings=[snt.SAME], activate_final=True,
activation=functools.partial(tf.nn.leaky_relu, alpha=0.1))
linear = snt.Linear(self._num_outputs)
output = linear(snt.BatchFlatten()(net(inputs)))
return output
if self._use_sn:
with tf.variable_scope('', custom_getter=_sn_custom_getter()):
output = build_net()
else:
output = build_net()
return output
class MLPGeneratorNet(snt.AbstractModule):
"""MNIST generator net."""
def __init__(self, name='mlp_generator'):
super(MLPGeneratorNet, self).__init__(name=name)
def _build(self, inputs, is_training=True):
del is_training
net = snt.nets.MLP([500, 500, 784], activation=tf.nn.leaky_relu)
out = net(inputs)
out = tf.nn.tanh(out)
return snt.BatchReshape([28, 28, 1])(out)
class MLPMetricNet(snt.AbstractModule):
"""Same as in Grover and Ermon, ICLR workshop 2017."""
def __init__(self, num_outputs=2, name='mlp_metric'):
super(MLPMetricNet, self).__init__(name=name)
self._layer_size = [500, 500, num_outputs]
def _build(self, inputs):
net = snt.nets.MLP(self._layer_size,
activation=tf.nn.leaky_relu)
output = net(snt.BatchFlatten()(inputs))
return output
| deepmind-research-master | cs_gan/nets.py |
# Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sonnet as snt
import tensorflow.compat.v1 as tf
from cs_gan import gan
class DummyGenerator(snt.AbstractModule):
def __init__(self):
super(DummyGenerator, self).__init__(name='dummy_generator')
def _build(self, inputs, is_training):
return snt.Linear(10)(inputs)
class GanTest(tf.test.TestCase):
def testConnect(self):
discriminator = snt.Linear(2)
generator = DummyGenerator()
model = gan.GAN(
discriminator, generator,
num_z_iters=0, z_step_size=0.1,
z_project_method='none', optimisation_cost_weight=0.0)
generator_inputs = tf.ones((16, 3), dtype=tf.float32)
data = tf.ones((16, 10))
    opt_components, _ = model.connect(data, generator_inputs)
    self.assertIn('disc', opt_components)
    self.assertIn('gen', opt_components)
    self.assertCountEqual(
        opt_components['disc'].vars,
        discriminator.get_variables())
    self.assertCountEqual(
        opt_components['gen'].vars,
generator.get_variables() + model._log_step_size_module.get_variables())
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | cs_gan/tests/gan_test.py |
# Copyright 2019 Deepmind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for the Transporter module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import tensorflow.compat.v1 as tf
from transporter import transporter
IMAGE_H = 16
IMAGE_W = 16
IMAGE_C = 3
BATCH_SIZE = 4
IMAGE_BATCH_SHAPE = (BATCH_SIZE, IMAGE_H, IMAGE_W, IMAGE_C)
FILTERS = (16, 16, 32, 32, 64, 64)
STRIDES = (1, 1, 2, 1, 2, 1)
KERNEL_SIZES = (7, 3, 3, 3, 3, 3)
class TransporterTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
{'norm_type': 'batch'},
{'norm_type': 'layer'},
{'norm_type': 'instance'})
def test_output_shape(self, norm_type):
encoder_ctor = transporter.Encoder
encoder_kwargs = {
'filters': FILTERS,
'strides': STRIDES,
'kernel_sizes': KERNEL_SIZES,
'norm_type': norm_type,
}
decoder_filters = 4
num_keypoints = 5
gauss_std = 0.1
encoder = encoder_ctor(name='encoder', **encoder_kwargs)
keypoint_encoder = encoder_ctor(name='keypoint_encoder', **encoder_kwargs)
keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
decoder = transporter.Decoder(initial_filters=decoder_filters,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C,
norm_type=norm_type)
model = transporter.Transporter(encoder=encoder,
decoder=decoder,
keypointer=keypointer)
image_a = tf.random.normal(IMAGE_BATCH_SHAPE)
image_b = tf.random.normal(IMAGE_BATCH_SHAPE)
transporter_results = model(image_a, image_b, is_training=True)
reconstructed_image_b = transporter_results['reconstructed_image_b']
self.assertEqual(reconstructed_image_b.shape, IMAGE_BATCH_SHAPE)
def testIncorrectEncoderShapes(self):
"""Test that a possible misconfiguration throws an error as expected.
If the two encoders used produce different spatial sizes for their
feature maps, this should cause an error when multiplying tensors together.
"""
decoder_filters = 4
num_keypoints = 5
gauss_std = 0.1
encoder = transporter.Encoder(
filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
    # Use fewer conv layers here, in particular one fewer stride-2 layer, so
    # we will get a different spatial output resolution.
keypoint_encoder = transporter.Encoder(
filters=FILTERS[:-2],
strides=STRIDES[:-2],
kernel_sizes=KERNEL_SIZES[:-2])
keypointer = transporter.KeyPointer(
keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
decoder = transporter.Decoder(
initial_filters=decoder_filters,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
model = transporter.Transporter(
encoder=encoder,
decoder=decoder,
keypointer=keypointer)
with self.assertRaisesRegexp(ValueError, 'Dimensions must be equal'):
model(tf.random.normal(IMAGE_BATCH_SHAPE),
tf.random.normal(IMAGE_BATCH_SHAPE),
is_training=True)
class EncoderTest(tf.test.TestCase):
def test_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
filters = (4, 4, 8, 8, 16, 16)
encoder = transporter.Encoder(filters=filters,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
features = encoder(image_batch, is_training=True)
self.assertEqual(features.shape, (BATCH_SIZE,
IMAGE_H // 4,
IMAGE_W // 4,
filters[-1]))
class KeyPointerTest(tf.test.TestCase):
def test_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
num_keypoints = 6
gauss_std = 0.1
keypoint_encoder = transporter.Encoder(filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
keypointer = transporter.KeyPointer(keypoint_encoder=keypoint_encoder,
num_keypoints=num_keypoints,
gauss_std=gauss_std)
keypointer_results = keypointer(image_batch, is_training=True)
self.assertEqual(keypointer_results['centers'].shape,
(BATCH_SIZE, num_keypoints, 2))
self.assertEqual(keypointer_results['heatmaps'].shape,
(BATCH_SIZE, IMAGE_H // 4, IMAGE_W // 4, num_keypoints))
class DecoderTest(tf.test.TestCase):
def test_output_shape(self):
feature_batch = tf.random.normal(shape=(BATCH_SIZE,
IMAGE_H // 4,
IMAGE_W // 4,
64))
decoder = transporter.Decoder(initial_filters=64,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
reconstructed_image_batch = decoder(feature_batch, is_training=True)
self.assertEqual(reconstructed_image_batch.shape, IMAGE_BATCH_SHAPE)
def test_encoder_decoder_output_shape(self):
image_batch = tf.random.normal(shape=IMAGE_BATCH_SHAPE)
encoder = transporter.Encoder(filters=FILTERS,
strides=STRIDES,
kernel_sizes=KERNEL_SIZES)
decoder = transporter.Decoder(initial_filters=4,
output_size=[IMAGE_H, IMAGE_W],
output_channels=IMAGE_C)
features = encoder(image_batch, is_training=True)
reconstructed_images = decoder(features, is_training=True)
self.assertEqual(reconstructed_images.shape, IMAGE_BATCH_SHAPE)
if __name__ == '__main__':
tf.test.main()
| deepmind-research-master | transporter/transporter_test.py |