# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper functions for multi head/network (Ensemble-DQN and REM) agents."""
import collections
import typing
import numpy as np
import tensorflow.compat.v1 as tf
MultiHeadNetworkType = collections.namedtuple(
'multi_head_dqn_network', ['q_heads', 'unordered_q_heads', 'q_values'])
DQNNetworkType = collections.namedtuple('dqn_network', ['q_values'])
MultiNetworkNetworkType = collections.namedtuple(
'multi_network_dqn_network',
['q_networks', 'unordered_q_networks', 'q_values'])
QuantileNetworkType = collections.namedtuple(
'qr_dqn_network', ['q_values', 'logits', 'probabilities'])
class QuantileNetwork(tf.keras.Model):
"""Keras network for QR-DQN agent.
Attributes:
num_actions: An integer representing the number of actions.
num_atoms: An integer representing the number of quantiles of the value
function distribution.
conv1: First convolutional tf.keras layer with ReLU.
conv2: Second convolutional tf.keras layer with ReLU.
conv3: Third convolutional tf.keras layer with ReLU.
flatten: A tf.keras Flatten layer.
dense1: Penultimate fully-connected layer with ReLU.
dense2: Final fully-connected layer with `num_actions` * `num_atoms` units.
"""
def __init__(self,
num_actions: int,
num_atoms: int,
name: str = 'quantile_network'):
"""Convolutional network used to compute the agent's Q-value distribution.
Args:
num_actions: int, number of actions.
num_atoms: int, the number of buckets of the value function distribution.
name: str, used to create scope for network parameters.
"""
super(QuantileNetwork, self).__init__(name=name)
self.num_actions = num_actions
self.num_atoms = num_atoms
activation_fn = tf.keras.activations.relu # ReLU activation.
self._kernel_initializer = tf.keras.initializers.VarianceScaling(
scale=1.0 / np.sqrt(3.0), mode='fan_in', distribution='uniform')
# Defining layers.
self.conv1 = tf.keras.layers.Conv2D(
filters=32, kernel_size=[8, 8],
strides=4,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer)
self.conv2 = tf.keras.layers.Conv2D(
filters=64, kernel_size=[4, 4],
strides=2,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer)
self.conv3 = tf.keras.layers.Conv2D(
filters=64, kernel_size=[3, 3],
strides=1,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer)
self.flatten = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(
units=512,
activation=activation_fn,
kernel_initializer=self._kernel_initializer)
self.dense2 = tf.keras.layers.Dense(
units=num_actions * num_atoms,
kernel_initializer=self._kernel_initializer,
activation=None)
def call(self, state):
"""Calculates the distribution of Q-values using the input state tensor."""
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = self.conv1(net)
net = self.conv2(net)
net = self.conv3(net)
net = self.flatten(net)
net = self.dense1(net)
net = self.dense2(net)
logits = tf.reshape(net, [-1, self.num_actions, self.num_atoms])
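    # QR-DQN assigns equal probability mass (1 / num_atoms) to each quantile;
    # the softmax of a zero tensor yields exactly this uniform distribution,
    # so `probabilities` is constant and only mirrors the output signature of
    # the C51/Rainbow network type.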
probabilities = tf.keras.activations.softmax(tf.zeros_like(logits))
q_values = tf.reduce_mean(logits, axis=2)
return QuantileNetworkType(q_values, logits, probabilities)
class MultiHeadQNetwork(tf.keras.Model):
"""Multi-head convolutional network to compute multiple Q-value estimates.
Attributes:
num_actions: An integer representing the number of actions.
num_heads: An integer representing the number of Q-heads.
conv1: First convolutional tf.keras layer with ReLU.
conv2: Second convolutional tf.keras layer with ReLU.
conv3: Third convolutional tf.keras layer with ReLU.
flatten: A tf.keras Flatten layer.
dense1: Penultimate fully-connected layer with ReLU.
dense2: Final fully-connected layer with `num_actions` * `num_heads` units.
"""
def __init__(self,
num_actions: int,
num_heads: int,
transform_strategy: typing.Optional[str] = None,
name: typing.Optional[str] = None,
**kwargs):
"""Creates the layers used calculating return distributions.
Args:
num_actions: number of actions.
num_heads: number of Q-heads.
transform_strategy: Possible options include (1) 'IDENTITY' for no
transformation (Ensemble-DQN) (2) 'STOCHASTIC' for random convex
combination (REM).
name: used to create scope for network parameters.
**kwargs: Arbitrary keyword arguments. Used for passing
`transform_matrix`, the matrix for transforming the Q-values if the
passed `transform_strategy` is `STOCHASTIC`.
"""
super(MultiHeadQNetwork, self).__init__(name=name)
activation_fn = tf.keras.activations.relu
self.num_actions = num_actions
self.num_heads = num_heads
self._transform_strategy = transform_strategy
self._kwargs = kwargs
self._kernel_initializer = tf.keras.initializers.VarianceScaling(
scale=1.0 / np.sqrt(3.0), mode='fan_in', distribution='uniform')
# Defining layers.
self.conv1 = tf.keras.layers.Conv2D(
32, [8, 8],
strides=4,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer,
name='Conv')
self.conv2 = tf.keras.layers.Conv2D(
64, [4, 4],
strides=2,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer,
name='Conv')
self.conv3 = tf.keras.layers.Conv2D(
64, [3, 3],
strides=1,
padding='same',
activation=activation_fn,
kernel_initializer=self._kernel_initializer,
name='Conv')
self.flatten = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(
512,
activation=activation_fn,
kernel_initializer=self._kernel_initializer,
name='fully_connected')
self.dense2 = tf.keras.layers.Dense(
num_actions * num_heads,
kernel_initializer=self._kernel_initializer,
name='fully_connected')
def call(self, state):
"""Creates the output tensor/op given the input state tensor.
See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
information on this. Note that tf.keras.Model implements `call` which is
wrapped by `__call__` function by tf.keras.Model.
Args:
state: Tensor, input tensor.
Returns:
collections.namedtuple, output ops (graph mode) or output tensors (eager).
"""
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = self.conv1(net)
net = self.conv2(net)
net = self.conv3(net)
net = self.flatten(net)
net = self.dense1(net)
net = self.dense2(net)
unordered_q_heads = tf.reshape(net, [-1, self.num_actions, self.num_heads])
q_heads, q_values = combine_q_functions(
unordered_q_heads, self._transform_strategy, **self._kwargs)
return MultiHeadNetworkType(q_heads, unordered_q_heads, q_values)
def combine_q_functions(q_functions, transform_strategy, **kwargs):
"""Utility function for combining multiple Q functions.
Args:
q_functions: Multiple Q-functions concatenated.
transform_strategy: str, Possible options include (1) 'IDENTITY' for no
transformation (2) 'STOCHASTIC' for random convex combination.
**kwargs: Arbitrary keyword arguments. Used for passing `transform_matrix`,
the matrix for transforming the Q-values if the passed
`transform_strategy` is `STOCHASTIC`.
Returns:
q_functions: Modified Q-functions.
q_values: Q-values based on combining the multiple heads.
"""
# Create q_values before reordering the heads for training
q_values = tf.reduce_mean(q_functions, axis=-1)
if transform_strategy == 'STOCHASTIC':
left_stochastic_matrix = kwargs.get('transform_matrix')
if left_stochastic_matrix is None:
raise ValueError('None value provided for stochastic matrix')
q_functions = tf.tensordot(
q_functions, left_stochastic_matrix, axes=[[2], [0]])
elif transform_strategy == 'IDENTITY':
tf.logging.info('Identity transformation Q-function heads')
else:
raise ValueError(
'{} is not a valid reordering strategy'.format(transform_strategy))
return q_functions, q_values
class NatureDQNNetwork(tf.keras.Model):
"""The convolutional network used to compute the agent's Q-values.
Attributes:
num_actions: An integer representing the number of actions.
conv1: First convolutional tf.keras layer with ReLU.
conv2: Second convolutional tf.keras layer with ReLU.
conv3: Third convolutional tf.keras layer with ReLU.
flatten: A tf.keras Flatten layer.
dense1: Penultimate fully-connected layer with ReLU.
dense2: Final fully-connected layer with `num_actions` units.
"""
def __init__(self, num_actions: int, name: typing.Optional[str] = None):
"""Creates the layers used for calculating Q-values.
Args:
num_actions: number of actions.
name: used to create scope for network parameters.
"""
super(NatureDQNNetwork, self).__init__(name=name)
self.num_actions = num_actions
# Defining layers.
activation_fn = tf.keras.activations.relu
# Setting names of the layers manually to make variable names more similar
# with tf.slim variable names/checkpoints.
self.conv1 = tf.keras.layers.Conv2D(
32, [8, 8],
strides=4,
padding='same',
activation=activation_fn,
name='Conv')
self.conv2 = tf.keras.layers.Conv2D(
64, [4, 4],
strides=2,
padding='same',
activation=activation_fn,
name='Conv')
self.conv3 = tf.keras.layers.Conv2D(
64, [3, 3],
strides=1,
padding='same',
activation=activation_fn,
name='Conv')
self.flatten = tf.keras.layers.Flatten()
self.dense1 = tf.keras.layers.Dense(
512, activation=activation_fn, name='fully_connected')
self.dense2 = tf.keras.layers.Dense(num_actions, name='fully_connected')
def call(self, state):
"""Creates the output tensor/op given the state tensor as input.
See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
information on this. Note that tf.keras.Model implements `call` which is
wrapped by `__call__` function by tf.keras.Model.
Parameters created here will have scope according to the `name` argument
given at `.__init__()` call.
Args:
state: Tensor, input tensor.
Returns:
collections.namedtuple, output ops (graph mode) or output tensors (eager).
"""
net = tf.cast(state, tf.float32)
net = tf.div(net, 255.)
net = self.conv1(net)
net = self.conv2(net)
net = self.conv3(net)
net = self.flatten(net)
net = self.dense1(net)
return DQNNetworkType(self.dense2(net))
class MultiNetworkQNetwork(tf.keras.Model):
"""Multiple convolutional networks to compute Q-value estimates.
Attributes:
    num_actions: An integer representing the number of actions.
num_networks: An integer representing the number of Q-networks.
"""
def __init__(self,
num_actions: int,
num_networks: int,
transform_strategy: typing.Optional[str] = None,
name: typing.Optional[str] = None,
**kwargs):
"""Creates the networks used calculating multiple Q-values.
Args:
num_actions: number of actions.
num_networks: number of separate Q-networks.
transform_strategy: Possible options include (1) 'IDENTITY' for no
transformation (Ensemble-DQN) (2) 'STOCHASTIC' for random convex
combination (REM).
name: used to create scope for network parameters.
**kwargs: Arbitrary keyword arguments. Used for passing
        `transform_matrix`, the matrix for transforming the Q-values if the
        passed `transform_strategy` is `STOCHASTIC`.
"""
    super(MultiNetworkQNetwork, self).__init__(name=name)
self.num_actions = num_actions
self.num_networks = num_networks
self._transform_strategy = transform_strategy
self._kwargs = kwargs
self._device_fn = kwargs.pop('device_fn', lambda i: '/gpu:0')
# Create multiple Q-networks
self._q_networks = []
for i in range(self.num_networks):
with tf.device(self._device_fn(i)):
q_net = NatureDQNNetwork(num_actions, name='subnet_{}'.format(i))
self._q_networks.append(q_net)
def call(self, state):
"""Creates the output tensor/op given the input state tensor.
See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
information on this. Note that tf.keras.Model implements `call` which is
wrapped by `__call__` function by tf.keras.Model.
Args:
state: Tensor, input tensor.
Returns:
collections.namedtuple, output ops (graph mode) or output tensors (eager).
"""
unordered_q_networks = [
network(state).q_values for network in self._q_networks]
unordered_q_networks = tf.stack(unordered_q_networks, axis=-1)
q_networks, q_values = combine_q_functions(unordered_q_networks,
self._transform_strategy,
**self._kwargs)
return MultiNetworkNetworkType(q_networks, unordered_q_networks, q_values)
def random_stochastic_matrix(dim, num_cols=None, dtype=tf.float32):
"""Generates a random left stochastic matrix."""
mat_shape = (dim, dim) if num_cols is None else (dim, num_cols)
mat = tf.random.uniform(shape=mat_shape, dtype=dtype)
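  # Normalizing each column by its L1 norm turns every column into a
  # probability vector, making the matrix left stochastic.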
mat /= tf.norm(mat, ord=1, axis=0, keepdims=True)
return mat
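# A minimal, hypothetical usage sketch of the helpers above. The function
# below is purely illustrative (its name and default sizes are not used
# anywhere else in this codebase); it shows how REM forms a random convex
# combination of Q-heads.
def _example_rem_head_combination(batch_size=32, num_actions=6, num_heads=4):
  """Illustrative sketch: combine random Q-heads with a stochastic matrix."""
  q_heads = tf.random.uniform([batch_size, num_actions, num_heads])
  transform_matrix = random_stochastic_matrix(num_heads, num_cols=1)
  # `combined` has shape [batch_size, num_actions, 1]; `q_values` has shape
  # [batch_size, num_actions] (the mean over the original, unordered heads).
  combined, q_values = combine_q_functions(
      q_heads, 'STOCHASTIC', transform_matrix=transform_matrix)
  return combined, q_values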
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi Head DQN agent."""
import os
from batch_rl.multi_head import atari_helpers
from dopamine.agents.dqn import dqn_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class MultiHeadDQNAgent(dqn_agent.DQNAgent):
"""DQN agent with multiple heads."""
def __init__(self,
sess,
num_actions,
num_heads=1,
transform_strategy='IDENTITY',
num_convex_combinations=1,
network=atari_helpers.MultiHeadQNetwork,
init_checkpoint_dir=None,
**kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
num_heads: int, Number of heads per action output of the Q function.
transform_strategy: str, Possible options include (1)
'STOCHASTIC' for multiplication with a left stochastic matrix. (2)
'IDENTITY', in which case the heads are not transformed.
num_convex_combinations: If transform_strategy is 'STOCHASTIC',
then this argument specifies the number of random
convex combinations to be created. If None, `num_heads` convex
combinations are created.
network: tf.Keras.Model. A call to this object will return an
instantiation of the network provided. The network returned can be run
with different inputs to create different outputs. See
atari_helpers.MultiHeadQNetwork as an example.
init_checkpoint_dir: str, directory from which initial checkpoint before
training is loaded if there doesn't exist any checkpoint in the current
agent directory. If None, no initial checkpoint is loaded.
**kwargs: Arbitrary keyword arguments.
"""
tf.logging.info('Creating MultiHeadDQNAgent with following parameters:')
tf.logging.info('\t num_heads: %d', num_heads)
tf.logging.info('\t transform_strategy: %s', transform_strategy)
tf.logging.info('\t num_convex_combinations: %d', num_convex_combinations)
tf.logging.info('\t init_checkpoint_dir: %s', init_checkpoint_dir)
self.num_heads = num_heads
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(
init_checkpoint_dir, 'checkpoints')
else:
self._init_checkpoint_dir = None
self._q_heads_transform = None
self._num_convex_combinations = num_convex_combinations
self.transform_strategy = transform_strategy
super(MultiHeadDQNAgent, self).__init__(
sess, num_actions, network=network, **kwargs)
def _create_network(self, name):
"""Builds a multi-head Q-network that outputs Q-values for multiple heads.
Args:
name: str, this name is passed to the tf.keras.Model and used to create
variable scope under the hood by the tf.keras.Model.
Returns:
network: tf.keras.Model, the network instantiated by the Keras model.
"""
kwargs = {} # Used for passing the transformation matrix if any
if self._q_heads_transform is None:
if self.transform_strategy == 'STOCHASTIC':
tf.logging.info('Creating q_heads transformation matrix..')
self._q_heads_transform = atari_helpers.random_stochastic_matrix(
self.num_heads, num_cols=self._num_convex_combinations)
if self._q_heads_transform is not None:
kwargs.update({'transform_matrix': self._q_heads_transform})
network = self.network(
num_actions=self.num_actions,
num_heads=self.num_heads,
transform_strategy=self.transform_strategy,
name=name,
**kwargs)
return network
def _build_target_q_op(self):
"""Build an op used as a target for the Q-value.
Returns:
target_q_op: An op calculating the Q-value.
"""
# Get the maximum Q-value across the actions dimension for each head.
replay_next_qt_max = tf.reduce_max(
self._replay_next_target_net_outputs.q_heads, axis=1)
is_non_terminal = 1. - tf.cast(self._replay.terminals, tf.float32)
is_non_terminal = tf.expand_dims(is_non_terminal, axis=-1)
rewards = tf.expand_dims(self._replay.rewards, axis=-1)
return rewards + (
self.cumulative_gamma * replay_next_qt_max * is_non_terminal)
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training from replay data.
"""
actions = self._replay.actions
indices = tf.stack([tf.range(actions.shape[0]), actions], axis=-1)
replay_chosen_q = tf.gather_nd(
self._replay_net_outputs.q_heads, indices=indices)
target = tf.stop_gradient(self._build_target_q_op())
loss = tf.losses.huber_loss(
target, replay_chosen_q, reduction=tf.losses.Reduction.NONE)
q_head_losses = tf.reduce_mean(loss, axis=0)
final_loss = tf.reduce_mean(q_head_losses)
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('HuberLoss', final_loss)
return self.optimizer.minimize(final_loss)
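# A hypothetical gin-config sketch for configuring an REM-style multi-head
# agent; the binding values below are illustrative only, and any config files
# shipped with the codebase take precedence:
#
#   MultiHeadDQNAgent.num_heads = 200
#   MultiHeadDQNAgent.transform_strategy = 'STOCHASTIC'
#   MultiHeadDQNAgent.num_convex_combinations = 1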
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Multi Q-Network DQN agent."""
import copy
import os
from batch_rl.multi_head import atari_helpers
from dopamine.agents.dqn import dqn_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class MultiNetworkDQNAgent(dqn_agent.DQNAgent):
"""DQN agent with multiple heads."""
def __init__(self,
sess,
num_actions,
num_networks=1,
transform_strategy='IDENTITY',
num_convex_combinations=1,
               network=atari_helpers.MultiNetworkQNetwork,
init_checkpoint_dir=None,
use_deep_exploration=False,
**kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
num_networks: int, Number of different Q-functions.
transform_strategy: str, Possible options include (1) 'STOCHASTIC' for
multiplication with a left stochastic matrix. (2) 'IDENTITY', in which
case the heads are not transformed.
      num_convex_combinations: If transform_strategy is 'STOCHASTIC',
        then this argument specifies the number of random
        convex combinations to be created. If None, `num_networks` convex
        combinations are created.
network: tf.Keras.Model. A call to this object will return an
instantiation of the network provided. The network returned can be run
with different inputs to create different outputs. See
atari_helpers.MultiNetworkQNetwork as an example.
init_checkpoint_dir: str, directory from which initial checkpoint before
training is loaded if there doesn't exist any checkpoint in the current
agent directory. If None, no initial checkpoint is loaded.
      use_deep_exploration: bool, whether to adapt the deep exploration
        strategy of Bootstrapped DQN for data collection with REM.
**kwargs: Arbitrary keyword arguments.
"""
tf.logging.info('Creating MultiNetworkDQNAgent with following parameters:')
tf.logging.info('\t num_networks: %d', num_networks)
tf.logging.info('\t transform_strategy: %s', transform_strategy)
tf.logging.info('\t num_convex_combinations: %d', num_convex_combinations)
tf.logging.info('\t init_checkpoint_dir: %s', init_checkpoint_dir)
tf.logging.info('\t use_deep_exploration %s', use_deep_exploration)
self.num_networks = num_networks
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(init_checkpoint_dir,
'checkpoints')
else:
self._init_checkpoint_dir = None
# The transform matrix should be created on device specified by tf_device
# if the transform_strategy is UNIFORM_STOCHASTIC or STOCHASTIC
self._q_networks_transform = None
self._num_convex_combinations = num_convex_combinations
self.transform_strategy = transform_strategy
self.use_deep_exploration = use_deep_exploration
super(MultiNetworkDQNAgent, self).__init__(
sess, num_actions, network=network, **kwargs)
def _create_network(self, name):
"""Builds a multi-network Q-network that outputs Q-values for each network.
Args:
name: str, this name is passed to the tf.keras.Model and used to create
variable scope under the hood by the tf.keras.Model.
Returns:
network: tf.keras.Model, the network instantiated by the Keras model.
"""
# Pass the device_fn to place Q-networks on different devices
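    # (With this default, sub-networks 0-3 are placed on '/gpu:0', 4-7 on
    # '/gpu:1', and so on.)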
kwargs = {'device_fn': lambda i: '/gpu:{}'.format(i // 4)}
if self._q_networks_transform is None:
if self.transform_strategy == 'STOCHASTIC':
tf.logging.info('Creating q_networks transformation matrix..')
self._q_networks_transform = atari_helpers.random_stochastic_matrix(
self.num_networks, num_cols=self._num_convex_combinations)
if self._q_networks_transform is not None:
kwargs.update({'transform_matrix': self._q_networks_transform})
return self.network(
num_actions=self.num_actions,
num_networks=self.num_networks,
transform_strategy=self.transform_strategy,
name=name,
**kwargs)
def _build_target_q_op(self):
"""Build an op used as a target for the Q-value.
Returns:
target_q_op: An op calculating the Q-value.
"""
# Get the maximum Q-value across the actions dimension for each head.
replay_next_qt_max = tf.reduce_max(
self._replay_next_target_net_outputs.q_networks, axis=1)
is_non_terminal = 1. - tf.cast(self._replay.terminals, tf.float32)
is_non_terminal = tf.expand_dims(is_non_terminal, axis=-1)
rewards = tf.expand_dims(self._replay.rewards, axis=-1)
return rewards + (
self.cumulative_gamma * replay_next_qt_max * is_non_terminal)
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
if self.use_deep_exploration:
# Randomly pick a Q-function from all possible Q-functions for data
# collection each episode for online experiments, similar to deep
# exploration strategy proposed by Bootstrapped DQN
self._sess.run(self._update_episode_q_function)
return super(MultiNetworkDQNAgent, self).begin_episode(observation)
def _build_networks(self):
super(MultiNetworkDQNAgent, self)._build_networks()
# q_argmax is only used for picking an action
self._q_argmax_eval = tf.argmax(self._net_outputs.q_values, axis=1)[0]
if self.use_deep_exploration:
if self.transform_strategy.endswith('STOCHASTIC'):
q_transform = atari_helpers.random_stochastic_matrix(
self.num_networks, num_cols=1)
self._q_episode_transform = tf.get_variable(
trainable=False,
dtype=tf.float32,
shape=q_transform.get_shape().as_list(),
name='q_episode_transform')
self._update_episode_q_function = self._q_episode_transform.assign(
q_transform)
episode_q_function = tf.tensordot(
self._net_outputs.unordered_q_networks,
self._q_episode_transform, axes=[[2], [0]])
self._q_argmax_train = tf.argmax(episode_q_function[:, :, 0], axis=1)[0]
elif self.transform_strategy == 'IDENTITY':
self._q_function_index = tf.Variable(
initial_value=0,
trainable=False,
dtype=tf.int32,
shape=(),
name='q_head_episode')
self._update_episode_q_function = self._q_function_index.assign(
tf.random.uniform(
shape=(), maxval=self.num_networks, dtype=tf.int32))
q_function = self._net_outputs.unordered_q_networks[
:, :, self._q_function_index]
# This is only used for picking an action
self._q_argmax_train = tf.argmax(q_function, axis=1)[0]
else:
self._q_argmax_train = self._q_argmax_eval
def _select_action(self):
if self.eval_mode:
self._q_argmax = self._q_argmax_eval
else:
self._q_argmax = self._q_argmax_train
return super(MultiNetworkDQNAgent, self)._select_action()
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training from replay data.
"""
actions = self._replay.actions
indices = tf.stack([tf.range(actions.shape[0]), actions], axis=-1)
replay_chosen_q = tf.gather_nd(
self._replay_net_outputs.q_networks, indices=indices)
target = tf.stop_gradient(self._build_target_q_op())
loss = tf.losses.huber_loss(
target, replay_chosen_q, reduction=tf.losses.Reduction.NONE)
q_head_losses = tf.reduce_mean(loss, axis=0)
final_loss = tf.reduce_mean(q_head_losses)
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('HuberLoss', final_loss)
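    # Use an independent copy of the optimizer for every Q-network so that
    # each sub-network maintains its own optimizer state (e.g. Adam moments).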
self.optimizers = [copy.deepcopy(self.optimizer) for _ in
range(self.num_networks)]
train_ops = []
for i in range(self.num_networks):
var_list = tf.trainable_variables(scope='Online/subnet_{}'.format(i))
train_op = self.optimizers[i].minimize(final_loss, var_list=var_list)
train_ops.append(train_op)
return tf.group(*train_ops, name='merged_train_op')
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distributional RL agent using quantile regression.
This loss is computed as in "Distributional Reinforcement Learning with
Quantile Regression" - Dabney et al., 2017.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.multi_head import atari_helpers
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class QuantileAgent(rainbow_agent.RainbowAgent):
"""An extension of Rainbow to perform quantile regression."""
def __init__(self,
sess,
num_actions,
kappa=1.0,
network=atari_helpers.QuantileNetwork,
num_atoms=200,
gamma=0.99,
update_horizon=1,
min_replay_history=50000,
update_period=4,
target_update_period=10000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.1,
epsilon_eval=0.05,
epsilon_decay_period=1000000,
replay_scheme='prioritized',
tf_device='/cpu:0',
optimizer=tf.train.AdamOptimizer(
learning_rate=0.00005, epsilon=0.0003125),
summary_writer=None,
summary_writing_frequency=500):
"""Initializes the agent and constructs the Graph.
Args:
sess: A `tf.Session` object for running associated ops.
num_actions: Int, number of actions the agent can take at any state.
kappa: Float, Huber loss cutoff.
      network: tf.keras.Model, expects 3 parameters: num_actions, num_atoms,
        name. A call to this object will return an instantiation of the
network provided. The network returned can be run with different inputs
to create different outputs. See atari_helpers.QuantileNetwork
as an example.
num_atoms: Int, the number of buckets for the value function distribution.
gamma: Float, exponential decay factor as commonly used in the RL
literature.
update_horizon: Int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: Int, number of stored transitions for training to
start.
update_period: Int, period between DQN updates.
      target_update_period: Int, update period for the target network.
epsilon_fn: Function expecting 4 parameters: (decay_period, step,
warmup_steps, epsilon), and which returns the epsilon value used for
exploration during training.
epsilon_train: Float, final epsilon for training.
epsilon_eval: Float, epsilon during evaluation.
epsilon_decay_period: Int, number of steps for epsilon to decay.
replay_scheme: String, replay memory scheme to be used. Choices are:
uniform - Standard (DQN) replay buffer (Mnih et al., 2015)
prioritized - Prioritized replay buffer (Schaul et al., 2015)
tf_device: Tensorflow device with which the value function is computed
and trained.
optimizer: A `tf.train.Optimizer` object for training the model.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
"""
self.kappa = kappa
super(QuantileAgent, self).__init__(
sess=sess,
num_actions=num_actions,
network=network,
num_atoms=num_atoms,
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
replay_scheme=replay_scheme,
tf_device=tf_device,
optimizer=optimizer,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency)
def _create_network(self, name):
"""Builds a Quantile ConvNet.
Equivalent to Rainbow ConvNet, only now the output logits are interpreted
as quantiles.
Args:
name: str, this name is passed to the tf.keras.Model and used to create
variable scope under the hood by the tf.keras.Model.
Returns:
network: tf.keras.Model, the network instantiated by the Keras model.
"""
network = self.network(self.num_actions, self._num_atoms, name=name)
return network
def _build_target_distribution(self):
batch_size = tf.shape(self._replay.rewards)[0]
# size of rewards: batch_size x 1
rewards = self._replay.rewards[:, None]
# size of tiled_support: batch_size x num_atoms
is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
# Incorporate terminal state to discount factor.
# size of gamma_with_terminal: batch_size x 1
gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = gamma_with_terminal[:, None]
# size of next_qt_argmax: 1 x batch_size
next_qt_argmax = tf.argmax(
self._replay_next_target_net_outputs.q_values, axis=1)[:, None]
batch_indices = tf.range(tf.to_int64(batch_size))[:, None]
# size of next_qt_argmax: batch_size x 2
batch_indexed_next_qt_argmax = tf.concat(
[batch_indices, next_qt_argmax], axis=1)
# size of next_logits (next quantiles): batch_size x num_atoms
next_logits = tf.gather_nd(
self._replay_next_target_net_outputs.logits,
batch_indexed_next_qt_argmax)
return rewards + gamma_with_terminal * next_logits
def _build_train_op(self):
"""Builds a training op.
Returns:
train_op: An op performing one step of training.
"""
target_distribution = tf.stop_gradient(self._build_target_distribution())
# size of indices: batch_size x 1.
indices = tf.range(tf.shape(self._replay_net_outputs.logits)[0])[:, None]
# size of reshaped_actions: batch_size x 2.
reshaped_actions = tf.concat([indices, self._replay.actions[:, None]], 1)
# For each element of the batch, fetch the logits for its selected action.
chosen_action_logits = tf.gather_nd(self._replay_net_outputs.logits,
reshaped_actions)
bellman_errors = (target_distribution[:, None, :] -
chosen_action_logits[:, :, None]) # Input `u' of Eq. 9.
huber_loss = ( # Eq. 9 of paper.
tf.to_float(tf.abs(bellman_errors) <= self.kappa) *
0.5 * bellman_errors ** 2 +
tf.to_float(tf.abs(bellman_errors) > self.kappa) *
self.kappa * (tf.abs(bellman_errors) - 0.5 * self.kappa))
tau_hat = ((tf.range(self._num_atoms, dtype=tf.float32) + 0.5) /
self._num_atoms) # Quantile midpoints. See Lemma 2 of paper.
quantile_huber_loss = ( # Eq. 10 of paper.
tf.abs(tau_hat[None, :, None] - tf.to_float(bellman_errors < 0)) *
huber_loss)
# Sum over tau dimension, average over target value dimension.
loss = tf.reduce_sum(tf.reduce_mean(quantile_huber_loss, 2), 1)
if self._replay_scheme == 'prioritized':
target_priorities = self._replay.tf_get_priority(self._replay.indices)
# The original prioritized experience replay uses a linear exponent
# schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of 0.5
# on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders) suggested
# a fixed exponent actually performs better, except on Pong.
loss_weights = 1.0 / tf.sqrt(target_priorities + 1e-10)
loss_weights /= tf.reduce_max(loss_weights)
# Rainbow and prioritized replay are parametrized by an exponent alpha,
# but in both cases it is set to 0.5 - for simplicity's sake we leave it
# as is here, using the more direct tf.sqrt(). Taking the square root
# "makes sense", as we are dealing with a squared loss.
# Add a small nonzero value to the loss to avoid 0 priority items. While
# technically this may be okay, setting all items to 0 priority will cause
# troubles, and also result in 1.0 / 0.0 = NaN correction terms.
update_priorities_op = self._replay.tf_set_priority(
self._replay.indices, tf.sqrt(loss + 1e-10))
# Weight loss by inverse priorities.
loss = loss_weights * loss
else:
update_priorities_op = tf.no_op()
with tf.control_dependencies([update_priorities_op]):
if self.summary_writer is not None:
with tf.variable_scope('Losses'):
tf.summary.scalar('QuantileLoss', tf.reduce_mean(loss))
return self.optimizer.minimize(tf.reduce_mean(loss)), loss
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The entry point for running experiments for collecting replay datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
from absl import app
from absl import flags
from batch_rl.baselines.agents import dqn_agent
from batch_rl.baselines.agents import quantile_agent
from batch_rl.baselines.agents import random_agent
from batch_rl.baselines.run_experiment import LoggedRunner
from dopamine.discrete_domains import run_experiment
from dopamine.discrete_domains import train as base_train # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
flags.DEFINE_string('agent_name', 'dqn', 'Name of the agent.')
FLAGS = flags.FLAGS
def create_agent(sess, environment, replay_log_dir, summary_writer=None):
"""Creates a DQN agent.
Args:
    sess: A `tf.Session` object for running associated ops.
environment: An Atari 2600 environment.
    replay_log_dir: Directory to which the replay buffers are logged
      periodically.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
Returns:
A DQN agent with metrics.
"""
if FLAGS.agent_name == 'dqn':
agent = dqn_agent.LoggedDQNAgent
elif FLAGS.agent_name == 'quantile':
agent = quantile_agent.LoggedQuantileAgent
elif FLAGS.agent_name == 'random':
agent = random_agent.RandomAgent
else:
raise ValueError('{} is not a valid agent name'.format(FLAGS.agent_name))
return agent(sess, num_actions=environment.action_space.n,
replay_log_dir=replay_log_dir, summary_writer=summary_writer)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
# Create the replay log dir.
replay_log_dir = os.path.join(FLAGS.base_dir, 'replay_logs')
tf.logging.info('Saving replay buffer data to {}'.format(replay_log_dir))
create_agent_fn = functools.partial(
create_agent, replay_log_dir=replay_log_dir)
runner = LoggedRunner(FLAGS.base_dir, create_agent_fn)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
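# A hypothetical invocation sketch for collecting a replay dataset; the module
# path, output directory, and gin file below are illustrative only:
#
#   python -um batch_rl.baselines.train \
#     --base_dir=/tmp/batch_rl \
#     --gin_files='batch_rl/baselines/configs/dqn.gin' \
#     --agent_name=dqn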
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logged Runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.discrete_domains import run_experiment
import gin
@gin.configurable
class LoggedRunner(run_experiment.Runner):
def run_experiment(self):
super(LoggedRunner, self).run_experiment()
# Log the replay buffer at the end
self._agent.log_final_buffer()
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Random agent."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dopamine.agents.dqn import dqn_agent
import numpy as np
import gin
@gin.configurable
class RandomAgent(dqn_agent.DQNAgent):
"""Random Agent."""
def __init__(self, sess, num_actions, replay_log_dir, **kwargs):
"""This maintains all the DQN default argument values."""
self._replay_log_dir = replay_log_dir
super(RandomAgent, self).__init__(sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Returns a random action."""
return np.random.randint(self.num_actions)
def log_final_buffer(self):
pass
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantile Regression Agent with logged replay buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.baselines.replay_memory import logged_prioritized_replay_buffer
from batch_rl.multi_head import quantile_agent
import gin
@gin.configurable
class LoggedQuantileAgent(quantile_agent.QuantileAgent):
"""An implementation of the Quantile agent with replay buffer logging to disk."""
def __init__(self, sess, num_actions, replay_log_dir, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
      replay_log_dir: str, directory to which the replay buffer is saved to
        disk periodically.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_log_dir is not None
self._replay_log_dir = replay_log_dir
super(LoggedQuantileAgent, self).__init__(sess, num_actions, **kwargs)
def log_final_buffer(self):
self._replay.memory.log_final_buffer()
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent.
Args:
use_staging: bool, if True, uses a staging area to prefetch data for
faster training.
Returns:
      A `WrappedLoggedPrioritizedReplayBuffer` object.
Raises:
ValueError: if given an invalid replay scheme.
"""
if self._replay_scheme not in ['uniform', 'prioritized']:
raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
# Both replay schemes use the same data structure, but the 'uniform' scheme
# sets all priorities to the same value (which yields uniform sampling).
return logged_prioritized_replay_buffer.WrappedLoggedPrioritizedReplayBuffer(
log_dir=self._replay_log_dir,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN Agent with logged replay buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.baselines.replay_memory import logged_replay_buffer
from dopamine.agents.dqn import dqn_agent
import gin
@gin.configurable
class LoggedDQNAgent(dqn_agent.DQNAgent):
"""An implementation of the DQN agent with replay buffer logging to disk."""
def __init__(self, sess, num_actions, replay_log_dir, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
      replay_log_dir: str, directory to which the replay buffer is saved to
        disk periodically.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_log_dir is not None
# Set replay_log_dir before calling parent's initializer
self._replay_log_dir = replay_log_dir
super(LoggedDQNAgent, self).__init__(sess, num_actions, **kwargs)
def log_final_buffer(self):
self._replay.memory.log_final_buffer()
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent.
Args:
use_staging: bool, if True, uses a staging area to prefetch data for
faster training.
Returns:
      A `WrappedLoggedReplayBuffer` object.
"""
return logged_replay_buffer.WrappedLoggedReplayBuffer(
log_dir=self._replay_log_dir,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logged Prioritized Replay Buffer."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import pickle
from dopamine.replay_memory import circular_replay_buffer
from dopamine.replay_memory import prioritized_replay_buffer
import gin
import numpy as np
import tensorflow.compat.v1 as tf
STORE_FILENAME_PREFIX = circular_replay_buffer.STORE_FILENAME_PREFIX
class OutOfGraphLoggedPrioritizedReplayBuffer(
prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer):
"""A logged out-of-graph Replay Buffer for Prioritized Experience Replay."""
def __init__(self, log_dir, *args, **kwargs):
"""Initializes OutOfGraphLoggedPrioritizedReplayBuffer."""
super(OutOfGraphLoggedPrioritizedReplayBuffer, self).__init__(
*args, **kwargs)
self._log_count = 0
self._log_dir = log_dir
tf.gfile.MakeDirs(self._log_dir)
def add(self, observation, action, reward, terminal, *args):
super(OutOfGraphLoggedPrioritizedReplayBuffer, self).add(
observation, action, reward, terminal, *args)
# Log the replay buffer every time the replay buffer is filled to capacity.
cur_size = self.add_count % self._replay_capacity
if cur_size == self._replay_capacity - 1:
self._log_buffer()
self._log_count += 1
def load(self, checkpoint_dir, suffix):
super(OutOfGraphLoggedPrioritizedReplayBuffer, self).load(
checkpoint_dir, suffix)
self._log_count = self.add_count // self._replay_capacity
def _log_buffer(self):
"""This method will save all the replay buffer's state in a single file."""
checkpointable_elements = self._return_checkpointable_elements()
for attr in checkpointable_elements:
filename = self._generate_filename(self._log_dir, attr, self._log_count)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
np.save(outfile, self._store[array_name], allow_pickle=False)
# Some numpy arrays might not be part of storage
elif isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
tf.logging.info('Replay buffer logged to ckpt {number} in {dir}'.format(
number=self._log_count, dir=self._log_dir))
def log_final_buffer(self):
"""Logs the replay buffer at the end of training."""
add_count = self.add_count
self.add_count = np.array(self.cursor())
self._log_buffer()
self._log_count += 1
self.add_count = add_count
@gin.configurable(denylist=['observation_shape', 'stack_size',
'update_horizon', 'gamma'])
class WrappedLoggedPrioritizedReplayBuffer(
circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphLoggedPrioritizedReplayBuffer with in-graph sampling."""
def __init__(self,
log_dir,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedLoggedPrioritizedReplayBuffer."""
memory = OutOfGraphLoggedPrioritizedReplayBuffer(
log_dir, observation_shape, stack_size, replay_capacity, batch_size,
update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedLoggedPrioritizedReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging,
replay_capacity,
batch_size,
update_horizon,
gamma,
wrapped_memory=memory,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
def tf_set_priority(self, indices, priorities):
"""Sets the priorities for the given indices.
Args:
indices: tf.Tensor with dtype int32 and shape [n].
priorities: tf.Tensor with dtype float and shape [n].
Returns:
A tf op setting the priorities for prioritized sampling.
"""
return tf.py_func(
self.memory.set_priority, [indices, priorities], [],
name='prioritized_replay_set_priority_py_func')
def tf_get_priority(self, indices):
"""Gets the priorities for the given indices.
Args:
indices: tf.Tensor with dtype int32 and shape [n].
Returns:
priorities: tf.Tensor with dtype float and shape [n], the priorities at
the indices.
"""
return tf.py_func(
self.memory.get_priority, [indices],
tf.float32,
name='prioritized_replay_get_priority_py_func')
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logged Replay Buffer."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import pickle
from dopamine.replay_memory import circular_replay_buffer
import gin
import numpy as np
import tensorflow.compat.v1 as tf
STORE_FILENAME_PREFIX = circular_replay_buffer.STORE_FILENAME_PREFIX
class OutOfGraphLoggedReplayBuffer(
circular_replay_buffer.OutOfGraphReplayBuffer):
"""Logs the replay buffer to disk everytime it's full."""
def __init__(self, log_dir, *args, **kwargs):
super(OutOfGraphLoggedReplayBuffer, self).__init__(*args, **kwargs)
self._log_count = 0
self._log_dir = log_dir
tf.gfile.MakeDirs(self._log_dir)
def add(self, observation, action, reward, terminal, *args):
super(OutOfGraphLoggedReplayBuffer, self).add(
observation, action, reward, terminal, *args)
# Log the replay buffer to a file in self._log_dir if the replay buffer
# is full.
cur_size = self.add_count % self._replay_capacity
if cur_size == self._replay_capacity - 1:
self._log_buffer()
self._log_count += 1
def load(self, checkpoint_dir, suffix):
super(OutOfGraphLoggedReplayBuffer, self).load(checkpoint_dir, suffix)
self._log_count = self.add_count // self._replay_capacity
def _log_buffer(self):
"""This method will save all the replay buffer's state in a single file."""
checkpointable_elements = self._return_checkpointable_elements()
for attr in checkpointable_elements:
filename = self._generate_filename(self._log_dir, attr, self._log_count)
with tf.gfile.Open(filename, 'wb') as f:
with gzip.GzipFile(fileobj=f) as outfile:
          # Checkpointing the np arrays in self._store with np.save instead of
          # pickling the dictionary is critical for file size and performance.
# STORE_FILENAME_PREFIX indicates that the variable is contained in
# self._store.
if attr.startswith(STORE_FILENAME_PREFIX):
array_name = attr[len(STORE_FILENAME_PREFIX):]
np.save(outfile, self._store[array_name], allow_pickle=False)
# Some numpy arrays might not be part of storage
elif isinstance(self.__dict__[attr], np.ndarray):
np.save(outfile, self.__dict__[attr], allow_pickle=False)
else:
pickle.dump(self.__dict__[attr], outfile)
tf.logging.info('Replay buffer logged to ckpt {number} in {dir}'.format(
number=self._log_count, dir=self._log_dir))
def log_final_buffer(self):
"""Logs the replay buffer at the end of training."""
add_count = self.add_count
self.add_count = np.array(self.cursor())
self._log_buffer()
self._log_count += 1
self.add_count = add_count
@gin.configurable(denylist=['observation_shape', 'stack_size',
'update_horizon', 'gamma'])
class WrappedLoggedReplayBuffer(circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphLoggedReplayBuffer with an in graph sampling mechanism."""
def __init__(self,
log_dir,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedLoggedReplayBuffer."""
memory = OutOfGraphLoggedReplayBuffer(
log_dir, observation_shape, stack_size, replay_capacity, batch_size,
update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedLoggedReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging=use_staging,
replay_capacity=replay_capacity,
batch_size=batch_size,
update_horizon=update_horizon,
gamma=gamma,
wrapped_memory=memory,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The entry point for running experiments with fixed replay datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from batch_rl.fixed_replay import run_experiment
from batch_rl.fixed_replay.agents import dqn_agent
from batch_rl.fixed_replay.agents import multi_head_dqn_agent
from batch_rl.fixed_replay.agents import quantile_agent
from batch_rl.fixed_replay.agents import rainbow_agent
from dopamine.discrete_domains import run_experiment as base_run_experiment
from dopamine.discrete_domains import train as base_train # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
flags.DEFINE_string('agent_name', 'dqn', 'Name of the agent.')
flags.DEFINE_string('replay_dir', None, 'Directory from which to load the '
'replay data')
flags.DEFINE_string('init_checkpoint_dir', None, 'Directory from which to load '
'the initial checkpoint before training starts.')
FLAGS = flags.FLAGS
def create_agent(sess, environment, replay_data_dir, summary_writer=None):
"""Creates a DQN agent.
Args:
sess: A `tf.Session` object for running associated ops.
environment: An Atari 2600 environment.
replay_data_dir: Directory from which to load the logged replay buffers.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
Returns:
A DQN agent with metrics.
"""
if FLAGS.agent_name == 'dqn':
agent = dqn_agent.FixedReplayDQNAgent
elif FLAGS.agent_name == 'c51':
agent = rainbow_agent.FixedReplayRainbowAgent
elif FLAGS.agent_name == 'quantile':
agent = quantile_agent.FixedReplayQuantileAgent
elif FLAGS.agent_name == 'multi_head_dqn':
agent = multi_head_dqn_agent.FixedReplayMultiHeadDQNAgent
else:
raise ValueError('{} is not a valid agent name'.format(FLAGS.agent_name))
return agent(sess, num_actions=environment.action_space.n,
replay_data_dir=replay_data_dir, summary_writer=summary_writer,
init_checkpoint_dir=FLAGS.init_checkpoint_dir)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
base_run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
replay_data_dir = os.path.join(FLAGS.replay_dir, 'replay_logs')
create_agent_fn = functools.partial(
create_agent, replay_data_dir=replay_data_dir)
runner = run_experiment.FixedReplayRunner(FLAGS.base_dir, create_agent_fn)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('replay_dir')
flags.mark_flag_as_required('base_dir')
app.run(main)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runner for experiments with a fixed replay buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from dopamine.discrete_domains import checkpointer
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import run_experiment
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayRunner(run_experiment.Runner):
"""Object that handles running Dopamine experiments with fixed replay buffer."""
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
super(FixedReplayRunner, self)._initialize_checkpointer_and_maybe_resume(
checkpoint_file_prefix)
# Code for loading a checkpoint at initialization.
init_checkpoint_dir = self._agent._init_checkpoint_dir # pylint: disable=protected-access
if (self._start_iteration == 0) and (init_checkpoint_dir is not None):
if checkpointer.get_latest_checkpoint_number(self._checkpoint_dir) < 0:
# No checkpoint loaded yet, read init_checkpoint_dir
init_checkpointer = checkpointer.Checkpointer(
init_checkpoint_dir, checkpoint_file_prefix)
latest_init_checkpoint = checkpointer.get_latest_checkpoint_number(
init_checkpoint_dir)
if latest_init_checkpoint >= 0:
experiment_data = init_checkpointer.load_checkpoint(
latest_init_checkpoint)
if self._agent.unbundle(
init_checkpoint_dir, latest_init_checkpoint, experiment_data):
if experiment_data is not None:
assert 'logs' in experiment_data
assert 'current_iteration' in experiment_data
self._logger.data = experiment_data['logs']
self._start_iteration = experiment_data['current_iteration'] + 1
tf.logging.info(
'Reloaded checkpoint from %s and will start from iteration %d',
init_checkpoint_dir, self._start_iteration)
def _run_train_phase(self):
"""Run training phase."""
self._agent.eval_mode = False
start_time = time.time()
for _ in range(self._training_steps):
self._agent._train_step() # pylint: disable=protected-access
time_delta = time.time() - start_time
tf.logging.info('Average training steps per second: %.2f',
self._training_steps / time_delta)
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction."""
statistics = iteration_statistics.IterationStatistics()
tf.logging.info('Starting iteration %d', iteration)
# pylint: disable=protected-access
if not self._agent._replay_suffix:
# Reload the replay buffer
self._agent._replay.memory.reload_buffer(num_buffers=5)
# pylint: enable=protected-access
self._run_train_phase()
num_episodes_eval, average_reward_eval = self._run_eval_phase(statistics)
self._save_tensorboard_summaries(
iteration, num_episodes_eval, average_reward_eval)
return statistics.data_lists
def _save_tensorboard_summaries(self, iteration,
num_episodes_eval,
average_reward_eval):
"""Save statistics as tensorboard summaries.
Args:
iteration: int, The current iteration number.
num_episodes_eval: int, number of evaluation episodes run.
average_reward_eval: float, The average evaluation reward.
"""
summary = tf.Summary(value=[
tf.Summary.Value(tag='Eval/NumEpisodes',
simple_value=num_episodes_eval),
tf.Summary.Value(tag='Eval/AverageReturns',
simple_value=average_reward_eval)
])
self._summary_writer.add_summary(summary, iteration)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi Head DQN agent with fixed replay buffer(s)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from batch_rl.multi_head import multi_head_dqn_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayMultiHeadDQNAgent(multi_head_dqn_agent.MultiHeadDQNAgent):
"""MultiHeadDQNAgent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
**kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
tf.logging.info(
'Creating FixedReplayMultiHeadDQNAgent with replay directory: %s',
replay_data_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
# Set replay_log_dir before calling parent's initializer
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
super(FixedReplayMultiHeadDQNAgent, self).__init__(
sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayMultiHeadDQNAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Multi Network DQN agent with fixed replay buffer(s)."""
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from batch_rl.multi_head import multi_network_dqn_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayMultiNetworkDQNAgent(
multi_network_dqn_agent.MultiNetworkDQNAgent):
"""MultiNetworkDQNAgent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
**kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
tf.logging.info(
'Creating FixedReplayMultiNetworkDQNAgent with replay directory: %s',
replay_data_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
# Set replay_log_dir before calling parent's initializer
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
super(FixedReplayMultiNetworkDQNAgent, self).__init__(
sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayMultiNetworkDQNAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Quantile Regression agent (QR-DQN) with fixed replay buffer(s)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from batch_rl.multi_head import quantile_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayQuantileAgent(quantile_agent.QuantileAgent):
"""An implementation of the DQN agent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
init_checkpoint_dir=None, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
init_checkpoint_dir: str, directory from which the initial checkpoint is
loaded before training if no checkpoint exists in the current agent
directory. If None, no initial checkpoint is loaded.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
# Set replay_log_dir before calling parent's initializer
tf.logging.info(
'Creating FixedReplayAgent with replay directory: %s', replay_data_dir)
tf.logging.info('\t init_checkpoint_dir: %s', init_checkpoint_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(
init_checkpoint_dir, 'checkpoints')
else:
self._init_checkpoint_dir = None
super(FixedReplayQuantileAgent, self).__init__(
sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayQuantileAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agent with fixed replay buffer(s)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from dopamine.agents.dqn import dqn_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayDQNAgent(dqn_agent.DQNAgent):
"""An implementation of the DQN agent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
init_checkpoint_dir=None, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
init_checkpoint_dir: str, directory from which the initial checkpoint is
loaded before training if no checkpoint exists in the current agent
directory. If None, no initial checkpoint is loaded.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
tf.logging.info(
'Creating FixedReplayAgent with replay directory: %s', replay_data_dir)
tf.logging.info('\t init_checkpoint_dir %s', init_checkpoint_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
# Set replay_log_dir before calling parent's initializer
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(
init_checkpoint_dir, 'checkpoints')
else:
self._init_checkpoint_dir = None
super(FixedReplayDQNAgent, self).__init__(sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayDQNAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C51 agent with fixed replay buffer(s)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from batch_rl.fixed_replay.replay_memory import fixed_replay_buffer
from dopamine.agents.rainbow import rainbow_agent
import gin
import tensorflow.compat.v1 as tf
@gin.configurable
class FixedReplayRainbowAgent(rainbow_agent.RainbowAgent):
"""An implementation of the DQN agent with fixed replay buffer(s)."""
def __init__(self, sess, num_actions, replay_data_dir, replay_suffix=None,
init_checkpoint_dir=None, **kwargs):
"""Initializes the agent and constructs the components of its graph.
Args:
sess: tf.Session, for executing ops.
num_actions: int, number of actions the agent can take at any state.
replay_data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
init_checkpoint_dir: str, directory from which the initial checkpoint is
loaded before training if no checkpoint exists in the current agent
directory. If None, no initial checkpoint is loaded.
**kwargs: Arbitrary keyword arguments.
"""
assert replay_data_dir is not None
tf.logging.info(
'Creating FixedReplayAgent with replay directory: %s', replay_data_dir)
tf.logging.info('\t init_checkpoint_dir %s', init_checkpoint_dir)
tf.logging.info('\t replay_suffix %s', replay_suffix)
# Set replay_log_dir before calling parent's initializer
self._replay_data_dir = replay_data_dir
self._replay_suffix = replay_suffix
if init_checkpoint_dir is not None:
self._init_checkpoint_dir = os.path.join(
init_checkpoint_dir, 'checkpoints')
else:
self._init_checkpoint_dir = None
super(FixedReplayRainbowAgent, self).__init__(sess, num_actions, **kwargs)
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._record_observation(observation)
self.action = self._select_action()
return self.action
def end_episode(self, reward):
assert self.eval_mode, 'Eval mode is not set to be True.'
super(FixedReplayRainbowAgent, self).end_episode(reward)
def _build_replay_buffer(self, use_staging):
"""Creates the replay buffer used by the agent."""
return fixed_replay_buffer.WrappedFixedReplayBuffer(
data_dir=self._replay_data_dir,
replay_suffix=self._replay_suffix,
observation_shape=self.observation_shape,
stack_size=self.stack_size,
use_staging=use_staging,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype.as_numpy_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logged Replay Buffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent import futures
from absl import logging
from dopamine.replay_memory import circular_replay_buffer
import gin
import numpy as np
import tensorflow.compat.v1 as tf
gfile = tf.gfile
STORE_FILENAME_PREFIX = circular_replay_buffer.STORE_FILENAME_PREFIX
class FixedReplayBuffer(object):
"""Object composed of a list of OutofGraphReplayBuffers."""
def __init__(self,
data_dir,
replay_suffix,
*args,
replay_file_start_index=0,
replay_file_end_index=None,
**kwargs): # pylint: disable=keyword-arg-before-vararg
"""Initialize the FixedReplayBuffer class.
Args:
data_dir: str, log directory from which to load the replay buffer.
replay_suffix: int, if not None, only load the replay buffer
corresponding to the specified suffix in the data directory.
*args: Arbitrary extra arguments.
replay_file_start_index: int, Starting index of the replay buffer to use.
replay_file_end_index: int, End index of the replay buffer to use.
**kwargs: Arbitrary keyword arguments.
"""
self._args = args
self._kwargs = kwargs
self._data_dir = data_dir
self._loaded_buffers = False
self.add_count = np.array(0)
self._replay_suffix = replay_suffix
self._replay_indices = self._get_checkpoint_suffixes(
replay_file_start_index, replay_file_end_index)
while not self._loaded_buffers:
if replay_suffix:
assert replay_suffix >= 0, 'Please pass a non-negative replay suffix'
self.load_single_buffer(replay_suffix)
else:
self._load_replay_buffers(num_buffers=1)
def load_single_buffer(self, suffix):
"""Load a single replay buffer."""
replay_buffer = self._load_buffer(suffix)
if replay_buffer is not None:
self._replay_buffers = [replay_buffer]
self.add_count = replay_buffer.add_count
self._num_replay_buffers = 1
self._loaded_buffers = True
def _load_buffer(self, suffix):
"""Loads a OutOfGraphReplayBuffer replay buffer."""
try:
# pytype: disable=attribute-error
logging.info(
'Starting to load from ckpt %s from %s', suffix, self._data_dir)
replay_buffer = circular_replay_buffer.OutOfGraphReplayBuffer(
*self._args, **self._kwargs)
replay_buffer.load(self._data_dir, suffix)
# pylint:disable=protected-access
replay_capacity = replay_buffer._replay_capacity
logging.info('Capacity: %d', replay_buffer._replay_capacity)
for name, array in replay_buffer._store.items():
# This frees unused RAM if replay_capacity is smaller than 1M
replay_buffer._store[name] = array[:replay_capacity + 4].copy()
logging.info('%s: %s', name, array.shape)
logging.info('Loaded replay buffer ckpt %s from %s',
suffix, self._data_dir)
# pylint:enable=protected-access
# pytype: enable=attribute-error
return replay_buffer
except tf.errors.NotFoundError:
return None
def _get_checkpoint_suffixes(self, replay_file_start_index,
replay_file_end_index):
"""Get replay buffer indices to be be sampled among all replay buffers."""
ckpts = gfile.ListDirectory(self._data_dir) # pytype: disable=attribute-error
# Assumes that the checkpoints are saved in a format CKPT_NAME.{SUFFIX}.gz
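# For example, for suffix 0 one would typically expect files such as
# '$store$_observation_ckpt.0.gz' and 'add_count_ckpt.0.gz', following the
# Dopamine replay-buffer checkpointing convention.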
ckpt_counters = collections.Counter(
[name.split('.')[-2] for name in ckpts if name.endswith('gz')])
# Should contain the files for add_count, action, observation, reward,
# terminal and invalid_range
ckpt_suffixes = [
int(x) for x in ckpt_counters if ckpt_counters[x] in [6, 7]]
# Sort the replay buffer indices so that they correspond to an ordered list
# of indices, e.g. [0, 1, 2, ...].
ckpt_suffixes = sorted(ckpt_suffixes)
if replay_file_end_index is None:
replay_file_end_index = len(ckpt_suffixes)
replay_indices = ckpt_suffixes[
replay_file_start_index:replay_file_end_index]
logging.info('Replay indices: %s', str(replay_indices))
if len(replay_indices) == 1:
self._replay_suffix = replay_indices[0]
return replay_indices
def _load_replay_buffers(self, num_buffers):
"""Loads multiple checkpoints into a list of replay buffers."""
if not self._loaded_buffers: # pytype: disable=attribute-error
ckpt_suffixes = np.random.choice(
self._replay_indices, num_buffers, replace=False)
self._replay_buffers = []
# Load the replay buffers in parallel
with futures.ThreadPoolExecutor(
max_workers=num_buffers) as thread_pool_executor:
replay_futures = [thread_pool_executor.submit(
self._load_buffer, suffix) for suffix in ckpt_suffixes]
for f in replay_futures:
replay_buffer = f.result()
if replay_buffer is not None:
self._replay_buffers.append(replay_buffer)
self.add_count = max(replay_buffer.add_count, self.add_count)
self._num_replay_buffers = len(self._replay_buffers)
if self._num_replay_buffers:
self._loaded_buffers = True
def get_transition_elements(self):
return self._replay_buffers[0].get_transition_elements()
def sample_transition_batch(self, batch_size=None, indices=None):
buffer_index = np.random.randint(self._num_replay_buffers)
return self._replay_buffers[buffer_index].sample_transition_batch(
batch_size=batch_size, indices=indices)
def load(self, *args, **kwargs): # pylint: disable=unused-argument
pass
def reload_buffer(self, num_buffers):
if not self._replay_suffix:
self._loaded_buffers = False
self._load_replay_buffers(num_buffers)
def save(self, *args, **kwargs): # pylint: disable=unused-argument
pass
def add(self, *args, **kwargs): # pylint: disable=unused-argument
pass
@gin.configurable(
denylist=['observation_shape', 'stack_size', 'update_horizon', 'gamma'])
class WrappedFixedReplayBuffer(circular_replay_buffer.WrappedReplayBuffer):
"""Wrapper of OutOfGraphReplayBuffer with an in graph sampling mechanism."""
def __init__(self,
data_dir,
replay_suffix,
observation_shape,
stack_size,
use_staging=True,
replay_capacity=1000000,
batch_size=32,
update_horizon=1,
gamma=0.99,
wrapped_memory=None,
max_sample_attempts=1000,
extra_storage_types=None,
observation_dtype=np.uint8,
action_shape=(),
action_dtype=np.int32,
reward_shape=(),
reward_dtype=np.float32):
"""Initializes WrappedFixedReplayBuffer."""
memory = FixedReplayBuffer(
data_dir, replay_suffix, observation_shape, stack_size, replay_capacity,
batch_size, update_horizon, gamma, max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
super(WrappedFixedReplayBuffer, self).__init__(
observation_shape,
stack_size,
use_staging=use_staging,
replay_capacity=replay_capacity,
batch_size=batch_size,
update_horizon=update_horizon,
gamma=gamma,
wrapped_memory=memory,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype,
action_shape=action_shape,
action_dtype=action_dtype,
reward_shape=reward_shape,
reward_dtype=reward_dtype)
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""The entry point for running experiments.
"""
from absl import app
from absl import flags
from batch_rl.multi_head import multi_network_dqn_agent
from batch_rl.multi_head import quantile_agent
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import run_experiment
import tensorflow.compat.v1 as tf
flags.DEFINE_string('agent_name', 'dqn', 'Name of the agent.')
flags.DEFINE_string('base_dir', None,
'Base directory to host all required sub-directories.')
flags.DEFINE_multi_string(
'gin_files', [], 'List of paths to gin configuration files (e.g.'
'"third_party/py/dopamine/agents/dqn/dqn.gin").')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "DQNAgent.epsilon_train=0.1",'
' "create_environment.game_name="Pong"").')
FLAGS = flags.FLAGS
def create_agent(sess, environment, summary_writer=None):
"""Creates an online agent.
Args:
sess: A `tf.Session` object for running associated ops.
environment: An Atari 2600 environment.
summary_writer: A Tensorflow summary writer to pass to the agent
for in-agent training statistics in Tensorboard.
Returns:
A DQN agent with metrics.
"""
if FLAGS.agent_name == 'dqn':
agent = dqn_agent.DQNAgent
elif FLAGS.agent_name == 'c51':
# Gin config ensures that we only run C51 component of Rainbow
agent = rainbow_agent.RainbowAgent
elif FLAGS.agent_name == 'quantile':
agent = quantile_agent.QuantileAgent
elif FLAGS.agent_name == 'rem':
agent = multi_network_dqn_agent.MultiNetworkDQNAgent
else:
raise ValueError('{} is not a valid agent name'.format(FLAGS.agent_name))
return agent(sess, num_actions=environment.action_space.n,
summary_writer=summary_writer)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
runner = run_experiment.Runner(FLAGS.base_dir, create_agent)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPLHam2, a probabilistic programming language in style of Edward2.
This module provides two members:
1. Lightly wrapped distributions from scipy.stats. They enable tracing over any
calls to `rvs`;
2. A `make_log_joint_fn` factory function. It takes a PPLHam program and returns
its log joint probability function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from contextlib import contextmanager
import functools
import inspect
import threading
import numpy as np
from scipy import stats
import six
from . import log_probs as _log_probs
def make_log_joint_fn(model):
"""Takes PPLHam probabilistic program and returns its log joint function.
Args:
model: Python callable which executes the generative process of a
computable probability distribution using PPLHam random variables.
Returns:
A log-joint probability function. Its inputs are `model`'s original inputs
and random variables which appear during the program execution. Its output
is a scalar `np.ndarray`.
#### Examples
Below we define Bayesian logistic regression as a PPLHam program, which
represents the model's generative process. We apply `make_log_joint_fn` in
order to alternatively represent the model in terms of its joint probability
function.
```python
import pplham as ph
def model(X):
beta = ph.norm.rvs(loc=0., scale=0.1, size=X.shape[1])
loc = np.einsum('ij,j->i', X, beta)
y = ph.norm.rvs(loc=loc, scale=1.)
return y
log_joint = ph.make_log_joint_fn(model)
X = np.random.normal(size=[3, 2])
beta = np.random.normal(size=[2])
y = np.random.normal(size=[3])
out = log_joint(X, beta, y)
```
One can use kwargs in `log_joint` if `rvs` are given `name` kwargs.
```python
def model(X):
beta = ph.norm.rvs(loc=0., scale=0.1, size=X.shape[1], name="beta")
loc = np.einsum('ij,j->i', X, beta)
y = ph.norm.rvs(loc=loc, scale=1., name="y")
return y
log_joint = ph.make_log_joint_fn(model)
out = log_joint(X, y=y, beta=beta)
```
#### Notes
For implementation, we make several requirements:
1. The `log_probs` module has a supported `log_prob` function for each
random variable choice.
2. A random variable's `rvs` method has the same kwargs as scipy.stats'
`logpmf`/`logpdf` up to `size` and `random_state`.
3. The event outcome is the first argument of the `log_prob` function in the
`log_probs` module.
4. User must use explicit kwargs (no positional arguments) when specifying
`size` and `random_state` in the `rvs` method.
TODO(trandustin): Relax this requirement.
"""
def log_joint_fn(*args, **kwargs):
"""Log-probability of inputs according to a joint probability distribution.
Args:
*args: Positional arguments. They are the model's original inputs and can
alternatively be specified as part of `kwargs`.
**kwargs: Keyword arguments, where for each key-value pair `k` and `v`,
`v` is passed as a `value` to the random variable(s) whose keyword
argument `name` during construction is equal to `k`.
Returns:
Scalar `np.ndarray`, which represents the model's log-probability summed
over all PPLHam random variables and their dimensions.
Raises:
TypeError: If a random variable in the model has no specified value in
`**kwargs`.
"""
log_probs = []
args_counter = []
def interceptor(rv_call, *rv_args, **rv_kwargs):
"""Overrides a random variable's `value` and accumulates its log-prob."""
if len(args) - len(args_counter) > 0:
value = args[len(args_counter)]
args_counter.append(0)
else:
# Set value to keyword argument indexed by `name` (an input tensor).
rv_name = rv_kwargs.get("name")
if rv_name is None:
raise KeyError("Random variable call {} has no name in its arguments."
.format(rv_call.im_class.__name__))
value = kwargs.get(rv_name)
if value is None:
raise LookupError("Keyword argument specifying value for {} is "
"missing.".format(rv_name))
log_prob_fn = getattr(_log_probs, rv_call.im_class.__name__ + "_log_prob")
rv_kwargs.pop("size", None)
rv_kwargs.pop("random_state", None)
rv_kwargs.pop("name", None)
log_prob = log_prob_fn(value, *rv_args, **rv_kwargs)
log_probs.append(log_prob)
return value
args, model_args, model_kwargs = _get_function_inputs(
model, *args, **kwargs)
with interception(interceptor):
model(*model_args, **model_kwargs)
log_prob = sum(log_probs)
return log_prob
return log_joint_fn
def _get_function_inputs(f, *args, **kwargs):
"""Filters inputs to be compatible with function `f`'s signature.
Args:
f: Function according to whose input signature we filter arguments.
*args: Positional arguments to filter according to `f`.
**kwargs: Keyword arguments to filter according to `f`.
Returns:
Remaining original args, positional args of `f`, and keyword args of `f`.
"""
if hasattr(f, "_func"): # functions returned by tf.make_template
argspec = inspect.getargspec(f._func) # pylint: disable=protected-access
else:
argspec = inspect.getargspec(f)
fkwargs = {}
for k, v in six.iteritems(kwargs):
if k in argspec.args:
fkwargs[k] = v
kwargs.pop(k)
num_args = len(argspec.args) - len(fkwargs)
fargs = args[:num_args]
new_args = args[num_args:]
return new_args, fargs, fkwargs
class _InterceptorStack(threading.local):
"""A thread-local stack of interceptors."""
def __init__(self):
super(_InterceptorStack, self).__init__()
self.stack = [lambda f, *args, **kwargs: f(*args, **kwargs)]
_interceptor_stack = _InterceptorStack()
@contextmanager
def interception(interceptor):
"""Python context manager for interception.
Upon entry, an interception context manager pushes an interceptor onto a
thread-local stack. Upon exiting, it pops the interceptor from the stack.
Args:
interceptor: Function which takes a callable `f` and inputs `*args`,
`**kwargs`.
Yields:
None.
"""
try:
_interceptor_stack.stack.append(interceptor)
yield
finally:
_interceptor_stack.stack.pop()
def get_interceptor():
"""Returns the top-most (last) interceptor on the thread's stack.
The bottom-most (first) interceptor in the stack is a function which takes
`f, *args, **kwargs` as input and returns `f(*args, **kwargs)`. It is the
default if no `interception` contexts have been entered.
"""
return _interceptor_stack.stack[-1]
def interceptable(func):
"""Decorator that wraps `func` so that its execution is intercepted.
The wrapper passes `func` to the interceptor for the current thread.
Args:
func: Function to wrap.
Returns:
The decorated function.
"""
@functools.wraps(func)
def func_wrapped(*args, **kwargs):
return get_interceptor()(func, *args, **kwargs)
return func_wrapped
# Automatically generate random variables from scipy.stats. We wrap all
# distributions by registering their `rvs` method as `interceptable`.
#
# A vanilla Edward 2.0-like PPL in SciPy would introduce a RandomVariable
# abstraction: it wraps SciPy frozen distributions and calls `rvs` to associate
# the RandomVariable with a sampled value. SciPy distributions already accept
# parameters as input to `rvs`. Therefore, instead of introducing a new
# abstraction, we just wrap `rvs`. This enables the same manipulations.
_globals = globals()
for _name in sorted(dir(stats)):
_candidate = getattr(stats, _name)
if isinstance(_candidate, (stats._multivariate.multi_rv_generic, # pylint: disable=protected-access
stats.rv_continuous,
stats.rv_discrete,
stats.rv_histogram)):
_candidate.rvs = interceptable(_candidate.rvs)
_globals[_name] = _candidate
del _candidate
class categorical_gen(stats._multivariate.multi_rv_generic): # pylint: disable=invalid-name,protected-access
"""Categorical distribution.
Implementation follows `scipy.stats.multinomial_gen`. We build this manually
as scipy.stats does not support a categorical distribution.
"""
def __init__(self, seed=None):
super(categorical_gen, self).__init__(seed)
def __call__(self, p, seed=None):
return categorical_frozen(p, seed)
def _process_parameters(self, p):
p = np.array(p, dtype=np.float64, copy=True)
p[..., -1] = 1. - p[..., :-1].sum(axis=-1)
return p
def rvs(self, p, size=None, random_state=None):
if size != 1:
raise NotImplementedError()
p = self._process_parameters(p)
random_state = self._get_random_state(random_state)
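# Inverse-CDF sampling: subtract the cumulative probabilities from a uniform
# draw, clip negatives to zero, and take argmin, which returns the first
# index whose cumulative probability reaches the uniform draw.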
scores = (random_state.uniform(size=p.shape[:-1] + (1,)) -
np.cumsum(p, axis=-1))
scores[scores < 0] = 0
return np.argmin(scores, axis=-1)
categorical = categorical_gen()
categorical.rvs = interceptable(categorical.rvs) # register `rvs` for PPLHam
class categorical_frozen(stats._multivariate.multi_rv_frozen): # pylint: disable=invalid-name,protected-access
def __init__(self, p, seed=None):
self._dist = categorical_gen(seed)
self.p = self._dist._process_parameters(p) # pylint: disable=protected-access
self._dist._process_parameters = lambda p: self.p # pylint: disable=protected-access
def rvs(self, size=1, random_state=None):
return self._dist.rvs(self.p, size, random_state)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for exploiting graphical model structure in random variables,
once it has been identified. This includes efficient log-normalizers for
tree-structured Gaussian and Categorical distributions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import autograd.numpy as np
from autograd.scipy.misc import logsumexp
from .tracers import logdet
### Tree normal log normalizer
def diag(value):
return np.einsum('...i,j,ij->...ij', value, np.ones(value.shape[-1]),
np.eye(value.shape[-1]))
def convert_to_info_form(natparam):
info_natparam = natparam.__class__(**{k: defaultdict(int)
for k in natparam._fields})
for factor, val in natparam.xi_xjtrs.iteritems():
info_natparam.xi_xjtrs[factor] += -val
for factor, val in natparam.xi_times_xjs.iteritems():
info_natparam.xi_xjtrs[factor] += -diag(val)
for factor, val in natparam.xi_xitrs.iteritems():
info_natparam.xi_xitrs[factor] += -2*val
for factor, val in natparam.xi_squareds.iteritems():
info_natparam.xi_xitrs[factor] += -2*diag(val)
for factor, val in natparam.xis.iteritems():
info_natparam.xis[factor] += val
return info_natparam
def make_tree_normal_log_normalizer(elim_order):
def tree_normal_log_normalizer(natparam):
log_normalizer = 0
natparam = convert_to_info_form(natparam)
for node in elim_order:
# Find joint factor node participates in, if it exists.
joint_onehot_xis = [factor for factor in natparam.xi_xjtrs.keys()
if node in factor]
joint_factor = joint_onehot_xis[0] if joint_onehot_xis else None
# Retrieve natural parameters (in information form) of factors node
# participates in.
single_J = natparam.xi_xitrs.pop((node,), 0)
single_h = natparam.xis.pop((node,), np.zeros(single_J.shape[-1]))
joint_J = natparam.xi_xjtrs.pop(joint_factor, 0)
# After marginalizing, accumulate the result into log normalizer.
inv_single_J = np.linalg.inv(single_J)
dim_node = single_J.shape[0]
log_normalizer += 0.5*np.dot(single_h, np.dot(inv_single_J, single_h))
log_normalizer -= 0.5*logdet(single_J) + 0.5*dim_node*np.log(2*np.pi)
# Compute message to other node in joint factor, if it exists.
if joint_factor:
node_pos = joint_factor.index(node)
other_pos = (node_pos + 1) % 2
joint_J = joint_J.transpose((node_pos, other_pos))
msg_h = -np.dot(joint_J.T, np.dot(inv_single_J, single_h))
msg_J = -np.dot(joint_J.T, np.dot(inv_single_J, joint_J))
other_node = joint_factor[other_pos]
natparam.xis[(other_node,)] += msg_h
natparam.xi_xitrs[(other_node,)] += msg_J
return log_normalizer
return tree_normal_log_normalizer
### Tree categorical
def make_tree_categorical_collapser(elim_order, collapse_fun):
def tree_categorical_collapser(natparam_original):
# Make a copy of the natural parameters so we can mutate without outside
# effects.
natparam = natparam_original.__class__(
**{k: defaultdict(int, v) for k, v
in natparam_original._asdict().iteritems()})
# Eliminate nodes.
for node in elim_order:
# Find single and joint factors node participates in, if they exist.
joint_onehot_xis = [factor for factor in natparam.joint_onehot_xis.keys()
if node in factor]
joint_factor = joint_onehot_xis[0] if joint_onehot_xis else ()
# Retrieve natural parameters of factors node participates in.
log_single_param = natparam.single_onehot_xis.pop((node,), 0)
log_joint_param = natparam.joint_onehot_xis.pop(joint_factor, 0)
# Rearrange log_joint_param for broadcasting.
node_pos = joint_factor.index(node) if joint_factor else 0
if joint_factor:
old_axes = tuple(range(log_joint_param.ndim))
new_axes = old_axes[:node_pos] + old_axes[node_pos+1:] + (node_pos,)
log_joint_param = np.transpose(log_joint_param, new_axes)
# Construct new collapsed factor.
collapsed_factor = joint_factor[:node_pos] + joint_factor[node_pos+1:]
log_collapsed_param = collapse_fun(log_single_param + log_joint_param,
axis=-1)
if len(collapsed_factor) <= 1:
original_param = natparam.single_onehot_xis[collapsed_factor]
natparam.single_onehot_xis[collapsed_factor] = (log_collapsed_param +
original_param)
else:
original_param = natparam.joint_onehot_xis[collapsed_factor]
natparam.joint_onehot_xis[collapsed_factor] = (log_collapsed_param +
original_param)
return natparam.single_onehot_xis[()]
return tree_categorical_collapser
make_tree_categorical_log_normalizer = (
lambda elim_order: make_tree_categorical_collapser(elim_order,
collapse_fun=logsumexp))
make_tree_categorical_maximum = (
lambda elim_order: make_tree_categorical_collapser(elim_order,
collapse_fun=np.max))
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import itertools
import operator
import autograd.extend as ag_extend
import autograd.numpy as np
import autograd.numpy.numpy_vspaces as numpy_vspaces
import autograd.tracer as ag_tracer
import funcsigs
from .patterns import (Subtract, Add, Dot, Multiply, Divide, TrueDivide, Node,
Val, Einsum, Str, Choice, Segment, Log, Sum, Tuple,
VSpaceAdd, Any, Power, Scalar, OneHot, Transpose, Inv,
Logdet, AddN, Star)
from .tracers import add_n
from .tracers import logdet
from .tracers import make_dummy
from .tracers import subvals
from .util import split_einsum_formula
from . import matchers
from . import patterns
from . import tracers
_einsum_range = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_einsum_index_set = frozenset(_einsum_range)
### eager rewrites replace individual functions with constant-folding versions
def is_constant(x):
return not ag_tracer.isbox(x)
def _is_constant_val(x, val):
return is_constant(x) and np.all(x == val)
_is_constant_zero = functools.partial(_is_constant_val, val=0.)
_is_constant_one = functools.partial(_is_constant_val, val=1.)
def _multiply_as_einsum(x, y):
x_arr, y_arr = np.array(x), np.array(y)
new_shape = np.broadcast(x_arr, y_arr).shape
out_formula = _einsum_range[:len(new_shape)]
next_index = iter(_einsum_range[len(new_shape):])
def _make_broadcast_formula(z):
offset = len(new_shape) - len(z.shape)
return ''.join([out_formula[offset + i]
if z.shape[i] == new_shape[offset + i]
else next_index.next()
for i in range(len(z.shape))])
new_formula = '{},{}->{}'.format(_make_broadcast_formula(x_arr),
_make_broadcast_formula(y_arr),
out_formula)
return np.einsum(new_formula, x, y)
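# Illustrative trace (documentation only): for x of shape (3, 1) and y of
# shape (4,), the broadcast shape is (3, 4) and the function above emits
# np.einsum('ac,b->ab', x, y); the size-1 index 'c' is summed out (a no-op).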
def maybe_multiply(x, y):
if _is_constant_zero(x) or _is_constant_zero(y):
return np.zeros(np.broadcast(x, y).shape, dtype=np.result_type(x, y))
if _is_constant_one(x) and np.shape(y) == np.broadcast(x, y).shape:
return y
if _is_constant_one(y) and np.shape(x) == np.broadcast(x, y).shape:
return x
return _multiply_as_einsum(x, y)
def maybe_add(x, y):
if _is_constant_zero(x) and np.shape(y) == np.broadcast(x, y).shape:
return y
if _is_constant_zero(y) and np.shape(x) == np.broadcast(x, y).shape:
return x
return add_n(x, y)
def maybe_subtract(x, y):
if _is_constant_zero(y) and np.shape(x) == np.broadcast(x, y).shape:
return x
return add_n(x, _multiply_as_einsum(-1, y))
def maybe_getitem(x, idx):
if isinstance(idx, slice):
return list(x)[idx]
else:
return x[idx]
def dot_as_einsum(x, y):
if x.ndim == 0 or y.ndim == 0: return np.einsum(',->', x, y)
if x.ndim == y.ndim == 1: return np.einsum('i,i->', x, y)
if x.ndim == 2 and y.ndim == 1: return np.einsum('ij,j->i', x, y)
if x.ndim == 1 and y.ndim == 2: return np.einsum('i,ij->j', x, y)
return np.einsum('{}ab,{}bc->{}ac'.format(
_einsum_range[:x.ndim-2][::-1], _einsum_range[:y.ndim-2][::-1],
_einsum_range[:max([x.ndim, y.ndim])-2][::-1]), x, y)
def maybe_divide(x, y):
if _is_constant_one(y) and np.shape(x) == np.broadcast(x, y).shape:
return x
elif _is_constant_one(x) and np.shape(y) == np.broadcast(x, y).shape:
return y ** -1
return _multiply_as_einsum(x, y ** -1)
# TODO(mhoffman): Consider exponent == 0. E.g., what if base could also be 0?
def maybe_power(base, exponent):
if exponent == 1:
return base
elif exponent == 0:
return 1
elif isinstance(exponent, int) and exponent > 0 and exponent < 10:
formula = ''.join([_einsum_range[i] for i in range(len(base.shape))])
in_formulas = [formula for _ in range(exponent)]
out_formula = formula
formula = _reconstitute_einsum_formula(in_formulas, out_formula)
args = [base for _ in range(exponent)]
return np.einsum(formula, *args)
else:
return base ** exponent
def _rename_formula_indices(formula):
"""Renames einsum formula indices to be in a canonical order."""
# First, ensure that indices are packed.
translation_dict = {index: _einsum_range[i] for i, index in
enumerate(np.unique([index for index in formula
if index in _einsum_index_set]))}
translator = lambda x: translation_dict[x] if x in translation_dict else x
formula = [translator(i) for i in formula]
# Next, ensure that they're alphabetical in order of appearance.
translation_dict = {}
for index in formula:
if index not in translation_dict and index in _einsum_index_set:
translation_dict[index] = _einsum_range[len(translation_dict)]
return ''.join([translator(i) for i in formula])
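# Illustrative trace (documentation only): _rename_formula_indices('zq,qb->zb')
# first packs the indices (yielding 'cb,ba->ca') and then relabels them in
# order of appearance, returning 'ab,bc->ac'.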
def debroadcast_formula(formula, *arg_ndims):
"""Given an einsum's formula string and the dimensions of the arguments
provided to the einsum, converts any broadcasting ellipses into appropriate
letters.
"""
formula = _rename_formula_indices(formula)
num_chars = len(_einsum_index_set.intersection(set(formula)))
remaining_letters = _einsum_range[num_chars:]
in_formulas, out_formula = split_einsum_formula(formula)
max_ellipsis_dims = -float('inf')
for i, in_formula in enumerate(in_formulas):
in_formula = decompose_formula(in_formula)
if '...' in in_formula:
num_ellipsis_dims = arg_ndims[i]-len(in_formula)+1
max_ellipsis_dims = max(max_ellipsis_dims, num_ellipsis_dims)
ellipsis_idx = in_formula.index('...')
in_formula[ellipsis_idx] = remaining_letters[:num_ellipsis_dims][::-1]
in_formulas[i] = ''.join(in_formula)
if '...' in out_formula:
out_formula = out_formula.replace(
'...', remaining_letters[:max_ellipsis_dims][::-1])
new_formula = _reconstitute_einsum_formula(in_formulas, out_formula)
return _rename_formula_indices(new_formula)
def _zeros_like_einsum(formula, args1, args2):
args = args1 + args2
input_formulas, output_formula = split_einsum_formula(formula)
output_formula = decompose_formula(output_formula)
input_formulas = input_formulas[:len(args1)] + input_formulas[len(args1)+1:]
input_formulas = [decompose_formula(input_formula) for
input_formula in input_formulas]
out_shape = []
for output_index in output_formula:
for i, input_formula in enumerate(input_formulas):
position = input_formula.index(output_index)
if position != -1 and output_index != '...':
out_shape.append(args[i].shape[position])
break
elif position != -1 and output_index == '...':
for offset in range(args[i].ndim-len(input_formula)+1):
out_shape.append(args[i].shape[position+offset])
return np.zeros(out_shape, dtype=np.result_type(*args))
def maybe_einsum(formula, *args):
formula = debroadcast_formula(formula, *[np.ndim(arg) for arg in args])
if any(_is_constant_zero(arg) for arg in args):
return _zeros_like_einsum(formula, args, ())
if len(args) == 1:
input_formulas, output_formula = split_einsum_formula(formula)
if input_formulas[0] == output_formula:
return args[0]
return constant_folding_einsum(formula, *args)
def maybe_vspace_add(vs, x_prev, x_new):
if x_prev is None:
return x_new
if isinstance(vs, numpy_vspaces.ArrayVSpace):
return maybe_add(x_prev, x_new)
return vs.add(x_prev, x_new)
def swapaxes(x, axis1, axis2):
"""Implements np.swapaxes as an np.einsum."""
in_formula = _einsum_range[:len(x.shape)]
out_formula = list(in_formula)
out_formula[axis1] = in_formula[axis2]
out_formula[axis2] = in_formula[axis1]
return np.einsum('{}->{}'.format(in_formula, ''.join(out_formula)), x)
### rewrite rules replace whole subgraphs with other subgraphs
class Rule(collections.namedtuple('BasicRule',
['pattern', 'rewriter', 'preds'])):
def __new__(cls, pattern, rewriter, preds=()):
return super(Rule, cls).__new__(cls, pattern, rewriter, preds)
_add_pattern = Choice((Add, Val('x'), (Add, Val('y'), Val('z'))),
(Add, (Add, Val('x'), Val('y')), Val('z')))
replace_add = Rule(_add_pattern, lambda x, y, z: add_n(x, y, z))
_add_addn_pattern = Choice((Add, Val('x'), (AddN, Segment('args'))),
(Add, (AddN, Segment('args')), Val('x')))
replace_add_addn = Rule(_add_addn_pattern,
lambda x, args: add_n(x, *args))
_addn_addn_pattern = (AddN,
Segment('args1'),
(AddN, Segment('parent_args')),
Segment('args2'))
replace_addn_addn = Rule(
_addn_addn_pattern,
lambda args1, parent_args, args2: add_n(*(parent_args + args1 + args2)))
def _duplicated_addn(x, args1, args2, args3):
return add_n(2 * x, *(args1 + args2 + args3))
_duplicated_addn_pattern = (AddN,
Segment('args1'),
Val('x'),
Segment('args2'),
Val('x'),
Segment('args3'))
replace_duplicated_addn = Rule(_duplicated_addn_pattern, _duplicated_addn)
# TODO(mattjj): figure out why we want sums as einsums, since not multiplies
_sum_pat = Choice((Sum, Node('x'), Choice(Val('axis'), Tuple('axis'), None)),
(Sum, Node('x')))
def _sum_as_einsum(x, axis=None):
if axis is None:
return np.einsum('{}->'.format(_einsum_range[:x.ndim]), x)
axis = axis if isinstance(axis, (tuple, list)) else [axis]
input_formula = _einsum_range[:x.ndim]
axis = [i % x.ndim for i in axis]
output_formula = ''.join([input_formula[i] for i in range(x.ndim)
if i not in axis])
return np.einsum('{}->{}'.format(input_formula, output_formula), x)
replace_sum = Rule(_sum_pat, _sum_as_einsum)
## move log behind an einsum if the other argument is a onehot
_log_oneh_einsum_pat = (Log,
(Einsum, Str('formula'),
(OneHot, Node('x'), Scalar('depth')),
Val('y')))
def _log_behind_onehot_einsum_pred(formula, x, depth, y):
"""Confirms sum is only over index added by one_hot."""
# TODO(matthewjmackay): broadcasting support might be needed here
if '...' in formula:
return False
in_formulas, out_formula = split_einsum_formula(formula)
oneh_index = in_formulas[0][-1]
other_indices = set([ch for in_formula in in_formulas
for ch in in_formula])
other_indices.remove(oneh_index)
out_indices = set(out_formula)
return other_indices == out_indices
def _log_behind_onehot_einsum(formula, x, depth, y):
return np.einsum(formula, tracers.one_hot(x, depth), np.log(y))
log_behind_onehot_einsum = Rule(_log_oneh_einsum_pat, _log_behind_onehot_einsum,
(_log_behind_onehot_einsum_pred,))
## move log-add behind an einsum if the other argument is a onehot
_log_addn_oneh_einsum_pat = (Log,
(AddN, Val('x'),
(Einsum, Str('formula'), Scalar('scale'),
(OneHot, Node('y'), Scalar('depth')),
Val('z'))))
def _log_addn_behind_onehot_einsum_pred(x, formula, scale, y, depth, z):
"""Confirms sum is only over index added by one_hot"""
# TODO(matthewjmackay): broadcasting support might be needed here
if '...' in formula:
return False
in_formulas, out_formula = split_einsum_formula(formula)
oneh_index = in_formulas[1][-1]
other_indices = set([ch for in_formula in in_formulas
for ch in in_formula])
other_indices.remove(oneh_index)
out_indices = set(out_formula)
return other_indices == out_indices
def _log_addn_behind_onehot_einsum(x, formula, scale, y, depth, z):
in_formulas, out_formula = split_einsum_formula(formula)
in_formulas = in_formulas[1:]
formula = _reconstitute_einsum_formula(in_formulas, out_formula)
return np.einsum(formula,
tracers.one_hot(y, depth),
np.log(add_n(x, scale*z)))
log_addn_behind_onehot_einsum = Rule(_log_addn_oneh_einsum_pat,
_log_addn_behind_onehot_einsum,
(_log_addn_behind_onehot_einsum_pred,))
## canonicalizing einsums
_einsum_distribute_pat = \
(Einsum, Str('formula'),
Segment('args1'),
(AddN('op'), Segment('add_args')),
Segment('args2'))
def _distribute_einsum(formula, op, add_args, args1, args2):
# Make sure any implicit broadcasting isn't lost.
broadcast_shape = np.broadcast(*add_args).shape
dtype = np.result_type(*add_args)
add_args = [arg * np.ones(broadcast_shape, dtype=dtype)
if not hasattr(arg, 'shape') or broadcast_shape != arg.shape
else arg
for arg in add_args]
return op(*[np.einsum(formula, *(args1 + (arg,) + args2))
for arg in add_args])
distribute_einsum = Rule(_einsum_distribute_pat, _distribute_einsum)
_einsum_transpose_pat = \
(Einsum, Str('formula'),
Segment('args1'),
(Transpose, Val('x')),
Segment('args2'))
def _transpose_inside_einsum(formula, args1, x, args2):
in_formulas, out_formula = split_einsum_formula(formula)
i = len(args1)
new_formula = _reconstitute_einsum_formula(
in_formulas[:i] + [in_formulas[i][::-1]] + in_formulas[i+1:],
out_formula)
new_args = args1 + (x,) + args2
return np.einsum(new_formula, *new_args)
transpose_inside_einsum = Rule(_einsum_transpose_pat, _transpose_inside_einsum)
def _remove_list_elements(list_to_thin, indices_to_remove):
return [item for i, item in enumerate(list_to_thin)
if i not in indices_to_remove]
def _remove_einsum_arg(formula, args1, args2):
in_formulas, out_formula = split_einsum_formula(formula)
new_formula = _reconstitute_einsum_formula(
_remove_list_elements(in_formulas, [len(args1)]), out_formula)
return np.einsum(new_formula, *(args1 + args2))
# Matches things like add_n(x*a, x*b) that can be rewritten as x * add_n(a, b).
_gatherable_add_n_einsum_pat = (
AddN, Star((Einsum, Str('formula'),
Segment('args1'), Scalar('x'), Segment('args2')),
accumulate=['formula', 'args1', 'args2']))
def _add_n_remaining_einsums(formula, args1, args2):
return add_n(*[_remove_einsum_arg(formula_i, args1_i, args2_i)
for formula_i, args1_i, args2_i in zip(formula, args1, args2)])
def _gather_log_add_n_einsum(x, formula, args1, args2):
return add_n(np.log(x), np.log(_add_n_remaining_einsums(formula, args1, args2)))
gather_log_add_einsum = Rule((Log, _gatherable_add_n_einsum_pat),
_gather_log_add_n_einsum)
def _gather_pow_add_n_einsum(x, formula, args1, args2, exponent):
return (np.power(x, exponent) *
np.power(_add_n_remaining_einsums(formula, args1, args2), exponent))
gather_pow_add_einsum = Rule(
(Power, _gatherable_add_n_einsum_pat, Scalar('exponent')),
_gather_pow_add_n_einsum)
def _gather_inv_add_einsum(x, formula, args1, args2):
return np.power(x, -1) * np.linalg.inv(_add_n_remaining_einsums(formula, args1, args2))
gather_inv_add_einsum = Rule((Inv, _gatherable_add_n_einsum_pat),
_gather_inv_add_einsum)
def _gather_logdet_add_einsum(x, formula, args1, args2):
new_sum = _add_n_remaining_einsums(formula, args1, args2)
return new_sum.shape[-1] * np.log(x) + logdet(new_sum)
gather_logdet_add_einsum = Rule((Logdet, _gatherable_add_n_einsum_pat),
_gather_logdet_add_einsum)
def _add_powers_within_einsum(formula, x, args1, args2, args3, exponent1,
exponent2):
in_formulas, out_formula = split_einsum_formula(formula)
new_formula = _reconstitute_einsum_formula(
_remove_list_elements(in_formulas, [len(args1) + 1 + len(args2)]),
out_formula)
return np.einsum(new_formula, *(args1 + (x ** (exponent1 + exponent2),)
+ args2 + args3))
def _add_powers_within_einsum_pred(formula, x, args1, args2, args3, exponent1=1,
exponent2=1):
in_formulas, out_formula = split_einsum_formula(formula)
x_indices = [len(args1), len(args1) + 1 + len(args2)]
if in_formulas[x_indices[0]] != in_formulas[x_indices[1]]:
return False
x_index_names = frozenset(in_formulas[x_indices[0]] +
in_formulas[x_indices[1]])
if any([not frozenset(in_formula).isdisjoint(x_index_names)
for i, in_formula in enumerate(in_formulas) if i not in x_indices]):
return False
return True
add_powers_within_einsum = Rule((Einsum, Str('formula'), Segment('args1'),
(Power, Val('x'), Scalar('exponent1')),
Segment('args2'),
(Power, Val('x'), Scalar('exponent2')),
Segment('args3')),
_add_powers_within_einsum,
(_add_powers_within_einsum_pred,))
def _increment_negative_power_in_einsum_r(formula, x, exponent,
args1, args2, args3):
in_formulas, out_formula = split_einsum_formula(formula)
new_formula = _reconstitute_einsum_formula(
in_formulas[:len(args1) + 1 + len(args2)] +
in_formulas[len(args1) + 2 + len(args2):], out_formula)
return np.einsum(new_formula,
*(args1 + (x ** (exponent + 1),) + args2 + args3))
# TODO(mhoffman): Add predicates that make sure formulas match.
increment_negative_power_in_einsum_r = Rule(
(Einsum, Str('formula'), Segment('args1'),
(Power, Node('x'), Scalar('exponent', lambda exponent: exponent < 0)),
Segment('args2'), Node('x'), Segment('args3')),
_increment_negative_power_in_einsum_r)
# TODO(mhoffman): Figure out cleaner way of dealing with commuting args.
def _increment_negative_power_in_einsum_l(formula, x, exponent,
args1, args2, args3):
in_formulas, out_formula = split_einsum_formula(formula)
new_formula = _reconstitute_einsum_formula(
in_formulas[:len(args1)] + in_formulas[len(args1) + 1:], out_formula)
return np.einsum(new_formula,
*(args1 + args2 + (x ** (exponent + 1),) + args3))
# TODO(mhoffman): Add predicates that make sure formulas match.
increment_negative_power_in_einsum_l = Rule(
(Einsum, Str('formula'), Segment('args1'),
Node('x'), Segment('args2'),
(Power, Node('x'), Scalar('exponent', lambda exponent: exponent < 0)),
Segment('args3')),
_increment_negative_power_in_einsum_l)
_einsum_composition_pat = \
(Einsum, Str('formula'),
Segment('args1'),
(Einsum, Str('parent_formula'), Segment('parent_args')),
Segment('args2'))
def decompose_formula(formula):
"""Given a string of indices for an argument to an einsum, returns a list
of the letters used, with '...' treated as an atomic letter.
"""
formula = formula.replace('...', '.')
decomposed = []
for idx in formula:
if idx == '.':
decomposed.append('...')
else:
decomposed.append(idx)
return decomposed
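# Illustrative sketch (not part of the original module): '...' survives as a
# single atomic element of the decomposition.
def _example_decompose_formula():
  assert decompose_formula('ij') == ['i', 'j']
  assert decompose_formula('...ij') == ['...', 'i', 'j']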
def _compose_einsums(formula, args1, args2, parent_formula, parent_args):
parent_formula = debroadcast_formula(parent_formula,
*[np.ndim(arg) for arg in parent_args])
parent_in_formulas, parent_out_formula = split_einsum_formula(parent_formula)
parent_ndim = len(parent_out_formula)
arg_ndims = ([np.ndim(arg) for arg in args1] +
[parent_ndim] +
[np.ndim(arg) for arg in args2])
formula = debroadcast_formula(formula, *arg_ndims)
in_formulas, out_formula = split_einsum_formula(formula)
i = len(args1)
if len(parent_out_formula) != len(in_formulas[i]):
raise ValueError('Input formula {} and parent formula {} have'
                     ' inconsistent numbers of indexes; broadcasting'
                     ' problem?'.format(in_formulas[i], parent_out_formula))
  fresh_letters = iter(_einsum_range)
  subs_map = collections.defaultdict(lambda: next(fresh_letters))
# splice out the old input formula
old_in_formula = in_formulas[i]
in_formulas = in_formulas[:i] + in_formulas[i+1:]
# canonicalize input and output formulas (optional, for cleanliness)
in_formulas = [''.join(subs_map[idx] for idx in subs) for subs in in_formulas]
out_formula = ''.join(subs_map[idx] for idx in out_formula)
# identify parent output indices with corresponding input indices
subs_map.update((pidx + '_parent', subs_map[idx])
for pidx, idx in zip(parent_out_formula, old_in_formula))
# update the parent input formulas
parent_in_formulas = [''.join(subs_map[idx + '_parent'] for idx in subs)
for subs in parent_in_formulas]
# splice the formula lists and arguments
new_in_formulas = in_formulas[:i] + parent_in_formulas + in_formulas[i:]
new_args = args1 + parent_args + args2
new_formula = _reconstitute_einsum_formula(new_in_formulas, out_formula)
return np.einsum(new_formula, *new_args)
combine_einsum_compositions = Rule(_einsum_composition_pat, _compose_einsums)
def _einsum_repeated_one_hot(formula, x, depth, args1, args2, args3):
in_formulas, out_formula = split_einsum_formula(formula)
new_letter = in_formulas[len(args1)][-1]
old_letter = in_formulas[len(args1) + 1 + len(args2)][-1]
if old_letter in out_formula:
old_letter, new_letter = new_letter, old_letter
in_formulas = in_formulas[:len(args1)] + in_formulas[len(args1) + 1:]
else:
in_formulas = (in_formulas[:len(args1) + 1 + len(args2)] +
in_formulas[len(args1) + 1 + len(args2) + 1:])
for i in range(len(in_formulas)):
in_formulas[i] = in_formulas[i].replace(old_letter, new_letter)
one_hot_x = tracers.one_hot(x, depth)
return np.einsum(_reconstitute_einsum_formula(in_formulas, out_formula),
*(args1 + (one_hot_x,) + args2 + args3))
def _einsum_repeated_one_hot_pred(formula, x, depth, args1, args2, args3):
in_formulas, out_formula = split_einsum_formula(formula)
x_letter_1 = in_formulas[len(args1)][-1]
x_letter_2 = in_formulas[len(args1) + 1 + len(args2)][-1]
return (x_letter_1 != x_letter_2 and
not (x_letter_1 in out_formula and x_letter_2 in out_formula))
einsum_repeated_one_hot = Rule((Einsum, Str('formula'), Segment('args1'),
(OneHot, Val('x'), Scalar('depth')),
Segment('args2'),
(OneHot, Val('x'), Scalar('depth')),
Segment('args3')),
_einsum_repeated_one_hot,
(_einsum_repeated_one_hot_pred,))
def _reconstitute_einsum_formula(input_formulas, output_formula):
return '{}->{}'.format(','.join(input_formulas), output_formula)
## Miscellaneous expansions
def _log_einsum_expand(formula, args):
assert _check_log_einsum(formula)
result = np.log(args[0])
for arg in args[1:]:
result += np.log(arg)
return result
def _check_log_einsum(formula):
input_formulas, output_formula = split_einsum_formula(formula)
unique_input_indexes = set(list(''.join(input_formulas)))
return unique_input_indexes == set(list(output_formula))
replace_log_einsum = Rule((Log, (Einsum, Str('formula', _check_log_einsum),
Segment('args'))),
_log_einsum_expand)
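# Illustrative sketch (not part of the original module): the log of an einsum
# factors into a sum of logs only when no index is summed out, which is what
# _check_log_einsum tests.
def _example_check_log_einsum():
  assert _check_log_einsum('ij,ij->ij')
  assert not _check_log_einsum('ij,jk->ik')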
## replacing autograd internal ops
replace_vspace_add = Rule((VSpaceAdd, Any('vs'), Val('x_prev'), Val('x_new')),
lambda vs, x_prev, x_new: x_prev + x_new)
## Miscellaneous simplifications
def constant_folding_einsum(formula, *args):
in_formulas, out_formula = split_einsum_formula(formula)
const_indices = []
node_indices = []
const_letters = set()
node_letters = set()
for i, (in_formula, arg) in enumerate(zip(in_formulas, args)):
if is_constant(arg):
const_indices.append(i)
const_letters.update(in_formula)
else:
node_indices.append(i)
node_letters.update(in_formula)
const_args = []
const_in_formulas = []
indices_to_remove = []
for i in const_indices:
if not node_letters.intersection(in_formulas[i]):
const_args.append(args[i])
const_in_formulas.append(in_formulas[i])
indices_to_remove.append(i)
elif node_letters.issuperset(in_formulas[i]) and np.all(args[i] == 1):
indices_to_remove.append(i)
if not indices_to_remove:
return np.einsum(formula, *args)
folded_constant = 1
if const_args:
const_letters = frozenset(''.join(const_in_formulas))
const_out_formula = ''.join([i for i in out_formula if i in const_letters])
folded_constant = np.einsum('{}->{}'.format(','.join(const_in_formulas),
const_out_formula), *const_args)
if len(indices_to_remove) == len(in_formulas):
return folded_constant
retained_in_formulas = ','.join([in_formulas[i]
for i in range(len(in_formulas))
if i not in indices_to_remove])
retained_args = [arg for i, arg in enumerate(args)
if i not in indices_to_remove]
if np.isscalar(folded_constant) and folded_constant == 0:
return 0.
elif np.isscalar(folded_constant) and folded_constant == 1:
return np.einsum('{}->{}'.format(retained_in_formulas, out_formula),
*retained_args)
else:
return np.einsum('{},{}->{}'.format(const_out_formula,
retained_in_formulas, out_formula),
*([folded_constant] + retained_args))
# TODO(mhoffman): This isn't 100% kosher for negative inputs.
# e.g., ((-1) ** 2) ** 1.5 == 1, but (-1) ** (2 * 1.5) == -1.
fold_power = Rule(
(Power, (Power, Val('base'), Scalar('power1')), Scalar('power2')),
lambda base, power1, power2: maybe_power(base, power1 * power2))
### rewriter functions
def make_rewriter(rule):
"""Given a rewrite Rule, produces an attempt_rewrite function."""
pattern, rewriter, preds = rule
match = matchers.matcher(pattern)
def attempt_rewrite(node):
"""Given a node, attempt to pattern-match it and apply an in-place rewrite.
Args:
node: an ExprNode against which to match the Rule's pattern and, given a
match, apply an in-place rewrite.
Returns:
      If the rewrite could not be applied, returns a falsey value. If the
      rewrite was successful, returns the node (which is modified in place).
Side-effects:
If a rewrite was successful then the returned node is modified in-place,
and in particular its parents are changed.
"""
bindings = match(node)
if bindings is not False:
rewriter_env = dict(node.kwargs, **bindings)
if all(pred(**rewriter_env) for pred in preds):
new_expr = run_rewriter(rewriter, rewriter_env)
tracers.replace_node_with_expr(node, new_expr) # modifies node in-place
return node
return False
return attempt_rewrite
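# Illustrative sketch (not part of the original module): trace an expression,
# then use make_rewriter with replace_add to collapse nested adds into a
# single add_n node, mutating the matched node in place.
def _example_make_rewriter():
  expr = tracers.make_expr(lambda a, b, c: a + (b + c), 1., 2., 3.,
                           names=['a', 'b', 'c'])
  rewrite = make_rewriter(replace_add)
  assert rewrite(expr.expr_node)
  assert expr.expr_node.fun.__name__ == 'add_n'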
def run_rewriter(rewriter, symbolic_env):
"""Runs rewriter on a symbolic environment and returns resulting expression.
Args:
rewriter: a rewriter function to be traced into a new expression.
    symbolic_env: a dict of bindings that contains the rewriter's arguments as
      keys and can have literals or ExprNodes as values.
Returns:
A new expression built on top of the ExprNodes in env.
"""
# include default argument values in the environment
sig = funcsigs.signature(rewriter)
defaults = {name: param.default for name, param in sig.parameters.items()
if param.default is not param.empty}
symbolic_env = dict(defaults, **symbolic_env)
# trace the rewriter function on dummy values to produce a new subexpression
args = [symbolic_env[name] for name in sig.parameters.keys()]
flat_args, unflatten = _flatten(args)
symbolic_args = ((i, arg) for i, arg in enumerate(flat_args)
if isinstance(arg, tracers.ExprNode))
argnums, argnodes = zip(*symbolic_args)
def _rewriter(*node_vals):
return rewriter(*unflatten(subvals(flat_args, zip(argnums, node_vals))))
node_vals = [tracers.make_dummy(argnode) for argnode in argnodes]
subexpr = tracers.make_expr(_rewriter, *node_vals)
# return the new subexpression evaluated in the symbolic environment
return tracers.inline_expr(subexpr, dict(zip(subexpr.free_vars, argnodes)))
def _flatten(obj):
"""Flatten a potentially-nested list/tuple data structure into a flat list."""
if not isinstance(obj, (list, tuple)):
return [obj], lambda lst: lst[0]
constructor = type(obj)
if not obj: return [], lambda lst: constructor()
sublists, unflattens = zip(*map(_flatten, obj))
lengths = list(map(len, sublists))
starts = np.subtract(np.cumsum(lengths), lengths)
flat_list = [elt for sublist in sublists for elt in sublist]
def unflatten(lst):
sublists = (lst[start:start+l] for start, l in zip(starts, lengths))
return constructor(unflatten(sublist)
for sublist, unflatten in zip(sublists, unflattens))
return flat_list, unflatten
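# Illustrative sketch (not part of the original module): _flatten returns the
# leaves in order plus a function that rebuilds the original nesting.
def _example_flatten():
  flat, unflatten = _flatten((1, [2, 3], (4,)))
  assert flat == [1, 2, 3, 4]
  assert unflatten(flat) == (1, [2, 3], (4,))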
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pattern definitions for use with matcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autograd.extend as ag_extend
import autograd.util as ag_util
from autograd.numpy.numpy_vspaces import ArrayVSpace
import autograd.numpy as np
import autograd.numpy.numpy_boxes as np_boxes
from . import tracers
## util
def _point_free_logical(op):
def make_checker(*predicates):
def check(x):
return op(pred(x) for pred in predicates)
pred_names = (pred.__name__ for pred in predicates)
check.__name__ = '{}({})'.format(op.__name__, ', '.join(pred_names))
return check
return make_checker
_or = _point_free_logical(any)
_and = _point_free_logical(all)
## predicates for testing types of literals and nodes in our graphs
def is_node(x): return isinstance(x, tracers.ExprNode)
def is_array_literal(x): return isinstance(x, np.ndarray)
def is_array_node(x): return is_node(x) and isinstance(x.vs, ArrayVSpace)
is_array = _or(is_array_literal, is_array_node)
def is_scalar_literal(x): return np.isscalar(x)
def is_scalar_node(x):
return is_node(x) and isinstance(x.vs, ArrayVSpace) and x.vs.shape == ()
is_scalar = _or(is_scalar_literal, is_scalar_node)
def is_tuple_literal(x): return isinstance(x, tuple)
is_tuple = is_tuple_literal
def is_list_literal(x): return isinstance(x, list)
is_list = is_list_literal
def is_dict_literal(x): return isinstance(x, dict)
is_dict = is_dict_literal
def is_string_literal(x): return isinstance(x, str)
is_string = is_string_literal
## patterns
def _make_convenience_pattern(*preds):
def make_pattern(name=None, extra_pred=None):
    all_preds = (preds + (extra_pred,)) if extra_pred else preds
return ('?', name, all_preds)
return make_pattern
Any = _make_convenience_pattern()
Array = _make_convenience_pattern(is_array)
Node = _make_convenience_pattern(is_array_node)
Str = _make_convenience_pattern(is_string)
Scalar = _make_convenience_pattern(is_scalar)
Val = _make_convenience_pattern(_or(is_array, is_scalar))
Tuple = _make_convenience_pattern(is_tuple)
List = _make_convenience_pattern(is_list)
Dict = _make_convenience_pattern(is_dict)
# generate a pattern for each Autograd primitive
def _make_primitive_checker(name):
def check_node_name(node):
return is_node(node) and node.fun.__name__ == name
return check_node_name
def _make_primitive_pattern(fun, pattern_name):
def pat_maker(name=None):
return ('?', name,
(_make_primitive_checker(fun.__name__),),
lambda node: node.fun)
pat_maker.__name__ = pattern_name
return pat_maker
def _import_primitives_no_clobber(new, old):
def is_primitive(fun): return callable(fun) and hasattr(fun, 'fun')
for name, obj in old.items():
titlecase_name = ''.join(word.title() for word in name.split('_'))
if is_primitive(obj) and titlecase_name not in new:
new[titlecase_name] = _make_primitive_pattern(obj, titlecase_name)
_import_primitives_no_clobber(globals(), np.__dict__)
_import_primitives_no_clobber(globals(), np.linalg.__dict__)
_import_primitives_no_clobber(globals(), np_boxes.ArrayBox.__dict__)
_import_primitives_no_clobber(globals(), {'add_n': tracers.add_n})
_import_primitives_no_clobber(globals(), {'logdet': tracers.logdet})
_import_primitives_no_clobber(globals(), {'one_hot': tracers.one_hot})
EnvLookup = _make_primitive_pattern(tracers.env_lookup, 'EnvLookup')
## patterns for Autograd internals
def _is_vspace_add(node):
return (node.fun is ag_util.func(ag_extend.VSpace.add) or
node.fun is ag_util.func(ag_extend.VSpace.mut_add))
def VSpaceAdd(name=None):
return ('?', name, (_is_vspace_add,))
## convenience combinators that operate on patterns
def Choice(*alternatives): return ('?:choice',) + alternatives
def List(*list_elements): return ('List',) + list_elements
def Segment(name=None): return Star(Any, name)
def Star(pat, name=None, accumulate=[]): return ('??', name, pat, accumulate)
def Not(pattern): return ('?:not', pattern)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log probability functions.
It complements the collection of log-normalizers in `exponential_families.py`.
The API follows scipy.stats. Each log probability function's name is the name
of its associated distribution class followed by "_log_prob". All functions
have the same arguments (and order of arguments) as their associated SciPy
`logpdf` and `logpmf`.
Unlike SciPy log probs, these functions return a scalar value; that is, they
sum across all dimensions. We make this simplification to ease
canonicalization, since it avoids dealing with a downstream reduce-sum.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autograd.numpy as np
from autograd.scipy import special
from .tracers import one_hot
def categorical_gen_log_prob(x, p):
"""Log-probability of `categorical_gen` in PPLHam."""
x_one_hot = one_hot(x, len(p))
log_prob = np.sum(x_one_hot * np.log(p))
return log_prob
def dirichlet_gen_log_prob(x, alpha):
"""Log-probability of `dirichlet_gen` in scipy.stats."""
log_prob = np.sum((alpha - 1) * np.log(x))
log_prob -= np.sum(special.gammaln(alpha))
log_prob += np.sum(special.gammaln(np.sum(alpha, -1)))
return log_prob
def multinomial_gen_log_prob(x, n, p):
"""Log-probability of `multinomial_gen` in scipy.stats."""
if n != 1:
raise NotImplementedError()
log_prob = np.sum(x * np.log(p))
return log_prob
# TODO(trandustin): need rewrite rules to handle n > 1
# log_prob = np.sum(x * np.log(p))
# log_prob -= np.sum(special.gammaln(x + 1))
# log_prob += np.sum(special.gammaln(n + 1))
# return log_prob
def norm_gen_log_prob(x, loc, scale):
"""Log-probability of `norm_gen` in scipy.stats."""
get_dim = lambda x: np.prod(x.shape) if hasattr(x, "shape") else 1
precision = 1.0 / scale ** 2
errors = x - loc
log_prob = -0.5 * get_dim(errors) * np.log(2.0 * np.pi)
log_prob += 0.5 * get_dim(errors) * np.sum(np.log(precision))
log_prob += -0.5 * np.sum(precision * errors * errors)
return log_prob
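# Illustrative sketch (not part of the original module): as described in the
# module docstring, these log-probs sum over all dimensions, so a batch of
# i.i.d. standard normals yields a single scalar.
def _example_norm_gen_log_prob():
  x = np.array([0., 1., 2.])
  expected = np.sum(-0.5 * np.log(2.0 * np.pi) - 0.5 * x ** 2)
  assert np.allclose(norm_gen_log_prob(x, 0., 1.), expected)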
# TODO(trandustin): Change signature to follow `scipy.stats.gamma`'s
# (`x, a, loc=0, scale=1`). Using `make_log_joint_fn` with `ph.gamma` will fail
# unless one specifies `gamma.rvs(a, b)`. This lets `b` implicitly act as the
# rate parameter, as it does here, even though the `gamma.rvs` call will
# incorrectly set `b` as `loc`.
def gamma_gen_log_prob(x, a, b):
"""Log-probability of `gamma_gen` in scipy.stats (via shape/rate)."""
return (a - 1) * np.log(x) - b * x + a * np.log(b) - special.gammaln(a)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various utility functions and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import autograd
from autograd import numpy as np
from autograd.numpy import numpy_boxes, numpy_vspaces
import autograd.util as ag_util
import numpy as onp
def Enum(name, fields):
return type(name, (), dict(zip(map(str.upper, fields), itertools.count())))
SupportTypes = Enum('SupportTypes', ['REAL', 'NONNEGATIVE', 'UNIT_INTERVAL',
'SIMPLEX', 'BINARY', 'INTEGER', 'ONE_HOT'])
support_type_to_name = {i: name for name, i in SupportTypes.__dict__.items()
if name[0] != '_'}
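# Illustrative sketch (not part of the original module): Enum builds a class
# whose upper-cased field names map to consecutive integers starting at 0.
def _example_enum():
  Color = Enum('Color', ['red', 'green'])
  assert Color.RED == 0 and Color.GREEN == 1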
def split_einsum_formula(formula):
joined_input_formulas, output_formula = formula.split('->')
return joined_input_formulas.split(','), output_formula
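# Illustrative sketch (not part of the original module): split an einsum
# formula into its per-argument input formulas and its output formula.
def _example_split_einsum_formula():
  in_formulas, out_formula = split_einsum_formula('ij,jk->ik')
  assert in_formulas == ['ij', 'jk'] and out_formula == 'ik'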
# Monkey-patch to register int and boolean types with ArrayBox.
int_types = [int, onp.int16, onp.int32, onp.int64]
bool_types = [bool, onp.bool_]
for type_ in int_types + bool_types:
numpy_boxes.ArrayBox.register(type_)
numpy_vspaces.ArrayVSpace.register(type_)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tracers to build expressions as computation graphs on free variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import funcsigs
import functools
import hashlib
import inspect
import itertools
from numbers import Number
from types import FunctionType, CodeType
import warnings
# this import has side-effects: it registers new Boxes/VSpaces with Autograd
from . import util
import autograd.core as ag_core
from autograd.core import vspace
from autograd.extend import defvjp
from autograd.extend import defvjp_argnums
from autograd.extend import primitive
from autograd.extend import notrace_primitive
from autograd.extend import register_notrace
from autograd.numpy.numpy_vjps import unbroadcast
import autograd.tracer as tracer
from autograd.tracer import getval
from autograd.tracer import Node
from autograd.util import toposort
from autograd.util import subvals
from autograd import core
from autograd import grad
from autograd import numpy as np
# Expression types
GraphExpr = collections.namedtuple('GraphExpr', ['expr_node', 'free_vars'])
ConstExpr = collections.namedtuple('ConstExpr', ['val', 'free_vars'])
def trace(fun, start_nodes, args):
with tracer.trace_stack.new_trace() as t:
start_boxes = [tracer.new_box(x, t, n) for x, n in zip(args, start_nodes)]
end_box = fun(*start_boxes)
if tracer.isbox(end_box) and end_box._trace == t:
return end_box._value, end_box._node
else:
warnings.warn("Output seems independent of input.")
return end_box, None
def make_expr(fun, *args, **kwargs):
"""Trace a function's execution to a representation of its body expression.
Args:
fun: a Python callable that only requires positional arguments.
args: positional argument values on which to trace the evaluation of fun.
names: optional, a list of names for free variables corresponding to the
positional arguments of fun (the default is to use variable names
corresponding to the parameter names of fun, or x0, x1, ... if parameter
names of fun cannot be determined).
Returns:
An expression instance representing the body of fun (with all non-primitive
function calls inlined) with free variables corresponding to the positional
arguments that affect its value.
"""
names = kwargs.pop('names', getargs(fun) or _default_names())
start_nodes = [ExprNode.new_root(name, arg) for name, arg in zip(names, args)]
val, end_node = trace(fun, start_nodes, args)
if end_node:
used_start_nodes = {n for n in toposort(end_node, lambda n: n.parents)
if n in start_nodes}
free_vars = collections.OrderedDict((n.name, n) for n in start_nodes
if n in used_start_nodes)
return GraphExpr(end_node, free_vars)
else:
return ConstExpr(val, {})
class ExprNode(Node):
"""Node type used in GraphExpr internal representation."""
__slots__ = ['fun', 'args', 'kwargs', 'vs']
def __init__(self, ans, fun, args, kwargs, parent_argnums, parents):
self.fun = fun
self.args = list(subvals(args, zip(parent_argnums, parents)))
self.kwargs = kwargs
self.vs = vspace(ans)
def initialize_root(self, var_name, val):
self.fun = env_lookup
self.args = (var_name,)
self.kwargs = {}
self.vs = vspace(val)
@property
def parents(self):
return [arg for arg in self.args if isinstance(arg, ExprNode)]
@property
def parent_argnums(self):
return [i for i, arg in enumerate(self.args) if isinstance(arg, ExprNode)]
@property
def name(self):
if self.fun is not env_lookup:
raise AttributeError("ExprNode only has 'name' for env_lookup nodes.")
return self.args[0]
def __eq__(self, x):
return (isinstance(x, ExprNode)
and self.fun == x.fun
and len(self.args) == len(x.args)
and all(map(equal, self.args, x.args))
and set(self.kwargs) == set(x.kwargs)
and all(equal(self.kwargs[k], x.kwargs[k]) for k in self.kwargs))
def __repr__(self):
node_name = self.fun.__name__
if self.fun is env_lookup:
node_name += '(' + self.name + ')'
return '<ExprNode {}({}) {}>'.format(
node_name, ', '.join(['-'] * len(self.args)), hex(id(self)))
def env_lookup(env, var_name):
"""Function used by 'source' nodes in ExprNode graphs to model var lookup."""
try:
return env[var_name]
except KeyError:
raise NameError("Name '{}' is not defined in environment with names {}"
.format(var_name, env.keys()))
def _value_hash(o):
try:
return hash(o) # NOTE can collide, e.g. hash(-1) == hash(-2)
except TypeError:
if isinstance(o, np.ndarray):
return hash(hashlib.sha1(np.require(o, requirements='C')).hexdigest())
else:
return id(o) # give up, hash object by id
class _CSEValue(object):
__slots__ = ['fun', 'args', 'kwargs']
def __init__(self, fun, args, kwargs):
self.fun = fun
self.args = args
self.kwargs = kwargs
def __hash__(self):
fun, args, kwargs = self.fun, self.args, self.kwargs
try:
bound = funcsigs.signature(fun).bind(*args, **kwargs)
except (ValueError, TypeError):
pass # can't use signature to match kwargs to args, use generic approach
else:
args, kwargs = bound.args, bound.kwargs
arg_hash = (_value_hash(arg) for arg in args)
    kwarg_hash = (hash(k) ^ _value_hash(v) for k, v in kwargs.items())
return hash((fun,) + tuple(itertools.chain(arg_hash, kwarg_hash)))
def __eq__(self, x):
return (type(x) is _CSEValue
and self.fun == x.fun
and len(self.args) == len(x.args)
and all(map(equal, self.args, x.args))
and set(self.kwargs) == set(x.kwargs)
and all(equal(self.kwargs[k], x.kwargs[k]) for k in self.kwargs))
def _memoize_apply_node(apply_node):
memoized_vals = {}
def memoized_apply_node(node, args):
node_hash = _CSEValue(node.fun, args, node.kwargs)
if node_hash not in memoized_vals:
memoized_vals[node_hash] = apply_node(node, args)
return memoized_vals[node_hash]
return memoized_apply_node
def _eval_graph(root_node, eval_args, apply_node, cse=True):
vals = {}
apply_node = _memoize_apply_node(apply_node) if cse else apply_node
for node in reversed(list(toposort(root_node))):
args = eval_args(node, (vals[p] for p in node.parents))
vals[node] = apply_node(node, args)
return vals[root_node]
def node_fmap(f, xs):
if isinstance(xs, ExprNode):
return f(xs)
elif isinstance(xs, (list, tuple)):
elts = [node_fmap(f, elt) for elt in xs]
# assume there are no tuple subclasses other than namedtuples
if type(xs) in (list, tuple):
return type(xs)(elts)
else:
return type(xs)(*elts) # namedtuple
elif isinstance(xs, dict):
return {k: node_fmap(f, v) for k, v in xs.items()}
else:
return xs
class ContainerOutput(object):
def __init__(self, container):
self.container = container
def __hash__(self):
return id(self.container) # unique value
@property
def parents(self):
nodes = set()
node_fmap(nodes.add, self.container)
return nodes
def _eval_graph_container(root_container, eval_args, apply_node, cse=True):
# This function exists to handle root nodes that are container types.
graph = list(toposort(ContainerOutput(root_container)))[::-1]
vals = {}
apply_node = _memoize_apply_node(apply_node) if cse else apply_node
for node in graph[:-1]:
args = eval_args(node, (vals[p] for p in node.parents))
vals[node] = apply_node(node, args)
out = node_fmap(vals.get, root_container)
return out
def eval_expr(expr, env={}):
"""Evaluate an expression in a given environment.
Args:
expr: an expression instance.
env: a dict of name:value bindings, where keys can be strings corresponding
to free variable names (mapping to variable values), functions (mapping to
replacement functions to apply), or ExprNodes (mapping to values).
Returns:
The value of the expression given the environment.
Raises:
NameError if an env_lookup node is encountered without a corresponding name
binding in env.
"""
if isinstance(expr, ConstExpr):
return expr.val
elif isinstance(expr, GraphExpr):
def eval_args(node, partial_args):
return subvals(node.args, zip(node.parent_argnums, partial_args))
def apply_node(node, node_args):
fun = env.get(node.fun, node.fun)
if node in env:
return env[node]
if node.fun is env_lookup:
return fun(env, *node_args, **node.kwargs)
else:
return fun(*node_args, **node.kwargs)
if isinstance(expr.expr_node, tuple):
return _eval_graph_container(expr.expr_node, eval_args, apply_node)
else:
return _eval_graph(expr.expr_node, eval_args, apply_node)
else:
raise TypeError("Can't evaluate expression type: {}".format(type(expr)))
def eval_node(node, free_vars, env):
return eval_expr(GraphExpr(node, free_vars), env)
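# Illustrative sketch (not part of the original module): trace a function into
# a GraphExpr with make_expr, then re-evaluate it on new inputs with eval_expr.
def _example_make_and_eval_expr():
  expr = make_expr(lambda a, b: np.dot(a, b) + 1., np.ones(3), np.ones(3),
                   names=['a', 'b'])
  assert sorted(expr.free_vars) == ['a', 'b']
  val = eval_expr(expr, {'a': np.arange(3.), 'b': np.ones(3)})
  assert np.allclose(val, 4.)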
def backward_pass(g, start_nodes, end_node):
outgrads = {end_node : (g, False)}
for node in ag_core.toposort(end_node):
outgrad = outgrads[node]
ingrads = node.vjp(outgrad[0])
for parent, ingrad in zip(node.parents, ingrads):
outgrads[parent] = ag_core.add_outgrads(outgrads.get(parent), ingrad)
return [outgrads.pop(node, (None, None))[0] for node in start_nodes]
def make_dummy(node):
result = node.vs.ones()
if len(result.shape) >= 2 and result.shape[-1] == result.shape[-2]:
result = np.ones(result.shape[:-2] + (1, 1)) * np.eye(result.shape[-1])
return result
# TODO(mattjj): This function re-evals on dummy values, but that's wasteful.
# If we tweak how eager simplifications work, we can avoid FLOPs here.
def remake_expr(expr, env={}):
"""Convenience wrapper for make_expr/eval_expr to apply eager simplifies."""
# this function doesn't eliminate unused free_vars in expr, but it could
names = expr.free_vars.keys()
args = (make_dummy(node) for node in expr.free_vars.values())
return make_expr(lambda *args: eval_expr(expr, dict(zip(names, args), **env)),
*args, names=names)
def inline_expr(expr, symbolic_env):
"""Evaluates expr in a symbolic environment for substituting subgraphs."""
if isinstance(expr, ConstExpr):
return expr
elif isinstance(expr, GraphExpr):
def eval_args(node, partial_args):
return subvals(node.args, zip(node.parent_argnums, partial_args))
def apply_node(node, node_args):
if node.fun is env_lookup:
return node.fun(symbolic_env, *node_args, **node.kwargs)
else:
return _make_node_like(node, args=node_args)
expr_node = _eval_graph(expr.expr_node, eval_args, apply_node)
return GraphExpr(expr_node, {})
else:
raise TypeError("Can't inline expression type: {}".format(type(expr)))
def replace_node_with_expr(node, expr):
"""Replaces an ExprNode (in a GraphExpr) with a given expression."""
if isinstance(expr, ConstExpr):
val = expr.val
temp_node = ExprNode(val, lambda: val, (), {}, (), ())
_mutate_node(node, temp_node)
elif isinstance(expr, GraphExpr):
_mutate_node(node, expr.expr_node)
else:
raise TypeError("Can't handle expression type: {}".format(type(expr)))
return node
def _mutate_node(target_node, source_node, **kwargs):
for attrname in target_node.__slots__:
attrval = kwargs.get(attrname, getattr(source_node, attrname))
setattr(target_node, attrname, attrval)
return target_node
def _make_node_like(node, **kwargs):
return _mutate_node(ExprNode.__new__(ExprNode), node, **kwargs)
def _default_names():
  return ("x{}".format(i) for i in itertools.count())
# TODO(mattjj): this should allow repeated names... should write a new function
# remake_with_new_free_vars that takes a dict mapping nodes to names
def extract_superexpr(expr, nodes):
"""Extract a super-expression on the given nodes (and other free vars).
Args:
expr: a GraphExpr instance.
nodes: dict mapping name strings to ExprNodes in GraphExpr.
Returns:
A new expression, with free variables drawn from the keys of `nodes` (and
any remaining free variables of `expr`), corresponding to the body of the
function from `nodes` (and any other free variables) to the value of `expr`.
"""
names = expr.free_vars.keys()
args = (node.vs.ones() for node in expr.free_vars.values())
env_vals = (node.vs.ones() for node in nodes.values())
def fun(*args_and_env):
N = len(expr.free_vars)
args, env_vals = args_and_env[:N], args_and_env[N:]
env = dict(zip(nodes.values(), env_vals))
return eval_expr(expr, dict(zip(names, args), **env))
return make_expr(fun, *itertools.chain(args, env_vals),
names=itertools.chain(names, nodes))
## util
def getargs(fun):
try:
return inspect.getargspec(fun).args
except TypeError:
pass
@notrace_primitive
def equal(a, b):
"""An equality function that compares all elements of ndarrays."""
try:
return bool(a == b)
except ValueError:
return (isinstance(a, np.ndarray) and isinstance(b, np.ndarray)
and np.shape(a) == np.shape(b) and (a == b).all())
def make_node(fun, args, kwargs):
# infer shape data by running fun on dummy arguments
argvals = [arg.vs.ones() if isinstance(arg, ExprNode) else arg
for arg in args]
vs = vspace(fun(*argvals, **kwargs))
new_node = ExprNode.__new__(ExprNode)
new_node.fun = fun
new_node.args = list(args)
new_node.kwargs = kwargs
new_node.vs = vs
return new_node
def is_descendant_of(a, b):
"""Test if the object a is a descendant of the ExprNode b."""
if not isinstance(b, ExprNode):
raise TypeError("Second argument must be ExprNode, got {}".format(type(b)))
if a is b:
return True
visited = set()
def is_descendant_of_b(a):
visited.add(a)
return b in a.parents or any(p not in visited and is_descendant_of_b(p)
for p in a.parents)
return isinstance(a, ExprNode) and is_descendant_of_b(a)
def all_descendants_of(root_node, ancestor):
"""Return a set of all descendants of `ancestor` in the graph `root_node`."""
visited = set([ancestor])
descendants = set([ancestor])
def collect_descendants(node):
if node not in visited:
visited.add(node)
if node is ancestor or any([collect_descendants(p) or p in descendants
for p in node.parents]):
descendants.add(node)
collect_descendants(root_node)
return frozenset(descendants)
@primitive
def one_hot(x, width):
"""Convert int array-like x to a one-hot representation of given width."""
return (np.expand_dims(x, -1) == np.arange(width)).astype(np.float32)
defvjp(one_hot, None)
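# Illustrative sketch (not part of the original module): one_hot maps integer
# indices to rows of the identity matrix, as float32.
def _example_one_hot():
  expected = np.array([[1., 0., 0.], [0., 0., 1.]])
  assert np.all(one_hot(np.array([0, 2]), 3) == expected)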
@primitive
def logdet(x):
return np.linalg.slogdet(x)[1]
# transpose by swapping last two dimensions
def _T(x): return np.swapaxes(x, -1, -2)
# add two dimensions to the end of x
def _add2d(x): return np.reshape(x, np.shape(x) + (1, 1))
defvjp(logdet, lambda ans, x: lambda g: _add2d(g) * _T(np.linalg.inv(x)))
@primitive
def add_n(*args):
  return functools.reduce(np.add, args)
def grad_add_n_full(parent_argnums, ans, args, kwargs):
meta = [np.metadata(args[i]) for i in parent_argnums]
return lambda g: [unbroadcast(g, m) for m in meta]
defvjp_argnums(add_n, grad_add_n_full)
## debugging
def print_expr(expr, env={}):
"""Return a string with an SSA-like representation of an expression."""
if isinstance(expr, ConstExpr):
return str(expr.val)
elif isinstance(expr, GraphExpr):
fragment = []
temp_names = ('temp_{}'.format(i) for i in itertools.count())
apply_str = '{} = {}({})\n'.format
def eval_args(node, partial_args):
args = subvals(node.args, zip(node.parent_argnums, partial_args))
return [str(a) for a in args]
def apply_node(node, arg_strs):
if node.fun is env_lookup:
name, = arg_strs
return name if name not in env else env[name]
else:
name = next(temp_names)
fragment.append(apply_str(name, node.fun.__name__, ', '.join(arg_strs)))
return name
out_name = _eval_graph(expr.expr_node, eval_args, apply_node, cse=False)
return ''.join(fragment)
else:
raise TypeError("Can't print expression type: {}".format(type(expr)))
dot_edge = '{} -> {} [color=gray30];\n'.format
dot_function_node = (
'{} [label="{}", shape=box, color=lightblue, style=filled];\n'.format)
dot_variable_node = '{} [label="{}", color=orange, style=filled];\n'.format
dot_graph = 'digraph G {{{}}}'.format
def draw_expr(expr, env={}):
if isinstance(expr, GraphExpr):
fragment = ['']
temp_names = ('temp_{}'.format(i) for i in itertools.count())
    node_names = collections.defaultdict(lambda: next(temp_names))
def eval_args(node, partial_args):
return subvals(node.args, zip(node.parent_argnums, partial_args))
    # TODO: print einsum nodes, and string vs. float vs. input nodes, in
    # different colors.
def apply_node(node, arg_strs):
if node.fun is env_lookup:
name, = arg_strs
name = node_names[node] = name if name not in env else env[name]
fragment[0] += dot_variable_node(name, name)
else:
name = node_names[node]
fragment[0] += dot_function_node(name, node.fun.__name__)
for argnum, arg in enumerate(node.args):
if argnum in node.parent_argnums:
fragment[0] += dot_edge(node_names[node.args[argnum]], name)
else:
argname = '{}_arg_{}'.format(name, argnum)
fragment[0] += dot_edge(argname, name)
fragment[0] += dot_variable_node(argname, arg)
return name
name = _eval_graph(expr.expr_node, eval_args, apply_node, cse=False)
fragment[0] += dot_variable_node('output', 'output')
fragment[0] += dot_edge(name, 'output')
return dot_graph(fragment[0])
else:
raise TypeError("Can't draw expression type: {}".format(type(expr)))
notrace_functions = [np.ones_like, np.zeros_like]
for fun in notrace_functions:
register_notrace(ExprNode, fun)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conjugate meanfield functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import autograd.numpy as np
from autograd import grad
from autograd import value_and_grad
from . import conjugacy
from . import fmap_util
def elbo(neg_energy, normalizers, eta, return_lp=False):
# TODO(trandustin): more efficiently return various parts of elbo
logZ = total_normalizer(normalizers)
val, mu = value_and_grad(logZ)(eta)
lp = neg_energy(*mu)
elbo_val = lp - (dot(eta, mu) - val)
if return_lp:
return elbo_val, lp
return elbo_val
def cavi(log_joint, init_vals, supports, num_iters, callback=None):
if not callback:
    callback = lambda t, neg_energy, normalizers, natparams: print(
        elbo(neg_energy, normalizers, natparams))
neg_energy, normalizers, _, initializers, _, _ = \
conjugacy.multilinear_representation(log_joint, init_vals, supports)
natparams = [initializer(10.) for initializer in initializers]
meanparams = [grad(normalizer)(natparam)
for normalizer, natparam in zip(normalizers, natparams)]
callback(-1, neg_energy, normalizers, natparams)
for t in range(num_iters):
for i, normalizer in reversed(list(enumerate(normalizers))):
natparams[i] = grad(neg_energy, i)(*meanparams)
meanparams[i] = grad(normalizer)(natparams[i])
callback(t, neg_energy, normalizers, natparams)
return natparams, meanparams
### util
# TODO tree map
def dot(a, b):
tot = [0.]
def _dot(a, b):
tot[0] += np.dot(np.ravel(a), np.ravel(b))
fmap_util.container_fmap(_dot, a, b)
return tot[0]
def total_normalizer(normalizers):
def logZ(eta):
return sum([norm(eta[i]) for i, norm in enumerate(normalizers)])
return logZ
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file encodes knowledge about exponential families.
Each exponential family (normal, gamma, Dirichlet, etc.) is completely
characterized by:
* Support
* Base Measure (not yet implemented---mostly unimportant)
* Sufficient Statistics
The functions and data structures in this file map from the above
information to:
* Log-normalizer function: Maps natural parameters to a scalar such that
\int_x \exp(natural_parameters^T sufficient_statistics(x)
- log_normalizer(natural_parameters)) dx = 1.
* scipy.stats distribution classes.
* Standard parameters for those classes as a function of natural parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from autograd import numpy as np
from autograd.scipy import misc
from autograd.scipy import special
from scipy import stats
from . import graph_util
from . import matchers
from . import pgm
from .patterns import (
Subtract, Add, Dot, Multiply, Divide, TrueDivide, Node, Val, Einsum, Str,
Choice, Segment, Log, Log1P, Sum, Tuple, VSpaceAdd, Any, Power, Scalar,
OneHot, Transpose, EnvLookup, Getitem, Negative, Star)
from . import pplham as ph
from .tracers import logdet
from .util import SupportTypes
T = lambda X: np.swapaxes(X, -1, -2)
sym = lambda X: 0.5 * (X + T(X))
exp_family_stats = []
distbn_defns = []
StatsMatcher = namedtuple('StatsMatcher', ['matcher', 'preds',
'update_suffstat'])
DistributionDefinition = namedtuple('DistributionDefinition',
['matchers', 'support', 'check',
'suffstat_cls', 'make_log_normalizer',
'distribution'])
def make_matcher(pattern, preds, update_suffstat):
return StatsMatcher(matchers.matcher(pattern), preds, update_suffstat)
### Logic for matching sufficient statistic nodes to a distribution
def find_distributions(all_stats_nodes, supports):
nodenames_lognormalizers_distbns = []
for stats_nodes, support in zip(all_stats_nodes, supports):
nodenames_lognormalizers_distbns.append(find_distribution(
stats_nodes, support))
return zip(*nodenames_lognormalizers_distbns)
def find_distribution(stats_nodes, support):
for distbn_defn in distbn_defns:
if distbn_defn.support != support:
continue
suffstat_nodes = match_nodes_with_distbn(stats_nodes, distbn_defn)
if suffstat_nodes:
return (suffstat_nodes,
distbn_defn.make_log_normalizer(suffstat_nodes),
distbn_defn.distribution)
suffstat_nodes = NotFoundSuffStat(params=tuple(stats_nodes))
return suffstat_nodes, not_found_normalizer, not_found_distbn
# TODO(matthewjmackay): change so suffstat fields are initialized to None
def init_suffstat(suffstat_cls):
return suffstat_cls(**{name:{} for name in suffstat_cls._fields})
def match_nodes_with_distbn(stats_nodes, distbn_defn):
suffstat = init_suffstat(distbn_defn.suffstat_cls)
def match_node(node, stats_matchers):
for stats_matcher in stats_matchers:
bindings = stats_matcher.matcher(node)
if bindings and all(pred(**bindings) for pred in stats_matcher.preds):
return stats_matcher.update_suffstat(suffstat, bindings, node)
return False
for node in stats_nodes:
suffstat = match_node(node, distbn_defn.matchers)
if not suffstat:
return False
# Check if all the required statistics are present and return.
if distbn_defn.check(suffstat):
return suffstat
else:
return False
### Not found distribution
NotFoundSuffStat = namedtuple('NotFoundSuffStat', ['params'])
def asserts_false(*args, **kwargs):
assert False
not_found_distbn = lambda *args, **kwargs: asserts_false
not_found_normalizer = asserts_false
### Bernoulli distribution
BernoulliSuffStat = namedtuple('BernoulliSuffStat', ['x'])
x_matcher = make_matcher(
pattern=EnvLookup('x'), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x': node})))
bernoulli_matchers = frozenset([x_matcher])
bernoulli_check = lambda suffstat: not isinstance(suffstat.x, dict)
bernoulli_log_normalizer = (lambda natparam:
np.sum(np.log1p(np.exp(natparam.x))))
bernoulli_distbn = (lambda natparam:
stats.bernoulli(special.expit(natparam.x)))
bernoulli_defn = DistributionDefinition(
matchers=bernoulli_matchers, support=SupportTypes.BINARY,
check=bernoulli_check, suffstat_cls=BernoulliSuffStat,
make_log_normalizer=lambda *args: bernoulli_log_normalizer,
distribution=bernoulli_distbn)
exp_family_stats.append(BernoulliSuffStat)
distbn_defns.append(bernoulli_defn)
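# Illustrative sketch (not part of the original module): for natural parameter
# eta, the Bernoulli log-normalizer is sum(log(1 + exp(eta))).
def _example_bernoulli_log_normalizer():
  natparam = BernoulliSuffStat(x=np.array([0., 1.]))
  expected = np.log(2.) + np.log1p(np.exp(1.))
  assert np.allclose(bernoulli_log_normalizer(natparam), expected)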
### Gamma distribution
GammaSuffStat = namedtuple('GammaSuffStat',
['log_x', 'x'])
log_x_matcher = make_matcher(
pattern=(Log, EnvLookup('x')), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'log_x': node})))
x_matcher = make_matcher(
pattern=EnvLookup('x'), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x': node})))
gamma_matchers = frozenset([log_x_matcher, x_matcher])
gamma_check = lambda suffstat: (not isinstance(suffstat.x, dict)
and not isinstance(suffstat.log_x, dict))
def gamma_log_normalizer(natparam):
alpha = natparam.log_x + 1.
beta = -natparam.x
return np.sum(-alpha * np.log(beta) + special.gammaln(alpha))
def gamma_distbn(natparam):
args = [natparam.log_x + 1., 0., -1. / natparam.x]
return stats.gamma(*args)
gamma_defn = DistributionDefinition(
matchers=gamma_matchers, support=SupportTypes.NONNEGATIVE,
check=gamma_check, suffstat_cls=GammaSuffStat,
make_log_normalizer=lambda *args: gamma_log_normalizer,
distribution=gamma_distbn)
exp_family_stats.append(GammaSuffStat)
distbn_defns.append(gamma_defn)
### Dirichlet distribution
DirichletSuffStat = namedtuple('DirichletSuffStat',
['log_x'])
log_x_matcher = make_matcher(
pattern=(Log, EnvLookup('x')), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'log_x': node})))
dirichlet_matchers = frozenset([log_x_matcher])
dirichlet_check = lambda suffstat: not isinstance(suffstat.log_x, dict)
def dirichlet_log_normalizer(natparam):
alpha = natparam.log_x + 1.
alpha_sum = np.sum(alpha, -1)
return np.sum(special.gammaln(alpha)) - np.sum(special.gammaln(alpha_sum))
def batch_dirichlet(alpha):
"""Batched `np.ndarray` of Dirichlet frozen distributions.
To get each frozen distribution, index the returned `np.ndarray` followed by
`item(0)`.
"""
if alpha.ndim == 1:
return stats.dirichlet(alpha)
return np.array(
[stats.dirichlet(vec) for vec in alpha.reshape([-1, alpha.shape[-1]])]
).reshape(alpha.shape[:-1])
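# Illustrative sketch (not part of the original module): a (2, 3) alpha array
# yields an object array of two frozen Dirichlet distributions.
def _example_batch_dirichlet():
  batch = batch_dirichlet(np.ones((2, 3)))
  assert batch.shape == (2,)
  assert np.allclose(batch[0].mean(), np.ones(3) / 3.)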
def dirichlet_distbn(natparam):
return batch_dirichlet(natparam.log_x + 1.)
dirichlet_defn = DistributionDefinition(
matchers=dirichlet_matchers, support=SupportTypes.SIMPLEX,
check=dirichlet_check, suffstat_cls=DirichletSuffStat,
make_log_normalizer=lambda *args: dirichlet_log_normalizer,
distribution=dirichlet_distbn)
exp_family_stats.append(DirichletSuffStat)
distbn_defns.append(dirichlet_defn)
### Beta distribution
BetaSuffStat = namedtuple('BetaSuffStat',
['log_x', 'log_one_minus_x'])
log_x_matcher = make_matcher(
pattern=(Log, EnvLookup('x')), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'log_x': node})))
log_one_minus_x_matcher = make_matcher(
pattern=(Log1P, (Negative, EnvLookup('x'))), preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'log_one_minus_x': node})))
beta_matchers = frozenset([log_x_matcher, log_one_minus_x_matcher])
beta_check = lambda suffstat: (not isinstance(suffstat.log_x, dict) and
not isinstance(suffstat.log_one_minus_x, dict))
def beta_log_normalizer(natparam):
alpha = natparam.log_x + 1.
beta = natparam.log_one_minus_x + 1.
return np.sum(special.gammaln(alpha) + special.gammaln(beta) -
special.gammaln(alpha+beta))
def beta_distbn(natparam):
return stats.beta(natparam.log_x+1., natparam.log_one_minus_x+1.)
beta_defn = DistributionDefinition(
matchers=beta_matchers, support=SupportTypes.UNIT_INTERVAL,
check=beta_check, suffstat_cls=BetaSuffStat,
make_log_normalizer=lambda *args: beta_log_normalizer,
distribution=beta_distbn)
exp_family_stats.append(BetaSuffStat)
distbn_defns.append(beta_defn)
### Categorical distribution
CategoricalSuffStat = namedtuple('CategoricalSuffStat',
['onehot_x'])
onehot_x_matcher = make_matcher(
pattern=(OneHot, EnvLookup('x'), Val),
preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'onehot_x': node})))
categorical_matchers = frozenset([onehot_x_matcher])
categorical_check = lambda suffstat: not isinstance(suffstat.onehot_x, dict)
def categorical_log_normalizer(natparam):
return np.sum(misc.logsumexp(natparam.onehot_x, -1))
def _softmax(x):
safe_x = x - x.max(-1, keepdims=True)
p = np.exp(safe_x)
return p / p.sum(-1, keepdims=True)
def categorical_distbn(natparam):
return ph.categorical(_softmax(natparam.onehot_x))
categorical_defn = DistributionDefinition(
matchers=categorical_matchers, support=SupportTypes.INTEGER,
check=categorical_check, suffstat_cls=CategoricalSuffStat,
make_log_normalizer=lambda *args: categorical_log_normalizer,
distribution=categorical_distbn)
exp_family_stats.append(CategoricalSuffStat)
distbn_defns.append(categorical_defn)
### Multinoulli distribution
MultinoulliSuffStat = namedtuple('MultinoulliSuffStat', ['x'])
x_matcher = make_matcher(
pattern=EnvLookup('x'),
preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x': node})))
multinoulli_matchers = frozenset([x_matcher])
multinoulli_check = lambda suffstat: not isinstance(suffstat.x, dict)
def multinoulli_log_normalizer(natparam):
return np.sum(misc.logsumexp(natparam.x, -1))
def multinoulli_distbn(natparam):
return stats.multinomial(n=1, p=_softmax(natparam.x))
multinoulli_defn = DistributionDefinition(
matchers=multinoulli_matchers, support=SupportTypes.ONE_HOT,
check=multinoulli_check, suffstat_cls=MultinoulliSuffStat,
make_log_normalizer=lambda *args: multinoulli_log_normalizer,
distribution=multinoulli_distbn)
exp_family_stats.append(MultinoulliSuffStat)
distbn_defns.append(multinoulli_defn)
### Multivariate normal with dense precision matrix
MultivariateNormalSuffStat = namedtuple('MultivariateNormalSuffStat',
['x_xtr', 'x', 'x_squared'])
def diagonal_einsum(**kwargs):
return kwargs['formula'] == '...,...->...'
def quadratic_einsum(**kwargs):
return kwargs['formula'] == '...a,...b->...ab'
x_xtr_matcher = make_matcher(
pattern=(Einsum, Str('formula'), EnvLookup('x'), EnvLookup('x')),
preds=(quadratic_einsum,),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x_xtr': node})))
x_matcher = make_matcher(
pattern=EnvLookup('x'),
preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x': node})))
x_squared_matcher = make_matcher(
pattern=(Einsum, Str('formula'), EnvLookup('x'), EnvLookup('x')),
preds=(diagonal_einsum,),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x_squared': node})))
multivariate_normal_check = (lambda suffstat:
not isinstance(suffstat.x_xtr, dict))
multivariate_normal_matchers = frozenset([x_xtr_matcher, x_matcher,
x_squared_matcher])
def _add_diag(tau, J):
return J + np.einsum('...i,j,ij->...ij', tau, np.ones(tau.shape[-1]),
np.eye(tau.shape[-1]))
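# Illustrative sketch (not part of the original module): _add_diag adds tau
# onto the diagonal of the (batched) matrix J.
def _example_add_diag():
  tau = np.array([1., 2.])
  assert np.allclose(_add_diag(tau, np.zeros((2, 2))), np.diag(tau))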
def multivariate_normal_log_normalizer(natparam):
if isinstance(natparam.x_squared, dict):
tau = np.zeros(natparam.x_xtr.shape[-1])
else:
tau = natparam.x_squared
J = _add_diag(tau, natparam.x_xtr)
precision = -2 * J
log_det_term = -0.5 * logdet(sym(precision)).sum()
pi_term = 0.5 * J.shape[-1] * np.log(2. * np.pi)
if not isinstance(natparam.x, dict):
quadratic_term = np.einsum(',...ij,...i,...j->...', 0.5,
sym(np.linalg.inv(sym(precision))),
natparam.x, natparam.x).sum()
else:
quadratic_term = 0
return quadratic_term + log_det_term + pi_term
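# The expression above is the Gaussian log normalizer in natural parameters
# (h, J), where the precision matrix is -2J:
#   A(h, J) = 0.5 * h' inv(-2J) h - 0.5 * logdet(-2J) + 0.5 * d * log(2 * pi)
# with h taken to be zero when no linear (first-order) statistic was matched.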
class BatchMultivariateNormal(object):
def __init__(self, mean, cov):
self.mean = mean
self.cov = cov
self._chol = None
def __getitem__(self, i):
return BatchMultivariateNormal(self.mean[i], self.cov[i])
@property
def chol(self):
if self._chol is None:
self._chol = np.linalg.cholesky(self.cov)
return self._chol
def rvs(self):
return self.mean + self.chol.dot(np.random.randn(self.mean.shape[-1]))
def multivariate_normal_from_natural_parameters(J, h):
covariance = sym(np.linalg.inv(-2 * sym(J)))
mean = np.einsum('...ij,...j->...i', covariance, h)
return mean, covariance
def multivariate_normal_distbn(natparam):
if isinstance(natparam.x_squared, dict):
tau = np.zeros(natparam.x_xtr.shape[-1])
else:
tau = natparam.x_squared
J = _add_diag(tau, natparam.x_xtr)
if isinstance(natparam.x, dict):
h = np.zeros(natparam.x_xtr.shape[-1])
else:
h = natparam.x
return BatchMultivariateNormal(
*multivariate_normal_from_natural_parameters(J, h))
multivariate_normal_defn = DistributionDefinition(
matchers=multivariate_normal_matchers, support=SupportTypes.REAL,
check=multivariate_normal_check,
suffstat_cls=MultivariateNormalSuffStat,
make_log_normalizer=lambda *args: multivariate_normal_log_normalizer,
distribution=multivariate_normal_distbn)
exp_family_stats.append(MultivariateNormalSuffStat)
distbn_defns.append(multivariate_normal_defn)
### Diagonal-covariance normal
DiagonalNormalSuffStat = namedtuple('DiagonalNormalSuffStat',
['x_squared', 'x'])
x_squared_matcher = make_matcher(
pattern=(Einsum, Str('formula'), EnvLookup('x'), EnvLookup('x')),
preds=(diagonal_einsum,),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x_squared': node})))
x_matcher = make_matcher(
pattern=EnvLookup('x'),
preds=(),
update_suffstat=(lambda suffstat, bindings, node:
suffstat._replace(**{'x': node})))
diagonal_normal_matchers = frozenset([x_squared_matcher, x_matcher])
diagonal_normal_check = lambda suffstat: not isinstance(suffstat.x_squared, dict)
def diagonal_normal_log_normalizer(natparam):
if isinstance(natparam.x, dict):
h = np.zeros_like(natparam.x_squared)
else:
h = natparam.x
tau = -2 * natparam.x_squared
mu = h / tau
return np.sum(-0.5*np.log(tau) + 0.5*tau*mu*mu + 0.5*np.log(2.*np.pi))
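# This is the scalar Gaussian log normalizer applied elementwise: with natural
# parameters (h, -tau/2) the normalizer is
#   h**2 / (2 * tau) - 0.5 * log(tau) + 0.5 * log(2 * pi),
# summed over all dimensions (h is zero where no linear statistic was matched).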
def diagonal_normal_from_natural_parameters(half_minus_tau, h):
tau = -2 * half_minus_tau
return h / tau, 1. / np.sqrt(tau)
def diagonal_normal_distbn(natparam):
if isinstance(natparam.x, dict):
h = np.zeros_like(natparam.x_squared)
else:
h = natparam.x
return stats.norm(*diagonal_normal_from_natural_parameters(
natparam.x_squared, h))
diagonal_normal_defn = DistributionDefinition(
matchers=diagonal_normal_matchers, support=SupportTypes.REAL,
check=diagonal_normal_check, suffstat_cls=DiagonalNormalSuffStat,
make_log_normalizer=lambda *args: diagonal_normal_log_normalizer,
distribution=diagonal_normal_distbn)
exp_family_stats.append(DiagonalNormalSuffStat)
distbn_defns.append(diagonal_normal_defn)
### Structured normal distribution
StructuredNormalSuffStat = namedtuple('StructuredNormalSuffStat',
['xi_xjtrs', 'xi_times_xjs',
'xi_xitrs', 'xi_squareds',
'xis'])
def different_indices(formula, x, idx):
return len(idx) == len(set(idx))
def single_index(formula, x, idx):
return len(set(idx)) == 1
def two_indices(formula, x, idx):
return len(idx) == 2
def make_joint_updater(name):
def joint_updater(suffstat, bindings, node):
factor_dict = getattr(suffstat, name)
factor_dict[bindings['idx']] = node
return suffstat
return joint_updater
def make_single_updater(name):
def single_updater(suffstat, bindings, node):
factor_dict = getattr(suffstat, name)
idx = bindings['idx']
if isinstance(idx, tuple):
idx = idx[0]
factor_dict[(idx,)] = node
return suffstat
return single_updater
xi_xjtr_matcher = make_matcher(
pattern=(Einsum, Str('formula'),
Star((Getitem, EnvLookup('x'), Val('idx')), accumulate=['idx'])),
preds=(different_indices, quadratic_einsum, two_indices),
update_suffstat=make_joint_updater('xi_xjtrs'))
xi_times_xj_matcher = make_matcher(
pattern=(Einsum, Str('formula'),
Star((Getitem, EnvLookup('x'), Val('idx')), accumulate=['idx'])),
preds=(different_indices, diagonal_einsum, two_indices),
update_suffstat=make_joint_updater('xi_times_xjs'))
xi_xitr_matcher = make_matcher(
pattern=(Einsum, Str('formula'),
Star((Getitem, EnvLookup('x'), Val('idx')), accumulate=['idx'])),
preds=(quadratic_einsum, single_index),
update_suffstat=make_single_updater('xi_xitrs'))
xi_squared_matcher = make_matcher(
pattern=(Einsum, Str('formula'),
Star((Getitem, EnvLookup('x'), Val('idx')), accumulate=['idx'])),
preds=(diagonal_einsum, single_index),
update_suffstat=make_single_updater('xi_squareds'))
xi_matcher = make_matcher(
pattern=(Getitem, EnvLookup('x'), Val('idx')),
preds=(),
update_suffstat=make_single_updater('xis'))
struct_normal_matchers = frozenset([xi_xjtr_matcher, xi_times_xj_matcher,
xi_xitr_matcher, xi_squared_matcher,
xi_matcher])
def struct_normal_check(suffstat):
factors = (suffstat.xi_xjtrs.keys() + suffstat.xi_times_xjs.keys() +
suffstat.xi_xitrs.keys() + suffstat.xi_squareds.keys() +
suffstat.xis.keys())
nodes = {node for factor in factors for node in factor}
for node in nodes:
single_factor = (node,)
if (single_factor not in suffstat.xi_xitrs and
single_factor not in suffstat.xi_squareds):
return False
return True
def make_struct_normal_log_normalizer(suffstat):
factors = {frozenset(factor) for factor in suffstat.xi_xjtrs.keys() +
suffstat.xi_times_xjs.keys() + suffstat.xi_xitrs.keys() +
suffstat.xi_squareds.keys() + suffstat.xis.keys()}
factor_graph = graph_util.make_factor_graph(factors)
tree_order = graph_util.find_tree(factor_graph)
if tree_order:
elim_order = [node for node in tree_order if isinstance(node, int)]
return pgm.make_tree_normal_log_normalizer(elim_order)
return not_found_normalizer
struct_normal_distbn = not_found_distbn
struct_normal_defn = DistributionDefinition(
matchers=struct_normal_matchers, support=SupportTypes.REAL,
check=struct_normal_check, suffstat_cls=StructuredNormalSuffStat,
make_log_normalizer=make_struct_normal_log_normalizer,
distribution=struct_normal_distbn)
exp_family_stats.append(StructuredNormalSuffStat)
distbn_defns.append(struct_normal_defn)
### Structured categorical distribution
StructuredCategoricalSuffStat = namedtuple('StructuredCategoricalSuffStat',
['joint_onehot_xis',
'single_onehot_xis'])
# TODO(matthewjmackay): we probably need a predicate on the einsum formula here
joint_onehot_xis_matcher = make_matcher(
pattern=(Einsum, Str('formula'),
Star((OneHot, (Getitem, EnvLookup('x'), Val('idx')), Val),
accumulate=['idx'])),
preds=(different_indices,),
update_suffstat=make_joint_updater('joint_onehot_xis'))
single_onehot_xi_matcher = make_matcher(
pattern=(OneHot, (Getitem, EnvLookup('x'), Val('idx')), Val),
preds=(),
update_suffstat=make_single_updater('single_onehot_xis'))
struct_categorical_matchers = frozenset([joint_onehot_xis_matcher,
single_onehot_xi_matcher])
def struct_categorical_check(suffstat):
return True # TODO(matthewjmackay): think about whether this is correct
def make_struct_categorical_log_normalizer(suffstat):
factors = suffstat.joint_onehot_xis.keys() + suffstat.single_onehot_xis.keys()
factor_graph = graph_util.make_factor_graph(factors)
tree_order = graph_util.find_tree(factor_graph)
if tree_order:
elim_order = [node for node in tree_order if not isinstance(node, tuple)]
return pgm.make_tree_categorical_log_normalizer(elim_order)
return not_found_normalizer
struct_categorical_distbn = not_found_distbn
struct_categorical_defn = DistributionDefinition(
matchers=struct_categorical_matchers,
support=SupportTypes.INTEGER, check=struct_categorical_check,
suffstat_cls=StructuredCategoricalSuffStat,
make_log_normalizer=make_struct_categorical_log_normalizer,
distribution=struct_categorical_distbn)
exp_family_stats.append(StructuredCategoricalSuffStat)
distbn_defns.append(struct_categorical_defn)
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that transform a computation graph into a (more) canonical form.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import autograd.extend as ag_extend
import autograd.numpy as np
import autograd.util as ag_util
from . import rewrites
from .tracers import ExprNode
from .tracers import ConstExpr
from .tracers import GraphExpr
from .tracers import add_n
from .tracers import is_descendant_of
from .tracers import print_expr
from .tracers import remake_expr
from .tracers import toposort
from .tracers import env_lookup
from .util import Enum
## canonicalization rule sets
eager_simplifications = {
np.dot: rewrites.dot_as_einsum,
np.multiply: rewrites.maybe_multiply,
np.divide: rewrites.maybe_divide,
np.true_divide: rewrites.maybe_divide,
np.add: rewrites.maybe_add,
np.subtract: rewrites.maybe_subtract,
np.einsum: rewrites.maybe_einsum,
ag_util.func(ag_extend.VSpace.add): rewrites.maybe_vspace_add,
ag_util.func(ag_extend.VSpace.mut_add): rewrites.maybe_vspace_add,
np.reciprocal: lambda x: x ** -1,
np.square: lambda x: x ** 2,
np.sqrt: lambda x: x ** 0.5,
np.power: rewrites.maybe_power,
np.swapaxes: rewrites.swapaxes,
    add_n: lambda *args: args[0] if len(args) == 1 else add_n(*args),
}
simplification_rules = [
rewrites.transpose_inside_einsum,
rewrites.replace_sum,
rewrites.combine_einsum_compositions,
rewrites.distribute_einsum,
rewrites.einsum_repeated_one_hot,
rewrites.log_behind_onehot_einsum,
rewrites.log_addn_behind_onehot_einsum,
rewrites.replace_log_einsum,
rewrites.fold_power,
rewrites.add_powers_within_einsum,
rewrites.increment_negative_power_in_einsum_l,
rewrites.increment_negative_power_in_einsum_r,
rewrites.replace_add,
rewrites.replace_add_addn,
rewrites.replace_addn_addn,
rewrites.replace_duplicated_addn,
rewrites.gather_log_add_einsum,
rewrites.gather_pow_add_einsum,
rewrites.gather_inv_add_einsum,
rewrites.gather_logdet_add_einsum
]
simplifiers = [rewrites.make_rewriter(rule) for rule in simplification_rules]
## main canonicalization functions
def canonicalize(expr, env={}):
"""Canonicalize an expression in an environment."""
simplification_env = dict(eager_simplifications, **env)
new_expr = remake_expr(expr, simplification_env)
while any(simplify_sweep(new_expr, rewrite) for rewrite in simplifiers):
new_expr = remake_expr(new_expr, simplification_env)
return new_expr
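# Illustrative sketch (the lambda and the x_val/y_val arguments are
# placeholders; make_expr lives in tracers):
#   expr = make_expr(lambda x, y: np.dot(x, y), x_val, y_val)
#   canonical_expr = canonicalize(expr)
# The eager table above rewrites the np.dot node into an equivalent np.einsum
# (rewrites.dot_as_einsum), and the loop re-applies `simplifiers` until no
# rewrite rule fires.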
def simplify_sweep(expr, simplification):
"""Tries to apply a simplification to an expression, returns success bool."""
if isinstance(expr, ConstExpr):
return False
elif isinstance(expr, GraphExpr):
visited = set()
def sweep(node):
visited.add(node)
return simplification(node) or any(sweep(p) for p in node.parents
if p not in visited)
return sweep(expr.expr_node)
else:
raise TypeError("Can't simplify expression type: {}".format(type(expr)))
## testing for a canonical form
# hierarchy_level[fun_1] > hierarchy_level[fun_2] implies that fun_1
# should be closer to the final node than fun_2 is.
NodeTypes = Enum('NodeTypes', ['OTHER', 'EINSUM', 'LINEAR'])
hierarchy_level = collections.defaultdict(lambda: NodeTypes.OTHER)
def register_node_type(level, *funs):
hierarchy_level.update(zip(funs, itertools.repeat(level)))
register_node_type(NodeTypes.EINSUM, np.einsum)
register_node_type(NodeTypes.LINEAR, add_n, np.squeeze)
def is_canonical(expr):
"""Necessary but not sufficient tests for a graph to be in canonical form."""
visited = set()
is_ref = lambda node: node.fun == env_lookup
def _is_canonical(node):
visited.add(node)
return (is_ref(node) or hierarchy_level[node.fun] == NodeTypes.OTHER
or all(is_ref(p) or _check_parent(p, node) and _is_canonical(p)
for p in node.parents if p not in visited))
return isinstance(expr, ConstExpr) or _is_canonical(expr.expr_node)
_parent_child_checks = [
lambda p, c: hierarchy_level[p.fun] <= hierarchy_level[c.fun],
lambda p, c: not c.fun == p.fun == np.einsum,
lambda p, c: not (c.fun == np.einsum and p.fun in {np.add, np.subtract}),
]
def _check_parent(parent, child):
return all(check(parent, child) for check in _parent_child_checks)
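# For example, an einsum node that takes another einsum as an input, or that
# takes an np.add / np.subtract node as an input, fails the checks above;
# combine_einsum_compositions and distribute_einsum are the rewrite rules
# intended to remove those patterns during canonicalization.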
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from .canonicalize import canonicalize
from .conjugacy import (complete_conditional, _extract_conditional_factors,
find_sufficient_statistic_nodes, marginalize,
split_einsum_node, SupportTypes)
from .exponential_families import batch_dirichlet
from .tracers import (eval_expr, eval_node, GraphExpr, make_expr, one_hot,
print_expr)
from . import log_probs
import autograd.numpy as np
from autograd.scipy import special
from autograd.scipy import misc
from scipy import stats
def _condition_and_marginalize(log_joint, argnum, support, *args):
sub_args = args[:argnum] + args[argnum + 1:]
marginalized = marginalize(log_joint, argnum, support, *args)
marginalized_value = marginalized(*sub_args)
conditional_factory = complete_conditional(log_joint, argnum, support, *args)
conditional = conditional_factory(*sub_args)
return conditional, marginalized_value
def testBetaBernoulli():
def log_joint(p, x, a, b):
log_prior = ((a - 1) * np.log(p) + (b - 1) * np.log1p(-p) -
special.gammaln(a) - special.gammaln(b) +
special.gammaln(a + b)).sum()
log_likelihood = (x * np.log(p) + (1 - x) * np.log1p(-p)).sum()
return log_prior + log_likelihood
n_examples = 10
a = 1.3
b = 2.4
p = np.random.beta(a, b, [3, 4])
x = np.random.uniform(size=(n_examples,) + p.shape) < p
x = x.astype(np.float32)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.UNIT_INTERVAL,
p, x, a, b))
new_a = a + x.sum(0)
new_b = b + x.shape[0] - x.sum(0)
correct_marginalized_value = (
(-special.gammaln(a) - special.gammaln(b)
+ special.gammaln(a + b)) * p.size
+ (special.gammaln(new_a) + special.gammaln(new_b)
- special.gammaln(new_a + new_b)).sum())
# self.assertAlmostEqual(marginalized_value, correct_marginalized_value,
# places=4)
# self.assertTrue(np.allclose(new_a, conditional.args[0]))
# self.assertTrue(np.allclose(new_b, conditional.args[1]))
def testFactorAnalysis():
def log_joint(x, w, epsilon, tau, alpha, beta):
log_p_epsilon = log_probs.norm_gen_log_prob(epsilon, 0, 1)
log_p_w = log_probs.norm_gen_log_prob(w, 0, 1)
log_p_tau = log_probs.gamma_gen_log_prob(tau, alpha, beta)
# TODO(mhoffman): The transposed version below should work.
# log_p_x = log_probs.norm_gen_log_prob(x, np.dot(epsilon, w), 1. / np.sqrt(tau))
log_p_x = log_probs.norm_gen_log_prob(x, np.einsum('ik,jk->ij', epsilon, w),
1. / np.sqrt(tau))
return log_p_epsilon + log_p_w + log_p_tau + log_p_x
n_examples = 200
D = 10
K = 5
alpha = 2.
beta = 8.
tau = np.random.gamma(alpha, beta)
w = np.random.normal(loc=0, scale=1, size=[D, K])
epsilon = np.random.normal(loc=0, scale=1, size=[n_examples, K])
x = np.random.normal(loc=epsilon.dot(w.T), scale=np.sqrt(tau))
all_args = [x, w, epsilon, tau, alpha, beta]
w_conditional_factory = complete_conditional(log_joint, 1,
SupportTypes.REAL, *all_args)
conditional = w_conditional_factory(x, epsilon, tau, alpha, beta)
true_cov = np.linalg.inv(tau * np.einsum('nk,nl->kl', epsilon, epsilon) +
np.eye(K))
true_mean = tau * np.einsum('nk,nd,kl->dl', epsilon, x, true_cov)
epsilon_conditional_factory = complete_conditional(log_joint, 2,
SupportTypes.REAL,
*all_args)
conditional = epsilon_conditional_factory(x, w, tau, alpha, beta)
true_cov = np.linalg.inv(tau * np.einsum('dk,dl->kl', w, w) + np.eye(K))
true_mean = tau * np.einsum('dk,nd,kl->nl', w, x, true_cov)
tau_conditional_factory = complete_conditional(log_joint, 3,
SupportTypes.NONNEGATIVE,
*all_args)
conditional = tau_conditional_factory(x, w, epsilon, alpha, beta)
true_a = alpha + 0.5 * n_examples * D
true_b = beta + 0.5 * np.sum(np.square(x - epsilon.dot(w.T)))
def main(argv):
del argv # Unused.
for _ in range(1):
testFactorAnalysis()
# testBetaBernoulli()
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions to recognize and extract useful information from graphs
(e.g. tree/chain orderings, if they exist).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from Queue import Queue
from collections import defaultdict
def make_factor_graph(factors):
"""Construct an adjacency list representation of a factor graph.
Arguments:
factors: list of tuples of nodes where each tuple represents a factor
Returns:
a dictionary where nodes map to their set of neighbors
"""
graph = defaultdict(set)
for factor in factors:
for node in factor:
graph[node].add(factor)
graph[factor].add(node)
return graph
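# For example, make_factor_graph([(0, 1), (1, 2)]) returns (as a defaultdict)
#   {0: {(0, 1)}, 1: {(0, 1), (1, 2)}, 2: {(1, 2)},
#    (0, 1): {0, 1}, (1, 2): {1, 2}},
# i.e. a bipartite adjacency structure linking variable nodes to factor nodes.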
def find_chain(graph):
"""Perform depth-first search to check if graph graph is a chain.
Arguments:
graph: dictionary mapping from node to set of node's neighbors
Returns:
a chain-ordering of the graph's nodes if one exists or False
"""
start_node = graph.keys()[0]
if len(graph[start_node]) > 2:
return False
chain_list = [start_node]
node_stack = list(enumerate(list(graph[start_node])))
while node_stack:
i, curr_node = node_stack.pop()
chain_list = i*[curr_node] + chain_list + (1-i)*[curr_node]
visited_nghbrs = graph[curr_node].intersection(set(chain_list))
unvisited_nghbrs = graph[curr_node].difference(set(chain_list))
if len(visited_nghbrs) > 1 or len(unvisited_nghbrs) > 1:
return False
if len(unvisited_nghbrs) == 1:
node_stack.append((i, unvisited_nghbrs.pop()))
return len(chain_list) == len(graph.keys()) and chain_list
def find_tree(graph):
"""Perform breadth-first search to check if graph is a tree.
Arguments:
graph: dictionary mapping from node to set of node's neighbors
Returns:
a tree-ordering of the graph's nodes if one exists or False
"""
root = graph.keys()[0]
depths = {root: 0}
node_queue = Queue()
node_queue.put((root, 0))
while not node_queue.empty():
curr_node, curr_depth = node_queue.get()
visited_nghbrs = graph[curr_node].intersection(set(depths.keys()))
unvisited_nghbrs = graph[curr_node].difference(set(depths.keys()))
# Check if there's a cycle in the graph.
if len(visited_nghbrs) > 1:
return False
# Add unvisited neighbors to the queue.
for nghbr in unvisited_nghbrs:
depths[nghbr] = curr_depth + 1
node_queue.put((nghbr, curr_depth+1))
# Sort nodes by distance from the root and check if tree contains all nodes.
visited_nodes = depths.keys()
elimination_ordering = sorted(visited_nodes, key=lambda node: depths[node],
reverse=True)
return len(elimination_ordering) == len(graph.keys()) and elimination_ordering
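# Behavioural note: on the bipartite chain produced by the example above,
# find_chain and find_tree both succeed; on a star with three or more leaves,
# find_chain returns False (some node has more than two neighbours) while
# find_tree still returns an elimination ordering with the deepest nodes first.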
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to recognize conjugacy relationships in log-joint functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
from collections import OrderedDict
import itertools
from os.path import commonprefix
from types import FunctionType, CodeType
from autograd import numpy as np
from autograd import make_vjp
from autograd import grad
from .canonicalize import canonicalize, hierarchy_level, NodeTypes
from .exponential_families import find_distributions, exp_family_stats
from .tracers import (all_descendants_of, ConstExpr, draw_expr, env_lookup ,
eval_expr, eval_node, ExprNode, extract_superexpr,
GraphExpr, is_descendant_of, make_expr, make_node,
_mutate_node, print_expr, remake_expr, subvals)
from .util import split_einsum_formula, support_type_to_name, SupportTypes
def find_sufficient_statistic_nodes(expr, free_var, split_einsums=False):
r"""Finds nodes in `expr` that represent sufficient statistics of a free var.
This function assumes that `canonicalize()` has already been called on the
graph. It may behave strangely if the graph is not in canonical form.
Algebraically, we assume that expr encodes an exponential family log density
function in free_var (potentially unnormalized), so that expr has the form
  expr = \eta \cdot t(x) = \eta_1 \cdot t_1(x) + ... + \eta_K \cdot t_K(x) + const
where each t_k is a sufficient statistic function, which is either:
1. an identity function, or
2. a nonlinear function.
The nonlinearity requirement ensures that we can separate the statistics
functions from the linear/affine form. In terms of the GraphExpr data
structure, a sufficient statistic function corresponds to a node `node` in the
graph where:
1. `node` has a variable reference to free_var as an ancestor;
2. for each node between `node` and expr.expr_node, that node is a linear
function;
3. `node` is either a nonlinear function of free_var or is itself a variable
reference to free_var.
Note that monomials implemented by einsum are handled a little differently
than other sufficient statistics, since an einsum can be either linear or
nonlinear in free_var depending on the degrees of its arguments. That is,
because we don't separate out nonlinear monomials into their own einsum nodes,
this function will return the full einsum node (including linear interactions
with other terms), which requires additional parsing to extract natural
parameters.
Args:
expr: an expression that is an affine function of a tuple of intermediate
(potentially nonlinear) functions of `free_var`.
free_var: a free variable in expr, either a string name or int index.
split_einsums: optional, bool for whether to in-place-modify expr to split
einsums (default False).
Returns:
A set of ExprNodes representing sufficient statistics functions of free_var
(but possibly containing multiple nodes that represent the same expression).
"""
if isinstance(expr, ConstExpr):
return set()
elif isinstance(expr, GraphExpr):
var_node = expr.free_vars.get(free_var) or expr.free_vars.values()[free_var]
desc = all_descendants_of(expr.expr_node, var_node)
visited = set()
suff_stats = set()
def collect_suff_stats(node):
visited.add(node)
lvl = hierarchy_level[node.fun]
if lvl == NodeTypes.OTHER:
suff_stats.add(node)
elif lvl == NodeTypes.EINSUM and sum(p in desc for p in node.parents) > 1:
if split_einsums:
argnums = [i for i, a in enumerate(node.args[1:])
if isinstance(a, ExprNode) and a in desc]
potential_node, stat_node = split_einsum_node(node, argnums)
_mutate_node(node, potential_node) # mutates node and hence expr too
suff_stats.add(stat_node)
else:
suff_stats.add(node)
else:
for p in node.parents:
if p not in visited and p in desc:
collect_suff_stats(p)
collect_suff_stats(expr.expr_node)
return suff_stats
else:
raise TypeError("Can't handle expression type: {}".format(type(expr)))
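# Illustrative sketch (hypothetical density): for a canonicalized expression of
# log p(x) = -0.5 * tau * np.sum(x * x), the x * x monomial ends up inside an
# einsum that also involves tau. With split_einsums=True that einsum is split
# so the returned statistic node computes only the x-by-x part; with the
# default False the whole einsum node (including the tau factor) is returned,
# as described in the docstring above.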
_einsum_range = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_einsum_index_set = frozenset(_einsum_range)
def _canonicalize_einsum_formula(formula):
in_formulas, out_formula = split_einsum_formula(formula)
i = len(commonprefix({f for f in in_formulas if f} | {out_formula}))
in_formulas = [in_formula[i:] for in_formula in in_formulas]
out_formula = out_formula[i:]
in_formulas = ['...' + in_formula for in_formula in in_formulas]
out_formula = '...' + out_formula
# Relabel all index names in canonical order.
index_map = defaultdict(iter(_einsum_range).next)
return ''.join(index_map[char] if char in _einsum_index_set else char
for char in '{}->{}'.format(','.join(in_formulas),
out_formula))
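# For example:
#   _canonicalize_einsum_formula('ij,ik,jk->')  ->  '...ab,...ac,...bc->...'
# Indices are renamed in order of first appearance and a broadcasting '...'
# prefix is added, so equivalent formulas map to the same canonical string.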
def make_zeros(stat):
if stat.__class__ in exp_family_stats:
return stat.__class__(**{name: make_zeros(v)
for name, v in stat._asdict().iteritems()})
elif isinstance(stat, tuple):
return tuple([make_zeros(item) for item in stat])
elif isinstance(stat, dict):
return {k: make_zeros(v) for k, v in stat.iteritems()}
else:
return np.zeros_like(stat)
# TODO(mattjj): revise to use inline_expr and replace_node_with_expr
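# `marginalize` builds a function of the remaining arguments that evaluates the
# log-joint with args[argnum] integrated out analytically: it zeroes the
# recognized sufficient statistics, reads off the natural parameters as the
# gradient with respect to those statistics, and adds the matching
# exponential-family log normalizer.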
def marginalize(log_joint_fun, argnum, support, *args):
new_log_joint_fun, log_normalizers, stats_funs, _ = (
statistic_representation(log_joint_fun, args, (support,), (argnum,)))
log_normalizer, stat_fun = log_normalizers[0], stats_funs[0]
stat_zeros = make_zeros(stat_fun(args[argnum]))
def marginalized_log_prob(*new_args):
new_args = new_args[:argnum] + (stat_zeros,) + new_args[argnum:]
log_joint = new_log_joint_fun(*new_args)
natural_parameters = grad_namedtuple(new_log_joint_fun, argnum)(*new_args)
log_normalizer_val = log_normalizer(natural_parameters)
return log_joint + log_normalizer_val
return marginalized_log_prob
def complete_conditional(log_joint_fun, argnum, support, *args):
"""Infers tractable complete-conditional distributions from log-joints.
Args:
log_joint_fun: A callable that returns the log-joint probability of its
arguments under some model.
argnum: Integer position of the argument to log_joint_fun whose
complete conditional we want. For example, if argnum == 1 and
log_joint_fun(x, y, z) is the joint log-probability log p(x, y,
z), then this function will try to return p(y | x, z).
    support: A SupportTypes value giving the support of args[argnum], e.g.
      SupportTypes.REAL or SupportTypes.UNIT_INTERVAL.
*args: Arguments to log_joint_fun. These are needed for the tracer.
Returns:
conditional_factory: A callable that takes the same args as
log_joint_fun() and returns the complete conditional as a frozen
scipy.stats distribution.
"""
# TODO(mhoffman): Make it possible to pass multiple argnums and
# return multiple conditionals.
new_log_joint_fun, log_normalizers, stats_funs, distbns = (
statistic_representation(log_joint_fun, args, (support,), (argnum,)))
log_normalizer, stat_fun, distbn = (
      log_normalizers[0], stats_funs[0], distbns[0])
stat_zeros = make_zeros(stat_fun(args[argnum]))
def conditional_factory(*new_args):
new_args = new_args[:argnum] + (stat_zeros,) + new_args[argnum:]
natural_parameters = grad_namedtuple(new_log_joint_fun, argnum)(*new_args)
return distbn(natural_parameters)
return conditional_factory
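# Illustrative usage, mirroring the beta-bernoulli example in the tests
# (variable values are placeholders):
#   factory = complete_conditional(log_joint, 0, SupportTypes.UNIT_INTERVAL,
#                                  p, x, a, b)
#   posterior = factory(x, a, b)  # frozen scipy.stats distribution over p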
def statistic_representation(log_joint_fun, args, supports, argnums=None):
if argnums is None:
argnums = range(len(args))
# TODO(mattjj): add optimization to not always split the einsum node
expr = _split_einsum_stats(canonicalize(make_expr(log_joint_fun, *args)))
names = [expr.free_vars.keys()[argnum] for argnum in argnums]
stats_nodes = [find_sufficient_statistic_nodes(expr, name) for name in names]
stats_nodes, log_normalizers, distbns = (
find_distributions(stats_nodes, supports))
new_log_joint_fun = _make_stat_log_joint(expr, argnums, stats_nodes, supports)
make_stat_fun = (lambda name, nodes: lambda arg:
eval_node(nodes, expr.free_vars, {name: arg}))
stats_funs = map(make_stat_fun, names, stats_nodes)
return new_log_joint_fun, log_normalizers, stats_funs, distbns
def make_initializers(args, neg_energy, normalizers, stats_funs):
stats_vals = [stat_fun(arg) for stat_fun, arg in zip(stats_funs, args)]
make_nat_init = (lambda i: lambda scale=1.:
make_vjp(neg_energy, i)(*stats_vals)[0](scale))
natural_initializers = map(make_nat_init, range(len(normalizers)))
make_mean_init = (lambda (i, normalizer): lambda scale=1.:
grad(normalizer)(make_vjp(neg_energy, i)(*stats_vals)[0](scale)))
mean_initializers = map(make_mean_init, enumerate(normalizers))
return natural_initializers, mean_initializers
def _split_einsum_stats(expr):
expr = remake_expr(expr) # copy to avoid mutating expr
for name in expr.free_vars:
find_sufficient_statistic_nodes(expr, name, split_einsums=True) # mutates
return remake_expr(expr) # common-subexpression elimination
def flat_dict(stats):
stats_dict = {}
def add_to_dict(item, name_so_far):
if item.__class__ in exp_family_stats:
for subname, subitem in item._asdict().iteritems():
add_to_dict(subitem, name_so_far + '_' + subname)
elif isinstance(item, tuple) or isinstance(item, list):
for subname, subitem in enumerate(item):
add_to_dict(subitem, name_so_far + '_' + str(subname))
elif isinstance(item, dict):
for subname, subitem in item.iteritems():
add_to_dict(subitem, name_so_far + '_' + str(subname))
elif item is not None:
stats_dict[name_so_far] = item
add_to_dict(stats, '')
return stats_dict
def _make_stat_log_joint(expr, argnums, stats_nodes, supports):
names = tuple(expr.free_vars.keys())
g_expr = extract_superexpr(expr, flat_dict(stats_nodes))
def construct_env(args):
env = {name: arg for i, (name, arg)
in enumerate(zip(names, args)) if i not in argnums}
flat_stats_dict = flat_dict([args[argnum] for argnum in argnums])
return dict(env, **flat_stats_dict)
g_raw = lambda *args: eval_expr(g_expr, construct_env(args))
g = make_fun(g_raw, name='neg_energy', varnames=names)
return g
def make_fun(fun, **kwargs):
code = fun.func_code
attr_names = ['argcount', 'nlocals', 'stacksize', 'flags', 'code',
'consts', 'names', 'varnames', 'filename', 'name',
'firstlineno', 'lnotab', 'freevars', 'cellvars']
new_code = CodeType(*(kwargs.get(name, getattr(code, 'co_' + name))
for name in attr_names))
return FunctionType(new_code, fun.func_globals, closure=fun.func_closure)
def split_einsum_node(node, stat_argnums, canonicalize=True):
"""Pushes part of an einsum computation up a level in the graph.
Args:
node: The einsum ExprNode to break up. Must contract to a scalar.
stat_argnums: Which non-formula arguments to push out of `node`.
Returns:
potential_node: A new einsum ExprNode that computes the same
function as `node`, but only indirectly depends on the arguments
pointed to by `stat_argnums` through the newly created
`stat_node`.
stat_node: A new einsum ExprNode that depends directly on the
arguments pointed to by `stat_argnums`, and is used as an argument
to `potential_node`.
Examples:
```
stat_argnums == [2, 3]:
einsum('ij,ik,j,k->', X, X, beta, beta)
=>
einsum('ij,ik,jk->', X, X, einsum('j,k->jk', beta, beta))
stat_argnums == [0, 1]:
einsum('...ab,...ab,,a->', x, x, -0.5, tau)
=>
einsum('...ab,,a->', einsum('...ab,...ab->...ab', x, x), -0.5, tau)
```
"""
formula = node.args[0]
assert isinstance(formula, str), "Must use string-formula form of einsum."
in_formulas, out_formula = split_einsum_formula(formula)
in_formulas = [formula.lstrip('...') for formula in in_formulas]
out_formula = out_formula.lstrip('...')
assert not out_formula, "Must contract to a scalar."
param_argnums = [i for i, _ in enumerate(in_formulas)
if i not in stat_argnums]
stat_inputs = ','.join(in_formulas[i] for i in stat_argnums)
param_inputs = ','.join(in_formulas[i] for i in param_argnums)
stat_indexes = ''.join(OrderedDict.fromkeys(stat_inputs.replace(',', '')).keys())
stat_formula = '{}->{}'.format(stat_inputs, stat_indexes)
if param_argnums:
pot_formula = '{},{}->'.format(param_inputs, stat_indexes)
else:
pot_formula = '{}->'.format(stat_indexes)
if canonicalize:
stat_formula = _canonicalize_einsum_formula(stat_formula)
pot_formula = _canonicalize_einsum_formula(pot_formula)
stat_node = make_node(np.einsum,
[stat_formula] + [node.args[i+1] for i in stat_argnums],
node.kwargs)
pot_node = make_node(np.einsum,
[pot_formula] + [node.args[i+1] for i in param_argnums]
+ [stat_node],
node.kwargs)
return pot_node, stat_node
def grad_namedtuple(fun, argnum=0):
assert type(argnum) is int
def gradfun(*args):
args = list(args)
args[argnum], unflatten = _flatten_namedtuple(args[argnum])
flat_fun = lambda *args: fun(*subvals(args, [(argnum, unflatten(args[argnum]))]))
return unflatten(grad(flat_fun, argnum)(*args))
return gradfun
def _flatten_namedtuple(x):
try:
return tuple(x), lambda tup: type(x)(*tup)
except AttributeError:
return x, lambda x: x
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pattern matcher for computation graphs.
See //learning/brain/contrib/kfac/.../tensormatch/graph_matcher.py for more.
The grammar for the pattern language implemented in this file is:
pattern ::= element | choice | list | internal_node | negated_pattern
patterns ::= pattern, patterns | ()
element ::= ('?', name, restrictions) | ('?', name, restrictions, binding)
name ::= PYTHON_STRING
restrictions ::= PYTHON_FUNCTION, restrictions | ()
binding ::= PYTHON_FUNCTION
choice ::= ('?:choice', patterns)
list ::= ('List', list_elements)
list_elements ::= list_element, list_elements | ()
list_element ::= pattern | star
star ::= ('??', name, pattern, accumulate)
internal_node ::= (pattern, input_constraints)
input_constraints ::= list_elements
negated_pattern ::= ('?:not', pattern)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
import itertools
import operator
## graph interface (otherwise the logic is generic to any graph)
parents = operator.attrgetter('args')
## utilities
def identity(x): return x
def _any(itr):
for val in itr:
if val: return val
return False
def _all(itr):
any_iterations = False
for val in itr:
any_iterations = True
if not val: return val
return val if any_iterations else True
def is_seq(x): return isinstance(x, (tuple, list))
def is_empty_seq(x): return is_seq(x) and not bool(x)
def is_pair(x): return is_seq(x) and len(x) > 0
def is_thunk(x):
if callable(x):
spec = inspect.getargspec(x)
num_free_args = len(set(spec.args)) - len(set(spec.defaults or {}))
return num_free_args == 0
return False
Literal = collections.namedtuple('Literal', ['val'])
Literal.__hash__ = lambda self: id(self.val)
def _singleton(elt):
try:
return {elt}
except TypeError:
return {Literal(elt)}
def _set(*elts): return reduce(operator.or_, map(_singleton, elts))
## define the syntax of the pattern language
is_pat = is_pair
def is_element_pattern(pat): return is_pair(pat) and pat[0] == '?'
def element_name(pat): return pat[1]
def element_restrictions(pat): return pat[2]
def element_binding(pat): return pat[3] if pat[3:] else identity
def is_choice_pattern(pat): return is_pair(pat) and pat[0] == '?:choice'
def choice_alternatives(pat): return pat[1:]
def is_list_pattern(pat): return is_pair(pat) and pat[0] == 'List'
def list_elements(pat): return pat[1:]
# star matchers are a special form that only occurs within list patterns,
# and their matching is handled inside match_list
def is_star_matcher(matcher):
return is_pair(matcher) and matcher[0] == '??' and len(matcher) == 4
def is_not_pattern(pat): return is_pair(pat) and pat[0] == '?:not'
def negated_pattern(pat): return pat[1]
def is_noconsume_pattern(pat): return is_pat(pat) and pat[0] == '?:noconsume'
def is_internal_node_pattern(pat): return is_pair(pat) and is_pair(pat[0])
## constructors for pattern-matching combinators
def match_eqv(pattern):
def eqv_match(data, bindings, consumed, succeed):
return data == pattern and succeed(bindings, consumed | _singleton(data))
return eqv_match
def match_noconsume(data, bindings, consumed, succeed): # pylint: disable=unused-argument
return succeed(bindings, consumed)
def match_element(name, restrictions, binding):
def element_match(data, bindings, consumed, succeed):
consumed |= _singleton(data)
if _all(restriction(data) for restriction in restrictions):
if not name:
return succeed(bindings, consumed)
elif name in bindings:
return bindings[name] == binding(data) and succeed(bindings, consumed)
return succeed(dict(bindings, **{name: binding(data)}), consumed)
return False
return element_match
def match_choice(*match_combinators):
def choice_match(data, bindings, consumed, succeed):
return _any(matcher(data, bindings, consumed, succeed)
for matcher in match_combinators)
return choice_match
def match_list(*match_combinators):
def list_match(data, bindings, consumed, succeed):
return _list_match(data, match_combinators, bindings, consumed, succeed)
def _list_match(data, matchers, bindings, consumed, succeed, carried=()):
def match_first_then_rest(combinator, datum):
return combinator(datum, bindings, consumed, match_subsequent_elements)
def match_subsequent_elements(bindings, consumed):
return _list_match(data[1:], matchers[1:], bindings, consumed, succeed)
def try_star(star_matcher):
_, var_name, submatcher, accumulate = star_matcher
bind = lambda val: dict(bindings, **({var_name: val} if var_name else {}))
# if the name is already bound, check that we have a segment here that's
# consistent with it
if var_name in bindings:
n = len(bindings[var_name])
return (tuple(bindings[var_name]) == tuple(data[:n])
and _list_match(data[n:], matchers[1:], bindings, consumed,
succeed))
def accumulate_back(new_bindings, bindings):
accumulated = {k:bindings.get(k, ()) + (new_bindings[k],) for k in accumulate}
return dict(new_bindings, **accumulated)
def alternatives():
# if the data list is empty and there are no other matchers, we match
if not data and not matchers:
yield succeed(bind(data), consumed | _set(data))
# try matching nothing more, proceed with the rest of the non-empty list
yield _list_match(data[0:], matchers[1:],
bind(carried), consumed | _set(carried), succeed)
# if the data is not empty, try consuming one element *without* using up
# this star matcher
if data:
subbindings = {k:v for k, v in bindings.iteritems() if k not in accumulate}
yield submatcher(data[0], subbindings, consumed,
lambda new_bindings, consumed: _list_match(
data[1:], matchers, accumulate_back(new_bindings, bindings),
consumed, succeed, carried = carried + (data[0],)))
return _any(alternatives())
if is_empty_seq(matchers) and is_empty_seq(data):
return succeed(bindings, consumed)
if is_pair(matchers):
if is_star_matcher(matchers[0]):
return try_star(matchers[0])
else:
return is_pair(data) and match_first_then_rest(matchers[0], data[0])
return False
return list_match
def match_not(match_combinator):
def not_match(data, bindings, consumed, succeed):
return (not match_combinator(data, bindings, set(),
lambda bindings, _: True)
and succeed(bindings, consumed))
return not_match
def match_internal(*match_combinators):
expanded_matcher = match_list(*match_combinators)
def internal_match(data, bindings, consumed, succeed):
try:
expanded = tuple(itertools.chain([data], parents(data)))
except:
return False
return expanded_matcher(expanded, bindings, consumed, succeed)
return internal_match
## parsing the pattern language into compositions of combinators
class PatternEvaluator(object):
def __init__(self, default_operation=None):
self.default_operation = default_operation
self.handlers = []
def defhandler(self, predicate, handler):
self.handlers.append((predicate, handler))
def __call__(self, pat):
for predicate, handler in self.handlers:
if predicate(pat):
return handler(pat)
if self.default_operation:
return self.default_operation(pat)
raise ValueError
make_combinators = PatternEvaluator(match_eqv)
make_combinators.defhandler(
is_element_pattern,
lambda pat: match_element(element_name(pat), element_restrictions(pat),
element_binding(pat)))
make_combinators.defhandler(
is_list_pattern,
    lambda pat: match_list(*map(make_combinators, list_elements(pat))))
make_combinators.defhandler(
is_star_matcher,
lambda pat: (pat[0], pat[1], make_combinators(pat[2]), pat[3]))
make_combinators.defhandler(
is_choice_pattern,
lambda pat: match_choice(*map(make_combinators, choice_alternatives(pat))))
make_combinators.defhandler(
is_not_pattern,
lambda pat: match_not(make_combinators(negated_pattern(pat))))
make_combinators.defhandler(
is_noconsume_pattern,
lambda pat: match_noconsume)
make_combinators.defhandler(
is_internal_node_pattern,
lambda pat: match_internal(*map(make_combinators, pat)))
## utility function so the patterns require fewer parentheses
def expand_syntax(pat):
def is_thunk(x):
if callable(x):
spec = inspect.getargspec(x)
num_free_args = len(spec.args) - len(spec.defaults or {})
return num_free_args == 0
return False
while is_thunk(pat):
pat = pat()
if isinstance(pat, (tuple, list)):
return type(pat)(map(expand_syntax, pat))
return pat
## main matcher interface functions
def matcher(pattern):
combinators = make_combinators(expand_syntax(pattern))
def match(node):
return combinators(node, {}, set(), lambda bindings, _: bindings or True)
return match
def all_matcher(pattern):
combinators = make_combinators(expand_syntax(pattern))
results = []
def all_matches(node):
combinators(node, {}, set(),
lambda bindings, _: results.append(bindings or True))
return results
return all_matches
def matcher_with_consumed(pattern):
combinators = make_combinators(expand_syntax(pattern))
def match(node):
return combinators(node, {}, set(),
lambda bindings, consumed: (bindings, consumed))
return match
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import autograd.numpy as np
from autograd import grad
from collections import OrderedDict, defaultdict
from itertools import product
from itertools import chain
from autoconj import pgm
from autoconj.tracers import logdet
from autoconj.exponential_families import (
init_suffstat, StructuredNormalSuffStat, StructuredCategoricalSuffStat)
def find_argmax(natparam_grad, num_nodes):
"""Given the gradient of tree_categorical_maximum w.r.t. the natural
parameters, finds the argmax of the tree categorical's log-joint."""
not_found = set(range(num_nodes)) # haven't found the argmax for these nodes
argmax = [0 for _ in range(num_nodes)]
factor_iter = chain(natparam_grad.single_onehot_xis.iteritems(),
natparam_grad.joint_onehot_xis.iteritems())
while not_found:
factor, param = factor_iter.next()
if not_found.intersection(set(factor)):
nonzero = np.nonzero(param)
for i, node in enumerate(factor):
argmax[node] = int(nonzero[i][0])
not_found.discard(node)
return argmax
def _add_diag(tau, J):
return J + np.einsum('...i,j,ij->...ij', tau, np.ones(tau.shape[-1]),
np.eye(tau.shape[-1]))
def make_struct_normal_natparam(factors, sizes, single_covars=True,
single_diags=True, means=True,
xi_xjtrs=True, xi_times_xjs=True):
"""Makes random natural parameter values for a structured normal distribution,
given which factors are present in the graphical model.
"""
natparam = init_suffstat(StructuredNormalSuffStat)
if not single_covars and not single_diags:
assert False
for factor in factors:
if len(factor) == 1:
node, node_size = factor[0], sizes[factor[0]]
if means:
natparam.xis[(node,)] = np.random.randn(node_size)
if single_covars:
sqrtJ = np.random.randn(node_size, 2*node_size)
halfminusJ = -0.5 * np.dot(sqrtJ, sqrtJ.T)
natparam.xi_xitrs[(node,)] = halfminusJ
if single_diags:
halfminustau = -0.5 * np.exp(np.random.randn(node_size))
natparam.xi_squareds[(node,)] = halfminustau
else:
v1, v2 = factor[0], factor[1]
size1, size2 = sizes[factor[0]], sizes[factor[1]]
if xi_xjtrs:
natparam.xi_xjtrs[(v1,v2)] = np.random.randn(size1, size2)
if size1 == size2 and xi_times_xjs:
natparam.xi_times_xjs[(v1, v2)] = np.random.randn(size1)
return natparam
def make_struct_categorical_natparam(factors, sizes):
natparam = init_suffstat(StructuredCategoricalSuffStat)
for factor in factors:
factor_sizes = [sizes[node] for node in factor]
if len(factor) > 1:
natparam.joint_onehot_xis[factor] = np.random.randn(*factor_sizes)
else:
natparam.single_onehot_xis[factor] = np.random.randn(*factor_sizes)
return natparam
def actual_categorical_log_normalizer(natparam, sizes):
normalizer = 0
for x in product(*[np.arange(size) for size in sizes]):
logp_x = 0
for factor, param in chain(natparam.single_onehot_xis.iteritems(),
natparam.joint_onehot_xis.iteritems()):
idx = tuple([x[node] for node in factor])
logp_x += param[idx]
normalizer += np.exp(logp_x)
return np.log(normalizer)
def actual_normal_log_normalizer(natparam, factors, sizes):
def make_dense_precision_matrix(natparam, sizes):
dims = [sum(sizes[:n]) for n in range(len(sizes)+1)]
prec = np.zeros((dims[-1], dims[-1]))
for factor, minusJ in natparam.xi_xjtrs.iteritems():
node1, node2 = factor
prec[dims[node1]:dims[node1+1], dims[node2]:dims[node2+1]] += -minusJ
prec[dims[node2]:dims[node2+1], dims[node1]:dims[node1+1]] += -minusJ.T
for factor, minustau in natparam.xi_times_xjs.iteritems():
node1, node2 = factor
prec[dims[node1]:dims[node1+1], dims[node2]:dims[node2+1]] += \
-pgm.diag(minustau)
prec[dims[node2]:dims[node2+1], dims[node1]:dims[node1+1]] += \
-pgm.diag(minustau).T
for factor, halfminusJ in natparam.xi_xitrs.iteritems():
node, = factor
prec[dims[node]:dims[node+1], dims[node]:dims[node+1]] += -2*halfminusJ
for factor, halfminustau in natparam.xi_squareds.iteritems():
node, = factor
prec[dims[node]:dims[node+1], dims[node]:dims[node+1]] += \
-2*pgm.diag(halfminustau)
return prec
prec = make_dense_precision_matrix(natparam, sizes)
inv_prec = np.linalg.inv(prec)
  h = np.concatenate([natparam.xis.get((n,), np.zeros(sizes[n]))
for n in range(len(sizes))])
log_normalizer = 0.5 * np.dot(h, np.dot(inv_prec, h))
log_normalizer -= 0.5*logdet(prec) + 0.5*sum(sizes)*np.log(2*np.pi)
return log_normalizer
def actual_categorical_maximum(natparam, sizes):
max_logp = -float('inf')
argmax = None
for x in product(*[np.arange(size) for size in sizes]):
logp_x = struct_categorical_logpdf(x, natparam)
if logp_x > max_logp:
max_logp = logp_x
argmax = x
return max_logp, argmax
def struct_categorical_logpdf(x, natparam):
logp_x = 0
for factor, param in chain(natparam.single_onehot_xis.iteritems(),
natparam.joint_onehot_xis.iteritems()):
idx = tuple([x[node] for node in factor])
logp_x += param[idx]
return logp_x
class PgmTest(absltest.TestCase):
def testNormalTreeLogNormalizerChain(self):
T = 10
factors = [(n,) for n in range(T)] + [(n, n+1) for n in range(T-1)]
sizes = np.random.choice(10, size=(T,))
natparam = make_struct_normal_natparam(factors, sizes)
elim_order = range(T)
tree_normal_log_normalizer = pgm.make_tree_normal_log_normalizer(elim_order)
actual_log_normalizer = actual_normal_log_normalizer(natparam, factors,
sizes)
log_normalizer = tree_normal_log_normalizer(natparam)
self.assertTrue(np.allclose(actual_log_normalizer, log_normalizer))
def testNormalTreeLogNormalizerWheel(self):
num_nodes = 10
factors = [(n,) for n in range(num_nodes)] +\
[(0, n) for n in range(1, num_nodes)]
sizes = np.random.choice(10, size=(num_nodes,))
natparam = make_struct_normal_natparam(factors, sizes)
elim_order = range(1, num_nodes) + [0]
tree_normal_log_normalizer = pgm.make_tree_normal_log_normalizer(elim_order)
actual_log_normalizer = actual_normal_log_normalizer(natparam, factors,
sizes)
log_normalizer = tree_normal_log_normalizer(natparam)
self.assertTrue(np.allclose(actual_log_normalizer, log_normalizer))
def testNormalTreeLogNormalizerGeneric(self):
factors = [(n,) for n in range(10)]
factors += [(0,1), (0,8), (1,4), (1,5), (1,2), (2,3), (2,6), (2,7), (2,9)]
sizes = np.random.choice(9, size=(10,)) + 1
natparam = make_struct_normal_natparam(factors, sizes)
elim_order = [9, 4, 5, 6, 7, 3, 2, 1, 8, 0]
tree_normal_log_normalizer = pgm.make_tree_normal_log_normalizer(elim_order)
actual_log_normalizer = actual_normal_log_normalizer(natparam, factors,
sizes)
log_normalizer = tree_normal_log_normalizer(natparam)
self.assertTrue(np.allclose(actual_log_normalizer, log_normalizer))
def testCategoricalTreeLogNormalizerSimple(self):
factors = [(0,), (1,), (2,), (0,1,2)]
sizes = [2, 3, 4]
natparam = make_struct_categorical_natparam(factors, sizes)
elim_order = [0, 1, 2]
categorical_tree_log_normalizer =\
pgm.make_tree_categorical_log_normalizer(elim_order)
actual_log_normalizer = actual_categorical_log_normalizer(natparam,
sizes=(2,3,4))
log_normalizer = categorical_tree_log_normalizer(natparam)
self.assertTrue(np.allclose(actual_log_normalizer, log_normalizer))
def testCategoricalTreeLogNormalizerGeneric(self):
factors = [(0,1,2,3), (1,4,5), (5,), (2,), (3,6,7), (0,8)]
sizes = [2, 3, 4, 2, 3, 4, 2, 3, 4]
natparam = make_struct_categorical_natparam(factors, sizes)
elim_order = [5, 6, 8, 2, 7, 4, 3, 1, 0]
categorical_tree_log_normalizer =\
pgm.make_tree_categorical_log_normalizer(elim_order)
actual_log_normalizer = actual_categorical_log_normalizer(natparam, sizes)
log_normalizer = categorical_tree_log_normalizer(natparam)
self.assertTrue(np.allclose(actual_log_normalizer, log_normalizer))
# def testCategoricalTreeFindMaximumSimple(self):
# factors = [(0,), (1,), (2,), (0,1,2)]
# sizes = [2, 3, 4]
# natparam = make_struct_categorical_natparam(factors, sizes)
# elim_order = [0, 1, 2]
# tree_categorical_maximum = pgm.make_tree_categorical_maximum(elim_order)
# actual_maximum, actual_argmax = actual_categorical_maximum(natparam, sizes)
# maximum = tree_categorical_maximum(natparam)
# argmax = find_argmax(grad(tree_categorical_maximum)(natparam), num_nodes=3)
# self.assertTrue(np.allclose(actual_maximum, maximum))
# self.assertTrue(np.allclose(actual_maximum,
# struct_categorical_logpdf(argmax, natparam)))
# def testCategoricalTreeFindMaximumGeneric(self):
# factors = [(0,1,2,3), (1,4,5), (5,), (2,), (3,6,7), (0,8)]
# sizes = [2, 3, 4, 2, 3, 4, 2, 3, 4]
# natparam = make_struct_categorical_natparam(factors, sizes)
# elim_order = [5, 6, 8, 2, 7, 4, 3, 1, 0]
# tree_categorical_maximum = pgm.make_tree_categorical_maximum(elim_order)
# actual_maximum, actual_argmax = actual_categorical_maximum(natparam, sizes)
# maximum = tree_categorical_maximum(natparam)
# argmax = find_argmax(grad(tree_categorical_maximum)(natparam), num_nodes=9)
# self.assertTrue(np.allclose(actual_maximum, maximum))
# self.assertTrue(np.allclose(actual_maximum,
# struct_categorical_logpdf(argmax, natparam)))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from autoconj import graph_util
from collections import defaultdict, OrderedDict
def check_elimination_ordering(ordering, depths):
depth_list = [depths[node] for node in ordering]
for i in range(len(depth_list)):
if not all([depth_list[i] >= depth for depth in depth_list[i+1:]]):
return False
return True
def add_edges(pgm, edge_list):
for v1, v2 in edge_list:
if v1 not in pgm:
pgm[v1] = set()
if v2 not in pgm:
pgm[v2] = set()
pgm[v1].add(v2)
pgm[v2].add(v1)
T = 10
class GraphUtilTest(absltest.TestCase):
def testFindChainFromEnd(self):
# Starting at the end of a chain.
pgm = OrderedDict([(0, set([1]))] +
[(t, set([t-1, t+1])) for t in range(1, T-1)] +
[(T-1, set([T-2]))])
chain_list = graph_util.find_chain(pgm)
self.assertEqual(chain_list, range(T))
def testFindChainFromMiddle(self):
# Starting in the middle of a chain.
mid = T // 2
pgm = OrderedDict([(t, set([t-1, t+1])) for t in range(mid, T-1)] +
[(0, set([1])), (T-1, set([T-2]))] +
[(t, set([t-1, t+1])) for t in range(1, mid)])
chain_list = graph_util.find_chain(pgm)
self.assertEqual(chain_list, list(reversed(range(T))))
def testFindChainCycle(self):
# Cycle graph.
pgm = OrderedDict([(0, set([1, T-1]))] +
[(t, set([t-1, t+1])) for t in range(1, T-1)] +
[(T-1, set([T-2, 0]))])
self.assertFalse(graph_util.find_chain(pgm))
def testFindChainDisconnectedGraph(self):
# Disconnected graph which includes chain.
pgm = OrderedDict([(0, set([1, T-1]))] +
[(t, set([t-1, t+1])) for t in range(1, T-1)] +
[(T-1, set([T-2, 0]))] +
[(T, set([]))])
self.assertFalse(graph_util.find_chain(pgm))
def testFindChainConnectedGraph(self):
# Connected graph which is not a chain.
pgm = OrderedDict([(0, set([1])), (1, set([0, 2, 3])),
(2, set([1])), (3, set([1]))])
self.assertFalse(graph_util.find_chain(pgm))
def testFindTreeChainFromEnd(self):
# Starting at the end of a chain.
pgm = OrderedDict([(0, set([1]))] +
[(t, set([t-1, t+1])) for t in range(1, T-1)] +
[(T-1, set([T-2]))])
depths = dict(zip(range(T), range(T)))
elimination_order = graph_util.find_tree(pgm)
self.assertTrue(check_elimination_ordering(elimination_order, depths))
def testFindTreeChainFromMiddle(self):
mid = T // 2
pgm = OrderedDict([(t, set([t-1, t+1])) for t in range(mid, T-1)] +
[(0, set([1])), (T-1, set([T-2]))] +
[(t, set([t-1, t+1])) for t in range(1, mid)])
depths = {t: abs(mid-t) for t in range(T)}
elimination_order = graph_util.find_tree(pgm)
self.assertTrue(check_elimination_ordering(elimination_order, depths))
def testFindTreeWheel(self):
T = 3
pgm = OrderedDict()
add_edges(pgm, [(0, t) for t in range(1, T)])
depths = dict([(0, 0)] + [(t, 1) for t in range(1, T)])
elimination_order = graph_util.find_tree(pgm)
self.assertTrue(check_elimination_ordering(elimination_order, depths))
def testFindTreeGeneric(self):
pgm = OrderedDict()
# Children:
# (root) 0: 1, 2
# 1: 3, 4, 5
# 2: 7
# 3: 6
# 4: 9, 10
# 5:
# 6:
# 7: 8
# 8:
# 9:
# 10:
add_edges(pgm, [(0, 1), (0, 2), (1, 3), (1, 4), (1, 5), (3, 6),
(4, 9), (4, 10), (1, 5), (2, 7), (7, 8)])
depths = {0:0, 1:1, 2:1, 3:2, 4:2, 5:2, 6:3, 7:2, 8:3, 9:3, 10:3}
elimination_order = graph_util.find_tree(pgm)
self.assertTrue(check_elimination_ordering(elimination_order, depths))
def testFindTreeLoop(self):
pgm = OrderedDict()
add_edges(pgm, [(0, 1), (0, 2), (1, 3), (1, 4), (1, 5), (3, 6),
(4, 9), (4, 10), (1, 5), (2, 7), (7, 8)])
add_edges(pgm, [(8, 0)])
elimination_order = graph_util.find_tree(pgm)
self.assertFalse(elimination_order)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import autograd.numpy as np
from scipy import stats
from autoconj import log_probs
class LogProbTest(absltest.TestCase):
def testCategoricalGenLogProb(self):
x = np.array(2)
x_one_hot = np.array([0, 0, 1])
p = np.array([0.4, 0.25, 0.35])
value = log_probs.categorical_gen_log_prob(x, p=p)
true_value = stats.multinomial.logpmf(x_one_hot, n=1, p=p)
self.assertAlmostEqual(value, true_value)
xs_one_hot = stats.multinomial.rvs(n=1, p=p, size=[10])
xs = np.argmax(xs_one_hot, axis=1)
value = sum([log_probs.categorical_gen_log_prob(xs[i], p=p)
for i in range(xs.shape[0])])
true_value = sum([stats.multinomial.logpmf(xs_one_hot[i], n=1, p=p)
for i in range(xs_one_hot.shape[0])])
self.assertAlmostEqual(value, true_value)
def testDirichletGenLogProb(self):
x = np.array([0.4, 0.25, 0.35])
alpha = np.array([2.12, 0.54, 1.6])
value = log_probs.dirichlet_gen_log_prob(x, alpha=alpha)
true_value = stats.dirichlet.logpdf(x, alpha=alpha)
self.assertAlmostEqual(value, true_value)
xs = stats.dirichlet.rvs(alpha=alpha, size=[10])
value = sum([log_probs.dirichlet_gen_log_prob(xs[i], alpha=alpha)
for i in range(xs.shape[0])])
true_value = sum([stats.dirichlet.logpdf(xs[i], alpha=alpha)
for i in range(xs.shape[0])])
self.assertAlmostEqual(value, true_value)
def testMultinomialGenLogProb(self):
x = np.array([0, 0, 1])
n = 1
p = np.array([0.4, 0.25, 0.35])
value = log_probs.multinomial_gen_log_prob(x, n=n, p=p)
true_value = stats.multinomial.logpmf(x, n=n, p=p)
self.assertAlmostEqual(value, true_value)
xs = stats.multinomial.rvs(n=n, p=p, size=[10])
value = sum([log_probs.multinomial_gen_log_prob(xs[i], n=n, p=p)
for i in range(xs.shape[0])])
true_value = sum([stats.multinomial.logpmf(xs[i], n=n, p=p)
for i in range(xs.shape[0])])
self.assertAlmostEqual(value, true_value)
def testNormGenLogProb(self):
x = 2.3
loc = 0.3
scale = 1.0
value = log_probs.norm_gen_log_prob(x, loc=loc, scale=scale)
true_value = stats.norm.logpdf(x, loc=loc, scale=scale)
self.assertAlmostEqual(value, true_value)
x = stats.norm.rvs(loc=loc, scale=scale, size=[10])
value = log_probs.norm_gen_log_prob(x, loc=loc, scale=scale)
true_value = sum(stats.norm.logpdf(x, loc=loc, scale=scale))
self.assertAlmostEqual(value, true_value)
if __name__ == '__main__':
np.random.seed(3251)
absltest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import chain
import autograd.numpy as np
import time
from absl.testing import absltest
from autoconj.conjugacy import complete_conditional, marginalize
from autoconj.tracers import one_hot
from autoconj.util import SupportTypes
from pgm_test import (actual_categorical_log_normalizer,
actual_normal_log_normalizer,
make_struct_categorical_natparam,
make_struct_normal_natparam)
def structured_normal_logpdf(x, natparam):
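  """Unnormalized structured-normal log-density: sums the linear, quadratic,
  and pairwise terms stored per factor in `natparam`."""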
logp = 0
  for factor, mean in natparam.xis.items():
    logp += np.dot(x[factor[0]], mean)
  for factor, halfminusJ in natparam.xi_xitrs.items():
    logp += np.dot(x[factor[0]], np.dot(halfminusJ, x[factor[0]]))
  for factor, halfminustau in natparam.xi_squareds.items():
    logp += np.dot(x[factor[0]], halfminustau*x[factor[0]])
  for factor, minusJ in natparam.xi_xjtrs.items():
    node1, node2 = factor
    logp += np.dot(x[node1], np.dot(minusJ, x[node2]))
  for factor, minustau in natparam.xi_times_xjs.items():
    node1, node2 = factor
    logp += np.dot(x[node1], minustau*x[node2])
return logp
def structured_categorical_logpdf(x, natparam, dim):
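  """Unnormalized structured-categorical log-density: contracts each factor's
  parameter tensor with one-hot encodings of the assigned states."""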
logp = 0
alphabet = 'abcdefghijklmnopqrstuvwxyz'
  factor_iter = chain(natparam.single_onehot_xis.items(),
                      natparam.joint_onehot_xis.items())
for factor, param in factor_iter:
factor_idxs = ''.join(alphabet[i] for i in range(len(factor)))
in_formula = ','.join([factor_idxs] +
[alphabet[i] for i in range(len(factor))])
formula = '{}->'.format(in_formula)
logp += np.einsum(formula, param,
*[one_hot(x[node], dim) for node in factor])
return logp
def _condition_and_marginalize(log_joint, argnum, support, *args):
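  """Returns the complete conditional for argument `argnum` along with the
  log-normalizer value obtained by marginalizing that argument out."""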
sub_args = args[:argnum] + args[argnum + 1:]
marginalized = marginalize(log_joint, argnum, support, *args)
marginalized_value = marginalized(*sub_args)
conditional_factory = complete_conditional(log_joint, argnum, support, *args)
conditional = conditional_factory(*sub_args)
return conditional, marginalized_value
class ConjugacyPgmTest(absltest.TestCase):
def testNormalChain(self):
n_timesteps = 10
dim = 10
factors = ([(n,) for n in range(n_timesteps)] +
[(n, n+1) for n in range(n_timesteps-1)])
sizes = [dim]*n_timesteps
natparam = make_struct_normal_natparam(factors, sizes)
log_joint = lambda x: structured_normal_logpdf(x, natparam)
x = np.ones((n_timesteps, dim))
start_time = time.time()
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.REAL, x))
start_time = time.time()
correct_marginalized_value = actual_normal_log_normalizer(natparam, factors,
sizes)
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
def testNormalGenericTree(self):
factors = [(n,) for n in range(10)]
factors += [(0,1), (0,8), (1,4), (1,5), (1,2), (2,3), (2,6), (2,7), (2,9)]
dim = 10
sizes = [dim]*10
natparam = make_struct_normal_natparam(factors, sizes)
log_joint = lambda x: structured_normal_logpdf(x, natparam)
x = np.ones((10, dim))
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.REAL, x))
correct_marginalized_value = actual_normal_log_normalizer(natparam, factors,
sizes)
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
def testCategoricalGenericTree(self):
num_nodes = 9
dim = 2
factors = [(0,1,2,3), (1,4,5), (5,), (2,), (3,6,7), (0,8)]
sizes = [dim]*num_nodes
natparam = make_struct_categorical_natparam(factors, sizes)
log_joint = lambda x: structured_categorical_logpdf(x, natparam, dim)
x = np.random.choice(dim, size=(num_nodes,))
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.INTEGER, x))
correct_marginalized_value = actual_categorical_log_normalizer(natparam,
sizes)
self.assertAlmostEqual(correct_marginalized_value, marginalized_value,
places=4)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autoconj.canonicalize import canonicalize
from autoconj.conjugacy import (
complete_conditional, find_sufficient_statistic_nodes, marginalize,
split_einsum_node, SupportTypes, statistic_representation,
make_initializers, grad_namedtuple)
from autoconj.exponential_families import batch_dirichlet
from autoconj.tracers import (eval_expr, eval_node, one_hot, print_expr,
make_expr, GraphExpr, logdet)
from autoconj import log_probs
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from autograd.scipy import special
from autograd.scipy import misc
from scipy import stats
from absl.testing import absltest
def _match_values(set_1, set_2, close_fn=np.allclose):
"""Checks that there's a match for every element of set_1 in set_2."""
return all(any(close_fn(a, b) for b in set_2) for a in set_1)
def _perfect_match_values(set_1, set_2, close_fn=np.allclose):
"""Checks that there's a perfect matching between set_1 and set_2."""
if len(set_1) == len(set_2):
matches = np.array([[close_fn(a, b) for a in set_1] for b in set_2], int)
return np.all(matches.sum(0) == 1) and np.all(matches.sum(1) == 1)
return False
def _condition_and_marginalize(log_joint, argnum, support, *args):
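  """Returns the complete conditional for argument `argnum` along with the
  log-normalizer value obtained by marginalizing that argument out."""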
sub_args = args[:argnum] + args[argnum + 1:]
marginalized = marginalize(log_joint, argnum, support, *args)
marginalized_value = marginalized(*sub_args)
conditional_factory = complete_conditional(log_joint, argnum, support, *args)
conditional = conditional_factory(*sub_args)
return conditional, marginalized_value
class ConjugacyTest(absltest.TestCase):
def testFindSufficientStatisticNodes(self):
def log_joint(x, y, matrix):
# Linear in x: y^T x
result = np.einsum('i,i->', x, y)
# Quadratic form: x^T matrix x
result += np.einsum('ij,i,j->', matrix, x, x)
# Rank-1 quadratic form: (x**2)^T(y**2)
result += np.einsum('i,i,j,j->', x, y, x, y)
# Linear in log(x): y^T log(x)
result += np.einsum('i,i->', y, np.log(x))
# Linear in reciprocal(x): y^T reciprocal(x)
result += np.einsum('i,i->', y, np.reciprocal(x))
# More obscurely linear in log(x): y^T matrix log(x)
result += np.einsum('i,ij,j->', y, matrix, np.log(x))
# Linear in x * log(x): y^T (x * log(x))
result += np.einsum('i,i->', y, x * np.log(x))
return result
n_dimensions = 5
x = np.exp(np.random.randn(n_dimensions))
y = np.random.randn(n_dimensions)
matrix = np.random.randn(n_dimensions, n_dimensions)
env = {'x': x, 'y': y, 'matrix': matrix}
expr = make_expr(log_joint, x, y, matrix)
expr = canonicalize(expr)
sufficient_statistic_nodes = find_sufficient_statistic_nodes(expr, 'x')
suff_stats = [eval_expr(GraphExpr(node, expr.free_vars), env)
for node in sufficient_statistic_nodes]
correct_suff_stats = [x, x.dot(matrix.dot(x)), np.square(x.dot(y)),
np.log(x), np.reciprocal(x), y.dot(x * np.log(x))]
self.assertTrue(_perfect_match_values(suff_stats, correct_suff_stats))
expr = make_expr(log_joint, x, y, matrix)
expr = canonicalize(expr)
sufficient_statistic_nodes = find_sufficient_statistic_nodes(
expr, 'x', split_einsums=True)
suff_stats = [eval_expr(GraphExpr(node, expr.free_vars), env)
for node in sufficient_statistic_nodes]
correct_suff_stats = [x, np.outer(x, x), x * x,
np.log(x), np.reciprocal(x), x * np.log(x)]
self.assertTrue(_match_values(suff_stats, correct_suff_stats))
def testSplitEinsumNode(self):
n_dimensions = 5
x = np.random.randn(n_dimensions)
y = np.random.randn(n_dimensions)
matrix = np.random.randn(n_dimensions, n_dimensions)
env = {'x': x, 'y': y, 'matrix': matrix}
args = (x, y)
f = lambda x, y: np.einsum('i,i->', x, y)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [0])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), x))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [1])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), y))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [0, 1])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), x * y))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
args = (x, y)
f = lambda x, y: np.einsum('i,i,i->', x, y, y)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [1, 2])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), y * y))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [0])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), x))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
args = (x,)
f = lambda x: np.einsum('i,i,i->', np.ones_like(x), x, x)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [1, 2])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), x * x))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
args = (matrix, x, y)
f = lambda matrix, x, y: np.einsum('ij,i,j->', matrix, x, y)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [1, 2])
self.assertTrue(np.allclose(eval_node(stat_node, node, env),
np.outer(x, y)))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [0])
self.assertTrue(np.allclose(eval_node(stat_node, node, env), matrix))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
args = (matrix, x, y)
f = lambda matrix, x, y: np.einsum('i,j,ki,kj->', x, x, matrix, matrix)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [2, 3])
self.assertTrue(np.allclose(eval_node(stat_node, node, env),
matrix[:, None, :] * matrix[:, :, None]))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [0, 1])
self.assertTrue(np.allclose(eval_node(stat_node, node, env),
np.outer(x, x)))
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
args = (matrix, x, y)
f = lambda matrix, x, y: np.einsum(',kj,j,ka,a->', -0.5, matrix, x,
matrix, y)
node = make_expr(f, *args)
val = f(*args)
potential_node, stat_node = split_einsum_node(node.expr_node, [2, 4], False)
self.assertEqual(stat_node.args[0], 'j,a->ja')
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
potential_node, stat_node = split_einsum_node(node.expr_node, [0, 1, 3], False)
self.assertEqual(stat_node.args[0], ',kj,ka->kja')
self.assertTrue(np.allclose(eval_node(potential_node, node, env), val))
def testConditionAndMarginalizeZeroMeanScalarNormal(self):
def log_joint(x, precision):
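      # Unnormalized log N(x | 0, 1/precision): -0.5 * precision * x**2.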
return np.einsum(',,,->', -0.5, precision, x, x)
x = np.random.randn()
precision = np.exp(np.random.randn())
conditional, marginalized_value = _condition_and_marginalize(log_joint, 0,
SupportTypes.REAL,
x, precision)
correct_marginalized_value = (-0.5 * np.log(precision)
+ 0.5 * np.log(2. * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertEqual(0, conditional.args[0])
self.assertEqual(1. / np.sqrt(precision), conditional.args[1])
def testBatchDirichlet(self):
alpha = np.ones(4)
distribution_list = batch_dirichlet(alpha)
self.assertTrue(np.allclose(alpha, distribution_list.alpha))
alpha = np.ones([4, 3])
distribution_list = batch_dirichlet(alpha)
for i in range(alpha.shape[0]):
self.assertTrue(np.allclose(alpha[i],
distribution_list[i].item(0).alpha))
alpha = np.ones([2, 4, 3])
distribution_list = batch_dirichlet(alpha)
for i in range(alpha.shape[0]):
for j in range(alpha[i].shape[0]):
self.assertTrue(np.allclose(alpha[i, j],
distribution_list[i, j].item(0).alpha))
def testConditionAndMarginalizeScalarNormal(self):
def log_joint(x, mu, precision):
quadratic_term = np.einsum(',,,->', -0.5, precision, x, x)
linear_term = np.einsum(',,->', precision, x, mu)
return quadratic_term + linear_term
x = np.random.randn()
mu = np.random.randn()
precision = np.exp(np.random.randn())
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.REAL, x, mu,
precision))
correct_marginalized_value = (-0.5 * np.log(precision)
+ 0.5 * mu**2 * precision
+ 0.5 * np.log(2. * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertAlmostEqual(mu, conditional.args[0])
self.assertAlmostEqual(1. / np.sqrt(precision), conditional.args[1])
def testConditionAndMarginalizeZeroMeanNormal(self):
def log_joint(x, precision):
return np.einsum(',ij,i,j->', -0.5, precision, x, x)
n_dimensions = 5
x = np.random.randn(n_dimensions)
precision = np.random.randn(n_dimensions, 2 * n_dimensions)
precision = np.dot(precision, precision.T)
conditional, marginalized_value = _condition_and_marginalize(log_joint, 0,
SupportTypes.REAL,
x, precision)
correct_marginalized_value = (-0.5 * np.linalg.slogdet(precision)[1]
+ 0.5 * n_dimensions * np.log(2. * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(np.zeros(n_dimensions), conditional.mean))
self.assertTrue(np.allclose(np.linalg.inv(precision), conditional.cov))
def testConditionAndMarginalizeDiagonalZeroMeanNormal(self):
def log_joint_einsum(x, tau):
return np.einsum(',i,i,i->', -0.5, tau, x, x)
def log_joint_square(x, tau):
return np.sum(-0.5 * tau * x ** 2)
self._test_condition_and_marginalize_diagonal_zero_mean_normal(
log_joint_einsum)
self._test_condition_and_marginalize_diagonal_zero_mean_normal(
log_joint_square)
def _test_condition_and_marginalize_diagonal_zero_mean_normal(self,
log_joint):
n_dimensions = 5
x = np.random.randn(n_dimensions)
tau = np.random.randn(n_dimensions) ** 2
end_node = make_expr(log_joint, x, tau)
end_node = canonicalize(end_node)
conditional, marginalized_value = _condition_and_marginalize(
log_joint, 0, SupportTypes.REAL, x, tau)
correct_marginalized_value = (-0.5 * np.log(tau).sum()
+ 0.5 * n_dimensions * np.log(2. * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(np.zeros(n_dimensions), conditional.args[0]))
self.assertTrue(np.allclose(1. / np.sqrt(tau), conditional.args[1]))
def testConditionAndMarginalizeNormal(self):
def log_joint(x, mu, precision):
quadratic = np.einsum(',ij,i,j->', -0.5, precision, x, x)
linear = np.einsum('ij,i,j->', precision, x, mu)
return linear + quadratic - 3.
n_dimensions = 5
x = np.random.randn(n_dimensions)
mu = np.random.randn(n_dimensions)
precision = np.random.randn(n_dimensions, 2 * n_dimensions)
precision = np.dot(precision, precision.T)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.REAL, x, mu,
precision))
correct_marginalized_value = (
-0.5 * np.linalg.slogdet(precision)[1]
+ 0.5 * np.einsum('ij,i,j->', precision, mu, mu)
+ 0.5 * n_dimensions * np.log(2. * np.pi)
- 3.)
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(mu, conditional.mean))
self.assertTrue(np.allclose(np.linalg.inv(precision), conditional.cov))
def testConditionAndMarginalizeDiagonalNormal(self):
def log_joint_einsum(x, mu, tau):
quadratic = np.einsum(',i,i,i->', -0.5, tau, x, x)
linear = np.einsum('i,i,i->', tau, x, mu)
return linear + quadratic - 3.
def log_joint_square(x, mu, tau):
quadratic = np.sum(-0.5 * tau * x ** 2)
linear = np.einsum('i,i,i->', tau, x, mu)
return linear + quadratic - 3.
self._test_condition_and_marginalize_diagonal_normal(log_joint_einsum)
self._test_condition_and_marginalize_diagonal_normal(log_joint_square)
def _test_condition_and_marginalize_diagonal_normal(self, log_joint):
n_dimensions = 5
x = np.random.randn(n_dimensions)
mu = np.random.randn(n_dimensions)
tau = np.random.randn(n_dimensions) ** 2
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.REAL, x, mu,
tau))
correct_marginalized_value = (-0.5 * np.log(tau).sum()
+ 0.5 * np.einsum('i,i,i->', tau, mu, mu)
+ 0.5 * n_dimensions * np.log(2. * np.pi)
- 3.)
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(mu, conditional.args[0]))
self.assertTrue(np.allclose(1. / np.sqrt(tau), conditional.args[1]))
def testConditionAndMarginalizeGamma(self):
def log_joint(x, a, b):
return np.sum((a - 1) * np.log(x) - b * x)
a = np.random.gamma(1., 1., [3, 4])
b = np.random.gamma(1., 1., 4)
x = np.random.gamma(1., 1., [3, 4])
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.NONNEGATIVE,
x, a, b))
correct_marginalized_value = np.sum(-a * np.log(b) + special.gammaln(a))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(a, conditional.args[0]))
self.assertTrue(np.allclose(1. / b, conditional.args[2]))
def testConditionAndMarginalizeBeta(self):
def log_joint(x, a, b):
return np.sum((a - 1) * np.log(x) + (b - 1) * np.log1p(-x))
a = np.random.gamma(1., 1., [3, 4])
b = np.random.gamma(1., 1., 4)
x = np.random.gamma(1., 1., [3, 4])
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.UNIT_INTERVAL,
x, a, b))
correct_marginalized_value = (special.gammaln(a) + special.gammaln(b) -
special.gammaln(a + b)).sum()
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(a, conditional.args[0]))
self.assertTrue(np.allclose(b, conditional.args[1]))
def testConditionAndMarginalizeDirichlet(self):
def log_joint(x, alpha):
return np.sum((alpha - 1) * np.log(x))
alpha = np.random.gamma(1., 1., [3, 4])
x = np.random.gamma(alpha, 1.)
x /= x.sum(-1, keepdims=True)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.SIMPLEX,
x, alpha))
correct_marginalized_value = (special.gammaln(alpha).sum() -
special.gammaln(np.sum(alpha, 1)).sum())
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
for i in range(alpha.shape[0]):
self.assertTrue(np.allclose(alpha[i], conditional[i].item(0).alpha))
def testConditionAndMarginalizeBernoulli(self):
def log_joint(x, logits):
return np.sum(x * logits)
p = np.random.beta(2., 2., [3, 4])
logit_p = np.log(p) - np.log1p(-p)
# TODO(mhoffman): Without the cast this gives wrong answers due to autograd
# casts. This is scary.
x = (np.random.uniform(size=(8,) + p.shape) < p).astype(np.float32)
conditional, marginalized_value = _condition_and_marginalize(
log_joint, 0, SupportTypes.BINARY, x, logit_p)
correct_marginalized_value = np.sum(-x.shape[0] * np.log1p(-p))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value,
places=4)
self.assertTrue(np.allclose(p, conditional.args[0]))
def testConditionAndMarginalizeCategorical(self):
np.random.seed(0)
vocab_size = 23
def log_joint(x, probs):
one_hot_x = one_hot(x, vocab_size)
return np.sum(np.dot(one_hot_x, np.log(probs)))
n_examples = 13
alpha = 1.3
probs = np.random.gamma(alpha, 1., vocab_size)
probs /= probs.sum()
x = np.random.choice(np.arange(vocab_size), n_examples, p=probs)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.INTEGER,
x, probs))
self.assertTrue(np.allclose(conditional.p.sum(1), 1))
self.assertTrue(np.allclose(conditional.p, np.ones([n_examples, 1]) * probs))
self.assertAlmostEqual(0., marginalized_value, places=5)
logit_probs = np.random.randn(vocab_size)
probs = np.exp(logit_probs)
probs /= probs.sum()
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.INTEGER,
x, np.exp(logit_probs)))
correct_marginalized_value = np.log(np.sum(np.exp(logit_probs)))
self.assertAlmostEqual(n_examples * correct_marginalized_value,
marginalized_value, places=4)
self.assertTrue(np.allclose(conditional.p, np.ones([n_examples, 1]) * probs,
rtol=1e-5))
def testGammaPoisson(self):
def log_joint(x, y, a, b):
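      # Gamma prior on the rate x with a Poisson likelihood for the counts y.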
log_prior = log_probs.gamma_gen_log_prob(x, a, b)
log_likelihood = np.sum(-special.gammaln(y + 1) + y * np.log(x) - x)
return log_prior + log_likelihood
n_examples = 10
a = 2.3
b = 3.
x = np.random.gamma(a, 1. / b)
y = np.random.poisson(x, n_examples)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.NONNEGATIVE,
x, y, a, b))
new_a = a + y.sum()
new_b = b + n_examples
correct_marginalized_value = (
a * np.log(b) - special.gammaln(a) -
new_a * np.log(new_b) + special.gammaln(new_a) -
special.gammaln(y + 1).sum())
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertEqual(new_a, conditional.args[0])
self.assertAlmostEqual(new_b, 1. / conditional.args[2])
def testGammaGamma(self):
def log_joint(x, y, a, b):
log_prior = log_probs.gamma_gen_log_prob(x, a, b)
log_likelihood = np.sum(log_probs.gamma_gen_log_prob(y, a, a * x))
return log_prior + log_likelihood
n_examples = 10
a = 2.3
b = 3.
x = np.random.gamma(a, 1. / b)
y = np.random.gamma(a, 1. / x, n_examples)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.NONNEGATIVE,
x, y, a, b))
new_a = a + a * n_examples
new_b = b + a * y.sum()
correct_marginalized_value = (
a * np.log(b) - special.gammaln(a) -
new_a * np.log(new_b) + special.gammaln(new_a) +
np.sum((a - 1) * np.log(y) - special.gammaln(a) + a * np.log(a)))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertAlmostEqual(new_a, conditional.args[0])
self.assertAlmostEqual(new_b, 1. / conditional.args[2])
def testGammaNormalScaleParameter(self):
def log_joint(x, precision, a, b):
log_p_precision = log_probs.gamma_gen_log_prob(precision, a, b)
log_p_x = log_probs.norm_gen_log_prob(x, 0., 1. / np.sqrt(precision))
return log_p_precision + log_p_x
n_examples = 10
a = 2.3
b = 3.
precision = np.random.gamma(a, 1. / b)
x = np.random.normal(0., 1. / np.sqrt(precision), n_examples)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 1, SupportTypes.NONNEGATIVE,
x, precision, a, b))
new_a = a + n_examples / 2
new_b = b + (x ** 2).sum() / 2
self.assertAlmostEqual(new_a, conditional.args[0])
self.assertAlmostEqual(new_b, 1. / conditional.args[2])
correct_marginalized_value = (
a * np.log(b) - special.gammaln(a) -
new_a * np.log(new_b) + special.gammaln(new_a) -
0.5 * n_examples * np.log(2 * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
# TODO(mhoffman): This log_joint takes way too long to canonicalize.
def testBetaBernoulli(self):
def log_joint(p, x, a, b):
log_prior = ((a - 1) * np.log(p) + (b - 1) * np.log1p(-p) -
special.gammaln(a) - special.gammaln(b) +
special.gammaln(a + b)).sum()
log_likelihood = (x * np.log(p) + (1 - x) * np.log1p(-p)).sum()
return log_prior + log_likelihood
n_examples = 10
a = 1.3
b = 2.4
p = np.random.beta(a, b, [3, 4])
x = np.random.uniform(size=(n_examples,) + p.shape) < p
x = x.astype(np.float32)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.UNIT_INTERVAL,
p, x, a, b))
new_a = a + x.sum(0)
new_b = b + x.shape[0] - x.sum(0)
correct_marginalized_value = (
(-special.gammaln(a) - special.gammaln(b)
+ special.gammaln(a + b)) * p.size
+ (special.gammaln(new_a) + special.gammaln(new_b)
- special.gammaln(new_a + new_b)).sum())
self.assertAlmostEqual(marginalized_value, correct_marginalized_value,
places=4)
self.assertTrue(np.allclose(new_a, conditional.args[0]))
self.assertTrue(np.allclose(new_b, conditional.args[1]))
def testDirichletCategorical(self):
def log_joint(p, x, alpha):
log_prior = np.sum((alpha - 1) * np.log(p))
log_prior += -special.gammaln(alpha).sum() + special.gammaln(alpha.sum())
# TODO(mhoffman): We should make it possible to only use one-hot
# when necessary.
one_hot_x = one_hot(x, alpha.shape[0])
log_likelihood = np.sum(np.dot(one_hot_x, np.log(p)))
return log_prior + log_likelihood
vocab_size = 5
n_examples = 11
alpha = 1.3 * np.ones(vocab_size)
p = np.random.gamma(alpha, 1.)
p /= p.sum(-1, keepdims=True)
x = np.random.choice(np.arange(vocab_size), n_examples, p=p)
conditional, marginalized_value = (
_condition_and_marginalize(log_joint, 0, SupportTypes.SIMPLEX,
p, x, alpha))
new_alpha = alpha + np.histogram(x, np.arange(vocab_size + 1))[0]
correct_marginalized_value = (
-special.gammaln(alpha).sum() + special.gammaln(alpha.sum()) +
special.gammaln(new_alpha).sum() - special.gammaln(new_alpha.sum()))
self.assertAlmostEqual(correct_marginalized_value, marginalized_value)
self.assertTrue(np.allclose(new_alpha, conditional.alpha))
def testLinearRegression(self):
def log_joint(X, beta, y):
predictions = np.einsum('ij,j->i', X, beta)
errors = y - predictions
log_prior = np.einsum('i,i,i->', -0.5 * np.ones_like(beta), beta, beta)
log_likelihood = np.einsum(',k,k->', -0.5, errors, errors)
return log_prior + log_likelihood
n_examples = 10
n_predictors = 2
X = np.random.randn(n_examples, n_predictors)
beta = np.random.randn(n_predictors)
y = np.random.randn(n_examples)
graph = make_expr(log_joint, X, beta, y)
graph = canonicalize(graph)
    args = list(graph.free_vars.keys())
sufficient_statistic_nodes = find_sufficient_statistic_nodes(graph, args[1])
sufficient_statistics = [eval_node(node, graph.free_vars,
{'X': X, 'beta': beta, 'y': y})
for node in sufficient_statistic_nodes]
correct_sufficient_statistics = [
-0.5 * beta.dot(beta), beta,
-0.5 * np.einsum('ij,ik,j,k', X, X, beta, beta)
]
self.assertTrue(_match_values(sufficient_statistics,
correct_sufficient_statistics))
new_log_joint, _, stats_funs, _ = (
statistic_representation(log_joint, (X, beta, y),
(SupportTypes.REAL,), (1,)))
beta_stat_fun = stats_funs[0]
beta_natparam = grad_namedtuple(new_log_joint, 1)(X, beta_stat_fun(beta), y)
correct_beta_natparam = (-0.5 * X.T.dot(X), y.dot(X),
-0.5 * np.ones(n_predictors))
self.assertTrue(_match_values(beta_natparam, correct_beta_natparam))
conditional_factory = complete_conditional(log_joint, 1, SupportTypes.REAL,
X, beta, y)
conditional = conditional_factory(X, y)
true_cov = np.linalg.inv(X.T.dot(X) + np.eye(n_predictors))
true_mean = true_cov.dot(y.dot(X))
self.assertTrue(np.allclose(true_cov, conditional.cov))
self.assertTrue(np.allclose(true_mean, conditional.mean))
def testMixtureOfGaussians(self):
def log_joint(x, pi, z, mu, sigma_sq, alpha, sigma_sq_mu):
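      # Dirichlet prior on weights pi, normal prior on cluster means mu,
      # categorical assignments z, and a normal likelihood for x.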
log_p_pi = log_probs.dirichlet_gen_log_prob(pi, alpha)
log_p_mu = log_probs.norm_gen_log_prob(mu, 0, np.sqrt(sigma_sq_mu))
z_one_hot = one_hot(z, len(pi))
log_p_z = np.einsum('ij,j->', z_one_hot, np.log(pi))
mu_z = np.einsum('ij,jk->ik', z_one_hot, mu)
log_p_x = log_probs.norm_gen_log_prob(x, mu_z, np.sqrt(sigma_sq))
return log_p_pi + log_p_z + log_p_mu + log_p_x
n_clusters = 5
n_dimensions = 2
n_observations = 200
alpha = 3.3 * np.ones(n_clusters)
sigma_sq_mu = 1.5 ** 2
sigma_sq = 0.5 ** 2
np.random.seed(10001)
pi = np.random.gamma(alpha)
pi /= pi.sum()
mu = np.random.normal(0, np.sqrt(sigma_sq_mu), [n_clusters, n_dimensions])
z = np.random.choice(np.arange(n_clusters), size=n_observations, p=pi)
x = np.random.normal(mu[z, :], sigma_sq)
pi_est = np.ones(n_clusters) / n_clusters
z_est = np.random.choice(np.arange(n_clusters), size=n_observations,
p=pi_est)
mu_est = np.random.normal(0., 0.01, [n_clusters, n_dimensions])
all_args = [x, pi_est, z_est, mu_est, sigma_sq, alpha, sigma_sq_mu]
pi_posterior_args = all_args[:1] + all_args[2:]
z_posterior_args = all_args[:2] + all_args[3:]
mu_posterior_args = all_args[:3] + all_args[4:]
pi_posterior = complete_conditional(log_joint, 1, SupportTypes.SIMPLEX,
*all_args)
z_posterior = complete_conditional(log_joint, 2, SupportTypes.INTEGER,
*all_args)
mu_posterior = complete_conditional(log_joint, 3, SupportTypes.REAL,
*all_args)
self.assertTrue(np.allclose(
pi_posterior(*pi_posterior_args).alpha,
alpha + np.histogram(z_est, np.arange(n_clusters+1))[0]))
correct_z_logits = -0.5 / sigma_sq * np.square(x[:, :, None] -
mu_est.T[None, :, :]).sum(1)
correct_z_logits += np.log(pi_est)
correct_z_posterior = np.exp(correct_z_logits -
misc.logsumexp(correct_z_logits, 1,
keepdims=True))
self.assertTrue(np.allclose(correct_z_posterior,
z_posterior(*z_posterior_args).p))
correct_mu_posterior_mean = np.zeros_like(mu_est)
correct_mu_posterior_var = np.zeros_like(mu_est)
for k in range(n_clusters):
n_k = (z_est == k).sum()
correct_mu_posterior_var[k] = 1. / (1. / sigma_sq_mu + n_k / sigma_sq)
correct_mu_posterior_mean[k] = (
x[z_est == k].sum(0) / sigma_sq * correct_mu_posterior_var[k])
mu_posterior_val = mu_posterior(*mu_posterior_args)
self.assertTrue(np.allclose(correct_mu_posterior_mean,
mu_posterior_val.args[0]))
self.assertTrue(np.allclose(correct_mu_posterior_var,
mu_posterior_val.args[1] ** 2))
def testTwoGaussians(self):
def log_joint(x1, x2):
log_p_x1 = -0.5 * x1 * x1
x_diff = x2 - x1
log_p_x2 = -0.5 * x_diff * x_diff
return log_p_x1 + log_p_x2
x1 = np.random.randn()
x2 = x1 + np.random.randn()
all_args = [x1, x2]
marginal_p_x2 = marginalize(log_joint, 0, SupportTypes.REAL, *all_args)
correct_marginalized_value = (
-0.25 * x2 * x2 - 0.5 * np.log(2.) + 0.5 * np.log(2. * np.pi))
self.assertAlmostEqual(correct_marginalized_value, marginal_p_x2(x2))
x2_conditional = complete_conditional(marginal_p_x2, 0, SupportTypes.REAL,
x2)()
self.assertAlmostEqual(x2_conditional.args[0], 0.)
self.assertAlmostEqual(x2_conditional.args[1] ** 2, 2.)
def testFactorAnalysis(self):
def log_joint(x, w, epsilon, tau, alpha, beta):
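      # Standard-normal priors on factors epsilon and loadings w, a gamma prior
      # on the noise precision tau, and a normal likelihood for x.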
log_p_epsilon = log_probs.norm_gen_log_prob(epsilon, 0, 1)
log_p_w = log_probs.norm_gen_log_prob(w, 0, 1)
log_p_tau = log_probs.gamma_gen_log_prob(tau, alpha, beta)
# TODO(mhoffman): The transposed version below should work.
# log_p_x = log_probs.norm_gen_log_prob(x, np.dot(epsilon, w), 1. / np.sqrt(tau))
log_p_x = log_probs.norm_gen_log_prob(x, np.einsum('ik,jk->ij', epsilon, w),
1. / np.sqrt(tau))
return log_p_epsilon + log_p_w + log_p_tau + log_p_x
n_examples = 20
D = 10
K = 5
alpha = 2.
beta = 8.
tau = np.random.gamma(alpha, beta)
w = np.random.normal(loc=0, scale=1, size=[D, K])
epsilon = np.random.normal(loc=0, scale=1, size=[n_examples, K])
x = np.random.normal(loc=epsilon.dot(w.T), scale=np.sqrt(tau))
all_args = [x, w, epsilon, tau, alpha, beta]
w_conditional_factory = complete_conditional(log_joint, 1,
SupportTypes.REAL, *all_args)
conditional = w_conditional_factory(x, epsilon, tau, alpha, beta)
true_cov = np.linalg.inv(tau * np.einsum('nk,nl->kl', epsilon, epsilon) +
np.eye(K))
true_mean = tau * np.einsum('nk,nd,kl->dl', epsilon, x, true_cov)
for d in range(D):
self.assertTrue(np.allclose(conditional[d].cov, true_cov))
self.assertTrue(np.allclose(conditional[d].mean, true_mean[d]))
epsilon_conditional_factory = complete_conditional(log_joint, 2,
SupportTypes.REAL,
*all_args)
conditional = epsilon_conditional_factory(x, w, tau, alpha, beta)
true_cov = np.linalg.inv(tau * np.einsum('dk,dl->kl', w, w) + np.eye(K))
true_mean = tau * np.einsum('dk,nd,kl->nl', w, x, true_cov)
for n in range(n_examples):
self.assertTrue(np.allclose(conditional[n].cov, true_cov))
self.assertTrue(np.allclose(conditional[n].mean, true_mean[n]))
tau_conditional_factory = complete_conditional(log_joint, 3,
SupportTypes.NONNEGATIVE,
*all_args)
conditional = tau_conditional_factory(x, w, epsilon, alpha, beta)
true_a = alpha + 0.5 * n_examples * D
true_b = beta + 0.5 * np.sum(np.square(x - epsilon.dot(w.T)))
self.assertAlmostEqual(true_a, conditional.args[0])
self.assertAlmostEqual(true_b, 1. / conditional.args[2])
def testStatisticRepresentation(self):
A = np.array([[1., 0], [0., 1.], [1., 1.]])
Sigma = 2 * np.eye(3)
z = npr.randn(2)
x = np.array([1000., -1000., 0.])
def log_joint(z, x):
log_prior = -1./2 * np.dot(z, z)
centered = x - np.dot(A, z)
log_like = (-1./2 * np.dot(centered, np.dot(np.linalg.inv(Sigma), centered))
- 1./2 * logdet(Sigma))
return log_prior + log_like
neg_energy, normalizers, stats_funs, samplers = (
statistic_representation(log_joint, (z, x),
(SupportTypes.REAL, SupportTypes.REAL)))
initializers, _ = make_initializers((z,x), neg_energy, normalizers,
stats_funs)
# just check that these don't crash
natparams = [initializer() for initializer in initializers]
neg_energy(*natparams)
[sampler(natparam).rvs() for sampler, natparam in zip(samplers, natparams)]
expected_post_mu = A.T.dot(x / 2.)
computed_post_mu = grad_namedtuple(normalizers[0])(initializers[0]()).x
self.assertTrue(np.allclose(expected_post_mu, computed_post_mu))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from autograd import grad
import autograd.numpy as np
from autoconj import tracers
class TracersTest(absltest.TestCase):
def _CheckFunctionExprEquality(self, expected, fun_a, fun_b, *args):
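    """Asserts the traced expressions of `fun_a` and `fun_b` match iff `expected`."""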
expr_a = tracers.make_expr(fun_a, *args)
expr_b = tracers.make_expr(fun_b, *args)
if expected:
self.assertEqual(expr_a, expr_b)
else:
self.assertNotEqual(expr_a, expr_b)
def testEquals(self):
def fun(x):
return f(g(h(x)))
f = lambda x: np.power(x, 3.)
g = lambda x: np.power(3., x)
h = lambda x: np.power(x, x)
x = 2.
self._CheckFunctionExprEquality(True, fun, lambda x: f(g(h(x))), x)
self._CheckFunctionExprEquality(False, fun, lambda x: f(g(x)), x)
self._CheckFunctionExprEquality(False, fun, lambda x: f(h(x)), x)
self._CheckFunctionExprEquality(False, fun, lambda x: g(h(x)), x)
self._CheckFunctionExprEquality(True, f, f, x)
self._CheckFunctionExprEquality(True, g, g, x)
self._CheckFunctionExprEquality(True, h, h, x)
self._CheckFunctionExprEquality(False, fun, f, x)
self._CheckFunctionExprEquality(False, fun, g, x)
self._CheckFunctionExprEquality(False, fun, h, x)
self._CheckFunctionExprEquality(False, f, g, x)
self._CheckFunctionExprEquality(False, f, h, x)
self._CheckFunctionExprEquality(False, g, h, x)
def testPrintExpr(self):
def fun(x, y):
return 2 * x**2 + np.tanh(y)
expr = tracers.make_expr(fun, 4, 5)
printed_expr = tracers.print_expr(expr)
expected = ("temp_0 = power(x, 2)\n"
"temp_1 = multiply(2, temp_0)\n"
"temp_2 = tanh(y)\n"
"temp_3 = add(temp_1, temp_2)\n")
self.assertEqual(printed_expr, expected)
def testEvalExpr(self):
def fun(x, y):
return 2 * x**2 + np.tanh(3 * y)
expr = tracers.make_expr(fun, 4, 5)
self.assertEqual(fun(9, 10), tracers.eval_expr(expr, {'x': 9, 'y': 10}))
def testInlineExpr(self):
def f(x, y):
return 2 * x + y
def g(z):
return 3 * z + z**2
expr = tracers.make_expr(f, 1, 2)
subexpr = tracers.make_expr(g, 3)
target_node = expr.expr_node.parents[0]
new_expr = tracers.inline_expr(subexpr, {'z': target_node})
printed_expr = tracers.print_expr(new_expr)
expected = ("temp_0 = multiply(2, x)\n"
"temp_1 = multiply(3, temp_0)\n"
"temp_2 = power(temp_0, 2)\n"
"temp_3 = add(temp_1, temp_2)\n")
self.assertEqual(printed_expr, expected)
self.assertEqual(3 * (2 * 5) + (2 * 5)**2,
tracers.eval_expr(new_expr, {'x': 5}))
self.assertEqual(f(6, 7), tracers.eval_expr(expr, {'x': 6, 'y': 7}))
def testReplaceNodeWithExpr(self):
def f(x):
return 2 * x
def g(x):
return 3 * x
expr = tracers.make_expr(f, 5)
new_expr = tracers.make_expr(g, 10)
tracers.replace_node_with_expr(expr.expr_node, new_expr)
self.assertEqual(3 * 7, tracers.eval_expr(expr, {'x': 7}))
def testInlineExprAndReplace(self):
def f(x, y):
return 2 * x ** 2 + y
def g(z):
return 3 * z ** 3
expr = tracers.make_expr(f, 1, 2)
subexpr = tracers.make_expr(g, 3)
input_node = expr.expr_node.parents[0].parents[0] # x ** 2
output_node = expr.expr_node.parents[0] # 2 * x ** 2
new_expr = tracers.inline_expr(subexpr, {'z': input_node})
tracers.replace_node_with_expr(output_node, new_expr) # modify expr inplace
self.assertEqual(3 * 6 ** 6 + 7,
tracers.eval_expr(expr, {'x': 6, 'y': 7}))
def testUnusedVars(self):
def f(x, y, z):
return 3 * x + y
expr = tracers.make_expr(f, 1., 2., 3.)
self.assertEqual(set(expr.free_vars.keys()), {'x', 'y'})
def testDescendantOf(self):
def f(x, y):
return 2 * x ** 2 + y
expr = tracers.make_expr(f, 1, 2)
xnode = expr.free_vars['x']
ynode = expr.free_vars['y']
self.assertTrue(tracers.is_descendant_of(expr.expr_node, xnode))
self.assertTrue(tracers.is_descendant_of(xnode, xnode))
self.assertTrue(tracers.is_descendant_of(expr.expr_node, expr.expr_node))
self.assertFalse(tracers.is_descendant_of(xnode, ynode))
def testAllDescendantsOf(self):
def f(x, y):
return 2 * x ** 2 + y
expr = tracers.make_expr(f, 1, 2)
xnode = expr.free_vars['x']
ynode = expr.free_vars['y']
descendants = tracers.all_descendants_of(expr.expr_node, ynode)
self.assertEqual(descendants, {ynode, expr.expr_node})
def testCommonSubexpressionElimination(self):
def f1(x):
return 3 * x**2 + x**2
def f2(x):
y = x**2
return 3 * y + y
expr1 = tracers.make_expr(f1, 1)
expr2 = tracers.make_expr(f2, 1)
code1 = tracers.print_expr(expr1)
code2 = tracers.print_expr(expr2)
self.assertGreater(len(code1), len(code2))
code1_cse = tracers.print_expr(tracers.remake_expr(expr1)) # applies cse
self.assertEqual(len(code1_cse), len(code2))
def testExtractSuperexpr(self):
def f(x, y):
return 2 * x ** 2 + y
expr = tracers.make_expr(f, 1, 2)
node = expr.expr_node.parents[0].parents[0] # x ** 2
new_expr = tracers.extract_superexpr(expr, {'x2': node})
self.assertEqual(2 * 5 + 6, tracers.eval_expr(new_expr, {'x2': 5, 'y': 6}))
def testExtractSuperexprWithReplaceNode(self):
# NOTE(mattjj): this test shows an alternative way to implement, in effect,
# tracers.extract_superexpr just using tracers.replace_node_with_expr. The
# reason to have both is that one does in-place modification.
def f(x, y):
return 2 * x ** 2 + y
expr = tracers.make_expr(f, 1, 2)
node = expr.expr_node.parents[0].parents[0] # x ** 2
lookup_expr = tracers.make_expr(lambda x: x, 3, names=('x2',))
tracers.replace_node_with_expr(node, lookup_expr) # modify expr in-place
self.assertEqual(2 * 5 + 6, tracers.eval_expr(expr, {'x2': 5, 'y': 6}))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import autograd.numpy as np
from autoconj import log_probs
from autoconj import pplham as ph
class PPLHamTest(absltest.TestCase):
def testMakeLogJointUnconditional(self):
"""Test `make_log_joint` works on unconditional model."""
def model():
loc = ph.norm.rvs(loc=0.0, scale=1.0, name="loc")
x = ph.norm.rvs(loc=loc, scale=0.5, size=5, name="x")
return x
log_joint = ph.make_log_joint_fn(model)
x = np.random.normal(size=5)
loc = 0.3
value = log_joint(loc=loc, x=x)
true_value = log_probs.norm_gen_log_prob(loc, loc=0.0, scale=1.0)
true_value += log_probs.norm_gen_log_prob(x, loc=loc, scale=0.5)
self.assertAlmostEqual(value, true_value)
def testMakeLogJointConditional(self):
"""Test `make_log_joint` works on conditional model."""
def model(X, prior_precision):
beta = ph.norm.rvs(loc=0.0,
scale=1.0 / np.sqrt(prior_precision),
size=X.shape[1],
name="beta")
loc = np.einsum('ij,j->i', X, beta)
y = ph.norm.rvs(loc=loc, scale=1.0, name="y")
return y
log_joint = ph.make_log_joint_fn(model)
X = np.random.normal(size=[3, 2])
prior_precision = 0.5
beta = np.random.normal(size=[2])
y = np.random.normal(size=[3])
true_value = log_probs.norm_gen_log_prob(
beta, loc=0.0, scale=1.0 / np.sqrt(prior_precision))
loc = np.einsum('ij,j->i', X, beta)
true_value += log_probs.norm_gen_log_prob(y, loc=loc, scale=1.0)
# Test args as input.
value = log_joint(X, prior_precision, beta, y)
self.assertAlmostEqual(value, true_value)
# Test kwargs as input.
value = log_joint(X, prior_precision, y=y, beta=beta)
self.assertAlmostEqual(value, true_value)
if __name__ == '__main__':
np.random.seed(8327)
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from absl.testing import absltest
import autograd.numpy as np
import autograd.numpy.random as npr
import numpy.testing
from autoconj import rewrites
from autoconj import tracers
class NumericalTestCase(absltest.TestCase):
def assertArraysAllClose(self, x, y, err_msg='', atol=1e-8, rtol=1e-5):
numpy.testing.assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
def assertAllClose(self, x, y, err_msg='', atol=1e-8, rtol=1e-5):
if isinstance(x, (tuple, list)):
self.assertEqual(type(x), type(y))
self.assertEqual(len(x), len(y))
for elt_a, elt_b in zip(x, y):
self.assertAllClose(elt_a, elt_b, err_msg, atol, rtol)
self.assertArraysAllClose(x, y, err_msg, atol, rtol)
class RewritesTest(NumericalTestCase):
def _rewriter_test_helper(self, fun, rewrite_rule, *args, **kwargs):
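    """Applies `rewrite_rule` at a node of `fun`'s traced graph, checks the
    rewritten expression still evaluates to fun(*args), and returns it after
    constant folding."""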
expr = kwargs.get('expr') or tracers.make_expr(fun, *args)
self.assertIsInstance(expr, tracers.GraphExpr)
    env = dict(zip(inspect.getfullargspec(fun).args, args))
self.assertAllClose(fun(*args), tracers.eval_expr(expr, env))
rewriter = rewrites.make_rewriter(rewrite_rule)
rewrite_node = kwargs.get('rewrite_node', expr.expr_node)
rewriter(rewrite_node) # modifies expr in-place
self.assertAllClose(fun(*args), tracers.eval_expr(expr, env))
return tracers.remake_expr(expr) # constant folding
def _eager_rewriter_test_helper(self, fun, rewriter, *args, **kwargs):
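    """Re-traces `fun`'s graph with `rewriter` substituted for the target
    node's primitive, checks the value is unchanged, and returns the
    constant-folded result."""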
expr = kwargs.get('expr') or tracers.make_expr(fun, *args)
self.assertIsInstance(expr, tracers.GraphExpr)
    env = dict(zip(inspect.getfullargspec(fun).args, args))
self.assertAllClose(fun(*args), tracers.eval_expr(expr, env))
rewrite_node = kwargs.get('rewrite_node', expr.expr_node)
expr = tracers.remake_expr(expr, {rewrite_node.fun: rewriter})
self.assertAllClose(fun(*args), tracers.eval_expr(expr, env))
return tracers.remake_expr(expr) # constant folding
def testDotRewriter(self):
def fun(x, y):
return np.dot(x, y)
x = npr.randn(4, 3)
y = npr.randn(3)
expr = tracers.make_expr(fun, x, y)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'dot')
expr = self._eager_rewriter_test_helper(fun, rewrites.dot_as_einsum, x, y)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
def testMultiplyRewriter(self):
def fun(x, y):
return x * y
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
npr.randn(4, 3), npr.randn(3))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
npr.randn(4, 3), npr.randn(4, 1))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
npr.randn(4, 3, 2),
npr.randn(4, 1, 2))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
npr.randn(4, 3, 2),
npr.randn(4, 1, 1))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
npr.randn(1, 1, 3),
npr.randn(4, 1, 3))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
def testDivideToMultiply(self):
def fun(x, y):
return x / y
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_divide,
1.3, 4.7)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_divide,
1.3 * np.ones([3, 4, 1]),
4.7 * np.ones([3, 4, 5]))
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._eager_rewriter_test_helper(lambda y: np.ones([3, 4, 5]) / y,
rewrites.maybe_divide,
4.7 * np.ones([3, 4, 5]))
self.assertEqual(expr.expr_node.fun.__name__, 'power')
def testPowerRewriter(self):
x = npr.randn(10)
fun = lambda x: x**0
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_power, x)
self.assertIsInstance(expr, tracers.ConstExpr)
self.assertEqual(expr.val, 1)
fun = lambda x: x**1
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_power, x)
self.assertEqual(expr.expr_node.fun.__name__, 'env_lookup')
fun = lambda x: x**4
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_power, x)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
fun = lambda x: x**-1
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_power, x)
self.assertEqual(expr.expr_node.fun.__name__, 'power')
def testAddRewriter(self):
x = npr.randn(4, 3)
y = npr.randn(3)
z = npr.randn(1, 3)
expr = self._rewriter_test_helper(lambda x, y, z: x + (y + z),
rewrites.replace_add, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
expr = self._rewriter_test_helper(lambda x, y, z: (x + y) + z,
rewrites.replace_add, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
def testAddNRewriter(self):
x = npr.randn(4, 3)
y = npr.randn(3)
z = npr.randn(1, 3)
expr = self._rewriter_test_helper(lambda x, y, z: x + tracers.add_n(y, z),
rewrites.replace_add_addn, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
expr = self._rewriter_test_helper(lambda x, y, z: tracers.add_n(x, y) + z,
rewrites.replace_add_addn, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
expr = self._rewriter_test_helper(
lambda x, y, z: tracers.add_n(tracers.add_n(x, y), z),
rewrites.replace_addn_addn, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
self.assertTrue(all([parent.fun.__name__ == 'env_lookup'
for parent in expr.expr_node.parents]))
expr = self._rewriter_test_helper(
lambda x, y, z: tracers.add_n(x, tracers.add_n(y, z), z),
rewrites.replace_addn_addn, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
self.assertTrue(all([parent.fun.__name__ == 'env_lookup'
for parent in expr.expr_node.parents]))
def testDuplicatedAddNRewriter(self):
x = npr.randn(4, 3)
y = npr.randn(3)
z = npr.randn(1, 3)
expr = self._rewriter_test_helper(
lambda x, y, z: tracers.add_n(x, y, z, y),
rewrites.replace_duplicated_addn, x, y, z)
self.assertIsInstance(expr, tracers.GraphExpr)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
self.assertEqual(len(expr.expr_node.parents), 3)
self.assertTrue(all([parent.fun.__name__ in ('multiply', 'env_lookup')
for parent in expr.expr_node.parents]))
def testSumRewriter(self):
def fun(x):
return np.sum(x, 1)
x = npr.randn(4, 3)
expr = tracers.make_expr(fun, x)
self.assertEqual(expr.expr_node.fun, np.sum)
expr = self._rewriter_test_helper(fun, rewrites.replace_sum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
def testSumRewriterTuple(self):
def fun(x):
return np.sum(x, axis=(0, 1))
x = npr.randn(4, 3)
expr = tracers.make_expr(fun, x)
self.assertEqual(expr.expr_node.fun, np.sum)
expr = self._rewriter_test_helper(fun, rewrites.replace_sum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
def testSumRewriterKwarg(self):
def fun(x):
return np.sum(x, axis=1)
x = npr.randn(4, 3)
expr = tracers.make_expr(fun, x)
self.assertEqual(expr.expr_node.fun, np.sum)
expr = self._rewriter_test_helper(fun, rewrites.replace_sum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
def testFullSumRewriter(self):
def fun(x):
return np.sum(x)
x = npr.randn(4, 3)
expr = tracers.make_expr(fun, x)
self.assertEqual(expr.expr_node.fun, np.sum)
expr = self._rewriter_test_helper(fun, rewrites.replace_sum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
def fun(x):
return np.sum(x, None)
expr = tracers.make_expr(fun, x)
self.assertEqual(expr.expr_node.fun, np.sum)
expr = self._rewriter_test_helper(fun, rewrites.replace_sum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
def testSwapaxesToEinsum(self):
x = np.arange(9).reshape([3, 3])
self.assertTrue((np.swapaxes(x, 0, 1) == rewrites.swapaxes(x, 0, 1)).all())
def testSubtractToAdd(self):
def fun(x, y):
return x - y
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_subtract,
1.3, 4.7)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_subtract,
1.3 * np.ones([3, 4, 1]),
4.7 * np.ones([3, 4, 5]))
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
def testEinsumDistributeRewriter(self):
def fun(x, y, z):
return np.einsum('ij,j->i', x, tracers.add_n(y, z))
x = npr.randn(4, 3)
y = npr.randn(3)
z = npr.randn(3)
expr = tracers.make_expr(fun, x, y, z)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._rewriter_test_helper(fun, rewrites.distribute_einsum,
x, y, z)
self.assertEqual(expr.expr_node.fun.__name__, 'add_n')
def testEinsumTransposeRewriter(self):
def fun(x, y):
return np.einsum('ij,j->i', x.T, y)
x = npr.randn(4, 3)
y = npr.randn(4)
expr = tracers.make_expr(fun, x, y)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._rewriter_test_helper(fun, rewrites.transpose_inside_einsum,
x, y)
self.assertFalse('transpose' in tracers.print_expr(expr))
def testEinsumTransposeRewriter2(self):
def fun(x, y):
return np.einsum('ij,j,kj->ik', x.T, y, x.T)
x = npr.randn(4, 3)
y = npr.randn(4)
expr = tracers.make_expr(fun, x, y)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
expr = self._rewriter_test_helper(fun, rewrites.transpose_inside_einsum,
x, y)
expr = self._rewriter_test_helper(fun, rewrites.transpose_inside_einsum,
x, y, expr=expr)
self.assertFalse('transpose' in tracers.print_expr(expr))
def testEinsumCompositionRewriter(self):
def fun(x, y, z):
return np.einsum('ij,jk->i', x, np.einsum('ija,ijk->ka', y, z))
x = npr.randn(4, 3)
y = npr.randn(5, 4, 2)
z = npr.randn(5, 4, 3)
expr = tracers.make_expr(fun, x, y, z)
self.assertEqual(expr.expr_node.fun.__name__, 'einsum')
self.assertEqual(expr.expr_node.parents[1].fun.__name__, 'einsum')
expr = self._rewriter_test_helper(fun, rewrites.combine_einsum_compositions,
x, y, z)
self.assertNotEqual(expr.expr_node.parents[1].fun.__name__, 'einsum')
def testLogEinsumRewriter(self):
# TODO(matthewjmackay): fails on example below where axes are transposed
# def fun(x, y):
# return np.log(np.einsum('ji,ij->ij', x, y))
#
# x = np.exp(npr.randn(3, 4))
# y = np.exp(npr.randn(4, 3))
# z = np.exp(npr.randn(4))
def fun(x, y):
return np.log(np.einsum('ij,ij->ij', x, y))
x = np.exp(npr.randn(4, 3))
y = np.exp(npr.randn(4, 3))
z = np.exp(npr.randn(4))
expr = tracers.make_expr(fun, x, y)
self.assertEqual(expr.expr_node.fun.__name__, 'log')
self.assertEqual(expr.expr_node.parents[0].fun.__name__, 'einsum')
expr = self._rewriter_test_helper(fun, rewrites.replace_log_einsum,
x, y)
self.assertEqual(expr.expr_node.fun.__name__, 'add')
self.assertEqual(expr.expr_node.parents[0].fun.__name__, 'log')
self.assertEqual(expr.expr_node.parents[1].fun.__name__, 'log')
def testMultiplyDistribute(self):
def fun(x, y, z):
return x * tracers.add_n(y, z)
x = npr.randn(10)
y = npr.randn(10)
z = npr.randn(10)
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_multiply,
x, y, z)
expr = self._rewriter_test_helper(fun, rewrites.distribute_einsum, x, y, z,
expr=expr)
self.assertEqual(expr.expr_node.fun, tracers.add_n)
def testEinsumOneArg(self):
x = npr.randn(10)
def fun(x):
return np.einsum('a->a', x)
expr = tracers.make_expr(fun, x)
self.assertNotEqual(expr.expr_node.fun, tracers.env_lookup)
expr = tracers.remake_expr(expr, {np.einsum: rewrites.maybe_einsum})
self.assertAllClose(tracers.eval_expr(expr, {'x': x}), fun(x))
self.assertEqual(expr.expr_node.fun, tracers.env_lookup)
def _test_einsum_zero(self, fun, x):
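    """Checks that an einsum with an all-zero operand constant-folds to a ConstExpr."""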
expr = tracers.make_expr(fun, x)
einsum_node = expr.expr_node.parents[0].parents[0]
self.assertEqual(einsum_node.fun, np.einsum)
expr = self._eager_rewriter_test_helper(fun, rewrites.maybe_einsum, x,
expr=expr, rewrite_node=einsum_node)
self.assertIsInstance(expr, tracers.ConstExpr)
def testEinsumZero(self):
x = npr.randn(10)
zero = np.zeros_like(x)
def fun(x):
return 3.0 + np.sum(np.einsum('i,i->i', zero, x))
self._test_einsum_zero(fun, x)
def fun(x):
return 3.0 + np.sum(np.einsum('i,i->i', x, zero))
self._test_einsum_zero(fun, x)
def fun(x):
return 3.0 + np.sum(np.einsum('i,i,i->i', x, zero, x))
self._test_einsum_zero(fun, x)
def fun(x):
return 3.0 + (0.0 + np.einsum('i,i->', x, zero))
self._test_einsum_zero(fun, x)
def fun(x):
return 3.0 + (0.0 + np.einsum(',->', np.sum(x), 0.0))
self._test_einsum_zero(fun, x)
def testFoldedEinsum(self):
x = npr.randn(10)
ones = np.ones(5)
def fun(x):
return np.einsum(',,i,->', np.sum(x), 0.5, x, 2.0)
self.assertAlmostEqual(fun(x), x.sum() ** 2)
def fun(x):
return rewrites.constant_folding_einsum(',,i,->', np.sum(x), 0.5, x, 2.0)
self.assertAlmostEqual(fun(x), x.sum() ** 2)
expr = tracers.make_expr(fun, x)
self.assertEqual(len(expr.expr_node.args), 3)
def fun(x):
return rewrites.constant_folding_einsum(',,i,->', np.sum(x), 0., x, 2.0)
self.assertEqual(fun(x), 0.)
expr = tracers.make_expr(fun, x)
self.assertIsInstance(expr, tracers.ConstExpr)
self.assertEqual(expr.val, 0.)
def fun(x):
return np.einsum(',j,i,j->', np.sum(x), 0.5 * ones, x, 2.0 * ones)
self.assertAlmostEqual(fun(x), x.sum() ** 2 * len(ones))
def fun(x):
return rewrites.constant_folding_einsum(',j,i,j->', np.sum(x), 0.5 * ones,
x, 2.0 * ones)
self.assertAlmostEqual(fun(x), x.sum() ** 2 * len(ones))
expr = tracers.make_expr(fun, x)
self.assertEqual(len(expr.expr_node.args), 4)
def fun(x):
return rewrites.constant_folding_einsum(',i,i,j->', np.sum(x),
np.ones_like(x), x, ones)
self.assertAlmostEqual(fun(x), x.sum() ** 2 * len(ones))
expr = tracers.make_expr(fun, x)
self.assertEqual(len(expr.expr_node.args), 4)
def fun(x):
return rewrites.constant_folding_einsum(',j,i,j->', np.sum(x), 0. * ones,
x, 2.0 * ones)
self.assertEqual(fun(x), 0.)
expr = tracers.make_expr(fun, x)
self.assertIsInstance(expr, tracers.ConstExpr)
self.assertEqual(expr.val, 0.)
def testGatherLogAddEinsum(self):
a = abs(npr.randn())
x = abs(npr.randn(10))
def fun(a, x, y, z):
return np.log(tracers.add_n(np.einsum(',a->', a, x),
np.einsum(',a->', a, y),
np.einsum(',a->', a, z)))
expr = self._rewriter_test_helper(fun, rewrites.gather_log_add_einsum,
a, x, x, x)
self.assertEqual(expr.expr_node.parents[0].fun, np.log)
self.assertEqual(expr.expr_node.parents[1].fun, np.log)
def testAddPowersWithinEinsum(self):
x = npr.randn()
def fun(x):
return np.einsum(',,->', x ** 2, x ** 2, 3.)
expr = self._rewriter_test_helper(fun, rewrites.add_powers_within_einsum, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
self.assertTrue(any([node.fun == np.power and node.args[1] == 4
for node in expr.expr_node.parents]))
def testIncrementNegativePowerInEinsum(self):
x = npr.randn(10)
def fun(x):
return np.einsum(',a,a,a,a->', 3., x, x ** -3, x, x)
expr = self._rewriter_test_helper(
fun, rewrites.increment_negative_power_in_einsum_r, x)
self.assertEqual(expr.expr_node.fun, np.einsum)
self.assertTrue(any([node.fun == np.power and node.args[1] == -2
for node in expr.expr_node.parents]))
expr = self._rewriter_test_helper(
fun, rewrites.increment_negative_power_in_einsum_r, x, expr=expr)
self.assertEqual(expr.expr_node.fun, np.einsum)
self.assertTrue(any([node.fun == np.power and node.args[1] == -1
for node in expr.expr_node.parents]))
expr = self._rewriter_test_helper(
fun, rewrites.increment_negative_power_in_einsum_l, x, expr=expr)
self.assertEqual(expr.expr_node.fun, np.einsum)
self.assertTrue(any([node.fun == np.power and node.args[1] == 0
for node in expr.expr_node.parents]))
def testSwapaxesToEinsum(self):
x = np.arange(9).reshape([3, 3])
self.assertTrue((np.swapaxes(x, 0, 1) == rewrites.swapaxes(x, 0, 1)).all())
def testRenameFormulaIndices(self):
self.assertEqual(
rewrites._rename_formula_indices('...ikj->...jk'), '...abc->...cb')
def testDebroadcastFormula(self):
self.assertEqual(
rewrites.debroadcast_formula('...i,...j->...', *[1, 1]), 'a,b->')
self.assertEqual(
rewrites.debroadcast_formula('...i,...j->...', *[2, 2]), 'ab,ac->a')
# _remove_ellipsis would fail this test
self.assertEqual(
rewrites.debroadcast_formula('...,...->...', *[1, 1]), 'a,a->a')
self.assertEqual(
rewrites.debroadcast_formula(
'...a,...b->...ab', *[2, 3]), 'ab,cad->cabd')
def testEinsumRepeatedOneHot(self):
x = npr.randn(3, 2)
y = npr.randn(3, 2)
e = npr.randint(0, x.shape[0], 5)
def fun(x, y, e):
one_hot_e = tracers.one_hot(e, x.shape[0])
return np.einsum('ab,bc,ad,dc->', one_hot_e, x, one_hot_e, y)
expr = self._rewriter_test_helper(
fun, rewrites.einsum_repeated_one_hot, x, y, e)
self.assertEqual(len(expr.expr_node.args), 4)
self.assertEqual(sum(node.fun == tracers.one_hot
for node in expr.expr_node.parents), 1)
def fun(x, y, e):
one_hot_e = tracers.one_hot(e, x.shape[0])
return np.einsum('ab,bc,ad,dc->ac', one_hot_e, x, one_hot_e, y)
expr = self._rewriter_test_helper(
fun, rewrites.einsum_repeated_one_hot, x, y, e)
self.assertEqual(len(expr.expr_node.args), 4)
self.assertEqual(sum(node.fun == tracers.one_hot
for node in expr.expr_node.parents), 1)
def testGatherPowAddMul(self):
x = npr.randn()
a = npr.randn(10)
b = npr.randn(10)
def fun(x, a, b):
return tracers.add_n(np.einsum(',a->a', x, a), np.einsum(',a->a', x, b)) ** 3
expr = self._rewriter_test_helper(fun, rewrites.gather_pow_add_einsum, x, a, b)
self.assertEqual(expr.expr_node.fun, np.multiply)
self.assertEqual(expr.expr_node.parents[0].fun, np.power)
self.assertEqual(expr.expr_node.parents[1].fun, np.power)
def testGatherInvAddMul(self):
x = npr.randn()
a = 2. * np.eye(2)
b = 2.5 * np.eye(2)
def fun(x, a, b):
return np.linalg.inv(tracers.add_n(np.einsum(',ab->ab', x, a), np.einsum(',ab->ab', x, b)))
expr = self._rewriter_test_helper(fun, rewrites.gather_inv_add_einsum, x, a, b)
self.assertEqual(expr.expr_node.fun, np.multiply)
parent_funs = [parent.fun for parent in expr.expr_node.parents]
self.assertTrue(np.power in parent_funs)
self.assertTrue(np.linalg.inv in parent_funs)
def testGatherLogdetAddMul(self):
x = np.exp(npr.randn())
a = 2. * np.eye(2)
b = 2.5 * np.eye(2)
def fun(x, a, b):
return tracers.logdet(tracers.add_n(np.einsum(',ab->ab', x, a), np.einsum(',ab->ab', x, b)))
expr = self._rewriter_test_helper(fun, rewrites.gather_logdet_add_einsum, x, a, b)
self.assertEqual(expr.expr_node.fun, np.add)
parent_funs = [parent.fun for parent in expr.expr_node.parents]
self.assertTrue(tracers.logdet in parent_funs)
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import autograd.numpy as np
import autograd.numpy.random as npr
from autoconj.patterns import (Val, Node, Array, Str, Add, AddN, Subtract,
Multiply, Power, Dot, Einsum, Choice, Segment,
Star)
from autoconj import matchers
from autoconj import patterns
from autoconj import tracers
class MatchersTest(absltest.TestCase):
def testOneElementPattern(self):
def fun(x, y):
return 3 * x + y**2
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
match = matchers.matcher(Val)
self.assertTrue(match(end_node))
match = matchers.matcher(Add)
self.assertTrue(match(end_node))
match = matchers.matcher(Multiply)
self.assertFalse(match(end_node))
def testOneElementPatternNameBinding(self):
def fun(x, y):
return 3 * x + y**2
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
match = matchers.matcher(Val('z'))
self.assertEqual(match(end_node), {'z': end_node})
match = matchers.matcher(Add('z'))
self.assertEqual(match(end_node), {'z': end_node.fun})
match = matchers.matcher(Multiply('z'))
self.assertFalse(match(end_node))
def testLiterals(self):
match = matchers.matcher(3)
self.assertTrue(match(3))
def fun(x):
return 2 + x
x = np.ones(2)
end_node = tracers.make_expr(fun, x).expr_node
match = matchers.matcher((Add, 2, Val))
self.assertTrue(match(end_node))
def testCompoundPattern(self):
def fun(x, y):
return 3 * x + y**2
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
match = matchers.matcher((Add, Val, Val))
self.assertTrue(match(end_node))
match = matchers.matcher((Add, Multiply, Val))
self.assertTrue(match(end_node))
match = matchers.matcher((Add, (Multiply, Val, Val), Val))
self.assertTrue(match(end_node))
match = matchers.matcher((Add, (Multiply, 3, Val), (Power, Val, 2)))
self.assertTrue(match(end_node))
match = matchers.matcher((Add, (Add, Val, Val), Val))
self.assertFalse(match(end_node))
match = matchers.matcher((Add, (Multiply, 4, Val), (Power, Val, 2)))
self.assertFalse(match(end_node))
def testCompoundPatternNameBindings(self):
def fun(x, y):
return 3 * x + y**2
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
match = matchers.matcher((Add,
(Multiply, 3, Val('x')),
(Power, Val('y'), 2)))
self.assertEqual(match(end_node),
{'x': end_node.args[0].args[1],
'y': end_node.args[1].args[0]})
def testCompoundPatternNameConstraints(self):
def fun(x, y):
return 3 * x + y**2
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
match = matchers.matcher((Add,
(Multiply, 3, Val('x')),
(Power, Val('x'), 2)))
self.assertFalse(match(end_node))
def fun(x, y):
return 3 * x + x**2 # note x used twice
x = np.ones(2)
y = 2 * np.ones(2)
end_node = tracers.make_expr(fun, x, y).expr_node
self.assertEqual(match(end_node),
{'x': end_node.args[0].args[1]})
def testChoices(self):
W = npr.randn(3, 3)
b = npr.randn(3)
def fun(x):
return np.dot(x, W) + b
x = np.ones((5, 3))
end_node = tracers.make_expr(fun, x).expr_node
match = matchers.matcher((Add, Choice(Dot('op'), Multiply('op')), Val))
self.assertEqual(match(end_node),
{'op': end_node.args[0].fun})
match = matchers.matcher((Add, Choice(Add('op'), Multiply('op')), Val))
self.assertFalse(match(end_node))
match = matchers.matcher((Choice((Add, (Multiply, Val, Val)), # backtrack
(Add, (Dot, Val('x'), Val('W')), Val('b')),
(Dot, Val('x'), Val('W')))))
self.assertEqual(match(end_node),
{'x': end_node.args[0].args[0],
'W': end_node.args[0].args[1],
'b': end_node.args[1]})
def testSegments(self):
def fun(x):
return np.einsum('i,j,,k->ijk', x, x, 2, x)
x = np.ones(3)
end_node = tracers.make_expr(fun, x).expr_node
match = matchers.matcher((Einsum, Str, Segment, 2, Segment))
self.assertTrue(match(end_node))
match = matchers.matcher((Einsum, Str, Segment, 3, Segment))
self.assertFalse(match(end_node))
match = matchers.matcher((Einsum, Str, Segment('s1'), 2, Segment('s2')))
bindings = match(end_node)
self.assertTrue('s1' in bindings)
self.assertEqual(len(bindings['s1']), 2)
match = matchers.matcher((Einsum, Str, Segment('s1'), 2, Segment('s2')))
bindings = match(end_node)
self.assertTrue('s1' in bindings)
self.assertEqual(len(bindings['s1']), 2)
match = matchers.matcher((Einsum, Str, Segment('s1'), 2, Array,
Segment('s2')))
bindings = match(end_node)
self.assertTrue('s2' in bindings)
self.assertEqual(len(bindings['s2']), 0)
def testSegmentsEmpty(self):
def fun(x, y, z):
return np.einsum('i,j,ij->', x - y, x, z)
x = np.ones(3)
y = 2 * np.ones(3)
z = 3 * np.ones((3, 3))
end_node = tracers.make_expr(fun, x, y, z).expr_node
pat = (Einsum, Str('formula'),
Segment('args1'),
(Choice(Subtract('op'), Add('op')), Val('x'), Val('y')),
Segment('args2'))
match = matchers.matcher(pat)
self.assertTrue(match(end_node))
def testStar(self):
x = np.ones(3)
def f(x):
return np.einsum('i,j->', x, x)
f_expr = tracers.make_expr(f, x)
def g(x):
return np.einsum('i,j->', x, 3 * np.ones(x.shape))
g_expr = tracers.make_expr(g, x)
pat = (Einsum, Str('formula'), Star(Val('x')))
match = matchers.matcher(pat)
self.assertTrue(match(f_expr.expr_node))
self.assertFalse(match(g_expr.expr_node))
def testStarRepeatedNames(self):
x = np.ones(3)
def f(x):
return np.einsum('i,j,k,l,m->', x, x, 3 * np.ones(x.shape), x, x)
f_expr = tracers.make_expr(f, x)
def g(x):
return np.einsum('i,j,k,l->', x, x, 3 * np.ones(x.shape), x)
g_expr = tracers.make_expr(g, x)
pat = (Einsum, Str('formula'),
Star(Val('x'), 'xs'), Val, Star(Val('x'), 'xs'))
match = matchers.matcher(pat)
self.assertTrue(match(f_expr.expr_node))
self.assertFalse(match(g_expr.expr_node))
def testAccumulateInStar(self):
def f(x):
return np.einsum('i,j,k->', x, x, 3*np.ones(x.shape))
x = np.ones(3)
f_expr = tracers.make_expr(f, x)
pat = (Einsum, Str('formula'), Star(Val('args'), accumulate=['args']))
match_fn = matchers.matcher(pat)
# should produce:
# bindings = {'args': (x, x, 3*np.ones(x.shape)), 'formula': 'i,j,k->'}
self.assertTrue(match_fn(f_expr.expr_node))
def f(x):
return tracers.add_n(np.einsum(',i->i', x, np.ones(3)),
np.einsum(',j->j', x, 2. * np.ones(3)))
x = 2.5
f_expr = tracers.make_expr(f, x)
pat = (AddN, Star((Einsum, Str('formula'),
Segment('args1'), Node('x'), Segment('args2')),
accumulate=['formula', 'args1', 'args2']))
match_fn = matchers.matcher(pat)
match = match_fn(f_expr.expr_node)
self.assertEqual(len(match['formula']), 2)
self.assertEqual(len(match['args1']), 2)
self.assertEqual(len(match['args2']), 2)
self.assertEqual(match['x'].fun.__name__, 'env_lookup')
self.assertIn(',i->i', match['formula'])
self.assertIn(',j->j', match['formula'])
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from autoconj.canonicalize import canonicalize, is_canonical, simplify_sweep
from autoconj.tracers import add_n, eval_expr, GraphExpr, make_expr, print_expr
import autograd.extend as ag_extend
import autograd.numpy as np
class CanonicalizeTest(absltest.TestCase):
def testDummySimplify(self):
"""Ensures simplify_sweep() works with a dummy simplification."""
expr = make_expr(lambda x, y: x * (y * 3. + y), 3., 4.)
self.assertFalse(simplify_sweep(expr, lambda node: False))
def testEinsumAddSubSimplify(self):
# TODO(mhoffman): Think about broadcasting. We need to support `x - 2.0`.
def test_fun(x):
return np.einsum('i->', x + np.full(x.shape, 2.0))
expr = make_expr(test_fun, np.ones(3))
test_x = np.full(3, 0.5)
correct_value = eval_expr(expr, {'x': test_x})
expr = canonicalize(expr)
self.assertIsInstance(expr, GraphExpr)
self.assertEqual(expr.expr_node.fun, add_n)
self.assertEqual(expr.expr_node.parents[0].fun.__name__, 'einsum')
new_value = eval_expr(expr, {'x': test_x})
self.assertEqual(correct_value, new_value)
def testCanonicalize(self):
def mahalanobis_distance(x, y, matrix):
x_minus_y = x - y
return np.einsum('i,j,ij->', x_minus_y, x_minus_y, matrix)
x = np.array([1.3, 3.6])
y = np.array([2.3, -1.2])
matrix = np.arange(4).reshape([2, 2])
expr = make_expr(mahalanobis_distance, x, y, matrix)
self.assertFalse(is_canonical(expr))
correct_value = eval_expr(expr, {'x': x, 'y': y, 'matrix': matrix})
expr = canonicalize(expr)
self.assertTrue(is_canonical(expr))
new_value = eval_expr(expr, {'x': x, 'y': y, 'matrix': matrix})
self.assertAlmostEqual(correct_value, new_value)
def testEinsumCompose(self):
def Xbeta_squared(X, beta):
Xbeta = np.einsum('ij,j->i', X, beta)
Xbeta2 = np.einsum('lm,m->l', X, beta)
return np.einsum('k,k->', Xbeta, Xbeta2)
n_examples = 10
n_predictors = 2
X = np.random.randn(n_examples, n_predictors)
beta = np.random.randn(n_predictors)
expr = make_expr(Xbeta_squared, X, beta)
correct_value = eval_expr(expr, {'X': X, 'beta': beta})
self.assertFalse(is_canonical(expr))
expr = canonicalize(expr)
new_value = eval_expr(expr, {'X': X, 'beta': beta})
self.assertAlmostEqual(correct_value, new_value)
self.assertIsInstance(expr, GraphExpr)
self.assertEqual(expr.expr_node.fun, np.einsum)
self.assertTrue(is_canonical(expr))
def testLinearRegression(self):
def squared_loss(X, beta, y):
predictions = np.einsum('ij,j->i', X, beta)
errors = y - predictions
return np.einsum('k,k->', errors, errors)
n_examples = 10
n_predictors = 2
X = np.random.randn(n_examples, n_predictors)
beta = np.random.randn(n_predictors)
y = np.random.randn(n_examples)
expr = make_expr(squared_loss, X, beta, y)
correct_value = eval_expr(expr, {'X': X, 'beta': beta, 'y':y})
self.assertFalse(is_canonical(expr))
expr = canonicalize(expr)
self.assertTrue(is_canonical(expr))
new_value = eval_expr(expr, {'X': X, 'beta': beta, 'y':y})
self.assertAlmostEqual(correct_value, new_value)
def testReciprocalToPow(self):
def fun(x):
return np.reciprocal(x)
expr = make_expr(fun, 3.)
expr = canonicalize(expr)
self.assertIsInstance(expr, GraphExpr)
self.assertEqual(expr.expr_node.fun, np.power)
self.assertEqual(eval_expr(expr, {'x': 3.}), fun(3.))
def testSquareToPow(self):
def fun(x):
return np.square(x)
expr = make_expr(fun, 3.)
expr = canonicalize(expr)
self.assertIsInstance(expr, GraphExpr)
self.assertEqual(expr.expr_node.fun, np.power)
self.assertEqual(eval_expr(expr, {'x': 3.}), fun(3.))
def testSqrtToPow(self):
def fun(x):
return np.sqrt(x)
expr = make_expr(fun, 3.)
expr = canonicalize(expr)
self.assertIsInstance(expr, GraphExpr)
self.assertEqual(expr.expr_node.fun, np.power)
self.assertEqual(eval_expr(expr, {'x': 3.}), fun(3.))
if __name__ == '__main__':
absltest.main()
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normal-Gamma model with PPLHam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import autograd.numpy as np
from autoconj import conjugacy
from autoconj import pplham as ph
def model(alpha, beta, kappa, mu0):
tau = ph.gamma.rvs(alpha, beta)
mu = ph.norm.rvs(mu0, 1. / np.sqrt(kappa * tau))
x = ph.norm.rvs(mu, 1. / np.sqrt(tau))
return x
def main(argv):
del argv # Unused.
n_examples = 10
a = 1.3
b = 2.2
kappa = 1.5
mu0 = 0.3
tau = np.random.gamma(a, 1. / b)
mu = np.random.normal(mu0, 1. / np.sqrt(tau * kappa))
x = np.random.normal(mu, 1. / np.sqrt(tau), n_examples)
all_args = [a, b, kappa, mu0, tau, mu, x]
all_args_ex_mu = [a, b, kappa, mu0, tau, x]
log_joint = ph.make_log_joint_fn(model)
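# make_log_joint_fn turns the generative `model` into a log joint density over
# its parameters followed by the sampled latents in sampling order, i.e.
# (alpha, beta, kappa, mu0, tau, mu, x). So argnum 5 below selects mu, and,
# after mu is marginalized out, argnum 4 of the reduced argument list selects
# tau.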
mu_conditional_factory = conjugacy.complete_conditional(
log_joint, 5, conjugacy.SupportTypes.REAL, *all_args)
mu_conditional = mu_conditional_factory(*all_args_ex_mu)
log_p_tau = conjugacy.marginalize(log_joint, 5, conjugacy.SupportTypes.REAL,
*all_args)
tau_conditional_factory = conjugacy.complete_conditional(
log_p_tau, 4, conjugacy.SupportTypes.NONNEGATIVE, *all_args_ex_mu)
tau_conditional = tau_conditional_factory(*[a, b, kappa, mu0, x])
print('True tau: {}'.format(tau))
print('tau posterior is gamma({}, {}). Mean is {}, std. dev. is {}.'.format(
tau_conditional.args[0], 1. / tau_conditional.args[2],
tau_conditional.args[0] * tau_conditional.args[2],
np.sqrt(tau_conditional.args[0]) * tau_conditional.args[2]))
print()
print('True mu: {}'.format(mu))
print('mu posterior given tau is normal({}, {})'.format(
mu_conditional.args[0], mu_conditional.args[1]))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Gaussians with variational inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import autograd.numpy as np
import autograd.numpy.random as npr
import matplotlib.pyplot as plt
import time
from autoconj import log_probs
from autoconj.tracers import one_hot
from autoconj.util import SupportTypes
from autoconj.meanfield import cavi
flags.DEFINE_integer(
'num_clusters',
default=10,
help='Number of clusters.')
flags.DEFINE_integer(
'num_dimensions',
default=20,
help='Number of data dimensions.')
flags.DEFINE_integer(
'num_observations',
default=1000,
help='Number of observations.')
flags.DEFINE_integer(
'num_iterations',
default=500,
help='Number of iterations to run training.')
FLAGS = flags.FLAGS
REAL = SupportTypes.REAL
INTEGER = SupportTypes.INTEGER
SIMPLEX = SupportTypes.SIMPLEX
NONNEGATIVE = SupportTypes.NONNEGATIVE
def make_log_joint(x, alpha, a, b, kappa):
def log_joint(pi, z, mu, tau):
log_p_pi = log_probs.dirichlet_gen_log_prob(pi, alpha)
log_p_mu = log_probs.norm_gen_log_prob(mu, 0., 1. / np.sqrt(kappa * tau))
log_p_z = log_probs.categorical_gen_log_prob(z, pi)
log_p_tau = log_probs.gamma_gen_log_prob(tau, a, b)
z_one_hot = one_hot(z, len(pi))
mu_z = np.dot(z_one_hot, mu)
log_p_x = log_probs.norm_gen_log_prob(x, mu_z, 1. / np.sqrt(tau))
return log_p_pi + log_p_z + log_p_mu + log_p_tau + log_p_x
return log_joint
def plot(mu, data):
fig, ax = plt.subplots(figsize=(6, 6), dpi=150)
ax.plot(data[:,0], data[:,1], 'k.')
(log_weights,), _, (Exx, Ex), _ = mu
for weight, second_moment, mean in zip(np.exp(log_weights), Exx, Ex):
Sigma = np.diag(second_moment - mean**2)
plot_ellipse(ax, weight, mean, Sigma)
return fig
def plot_ellipse(ax, alpha, mean, cov):
t = np.linspace(0, 2*np.pi, 100) % (2*np.pi)
circle = np.vstack((np.sin(t), np.cos(t)))
ellipse = np.dot(np.linalg.cholesky(cov), circle) + mean[:,None]
ax.plot(ellipse[0], ellipse[1], alpha=1., linestyle='-', linewidth=2)
def main(argv):
del argv
n_clusters = FLAGS.num_clusters
n_dimensions = FLAGS.num_dimensions
n_observations = FLAGS.num_observations
alpha = 3.3 * np.ones(n_clusters)
a = 1.
b = 1.
kappa = 0.1
npr.seed(10001)
# generate true latents and data
pi = npr.gamma(alpha)
pi /= pi.sum()
mu = npr.normal(0, 1.5, [n_clusters, n_dimensions])
z = npr.choice(np.arange(n_clusters), size=n_observations, p=pi)
x = npr.normal(mu[z, :], 0.5 ** 2)
# points used for initialization
pi_est = np.ones(n_clusters) / n_clusters
z_est = npr.choice(np.arange(n_clusters), size=n_observations, p=pi_est)
mu_est = npr.normal(0., 0.01, [n_clusters, n_dimensions])
tau_est = 1.
init_vals = pi_est, z_est, mu_est, tau_est
# instantiate the model log joint
log_joint = make_log_joint(x, alpha, a, b, kappa)
# run mean field on variational mean parameters
# NOTE: this callback is illustrative and currently unused (the cavi call
# below passes a no-op lambda); it expects the iteration index alongside the
# variational mean parameters.
def callback(meanparams, itr):
fig = plot(meanparams, x)
plt.savefig('/tmp/gmm_{:04d}.png'.format(itr))
plt.close(fig.number)
start = time.time()
cavi(log_joint, init_vals, (SIMPLEX, INTEGER, REAL, NONNEGATIVE),
FLAGS.num_iterations, callback=lambda *args: None)
runtime = time.time() - start
print("CAVI Runtime (s): ", runtime)
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Gaussians."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import autograd.numpy as np
from autoconj import conjugacy, log_probs
from autoconj.tracers import one_hot
def log_joint(x, pi, z, mu, sigma_sq, alpha, sigma_sq_mu):
log_p_pi = log_probs.dirichlet_gen_log_prob(pi, alpha)
log_p_mu = log_probs.norm_gen_log_prob(mu, 0, np.sqrt(sigma_sq_mu))
log_p_z = log_probs.categorical_gen_log_prob(z, pi)
z_one_hot = one_hot(z, len(pi))
mu_z = np.dot(z_one_hot, mu)
log_p_x = log_probs.norm_gen_log_prob(x, mu_z, np.sqrt(sigma_sq))
return log_p_pi + log_p_z + log_p_mu + log_p_x
def remove_arg(argnum, args):
return args[:argnum] + args[argnum + 1:]
def main(argv):
del argv
n_clusters = 5
n_dimensions = 2
n_observations = 200
alpha = 3.3 * np.ones(n_clusters)
sigma_sq_mu = 1.5 ** 2
sigma_sq = 0.5 ** 2
np.random.seed(10001)
pi = np.random.gamma(alpha)
pi /= pi.sum()
mu = np.random.normal(0, np.sqrt(sigma_sq_mu), [n_clusters, n_dimensions])
z = np.random.choice(np.arange(n_clusters), size=n_observations, p=pi)
x = np.random.normal(mu[z, :], np.sqrt(sigma_sq))
pi_est = np.ones(n_clusters) / n_clusters
z_est = np.random.choice(np.arange(n_clusters), size=n_observations, p=pi_est)
mu_est = np.random.normal(0., 0.01, [n_clusters, n_dimensions])
all_args = [x, pi_est, z_est, mu_est, sigma_sq, alpha, sigma_sq_mu]
pi_posterior = conjugacy.complete_conditional(
log_joint, 1, conjugacy.SupportTypes.SIMPLEX, *all_args)
z_posterior = conjugacy.complete_conditional(
log_joint, 2, conjugacy.SupportTypes.INTEGER, *all_args)
mu_posterior = conjugacy.complete_conditional(
log_joint, 3, conjugacy.SupportTypes.REAL, *all_args)
print('iteration\tlog_joint')
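# Gibbs sampling: sweep through the latents, resampling each block (z, pi,
# mu) from its complete conditional given the current values of the others.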
for iteration in range(100):
z_est[:] = z_posterior(*remove_arg(2, all_args)).rvs()
pi_est[:] = pi_posterior(*remove_arg(1, all_args)).rvs()
mu_est[:] = mu_posterior(*remove_arg(3, all_args)).rvs()
print('{}\t\t{}'.format(iteration, log_joint(*all_args)))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normal-Gamma model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import autograd.numpy as np
from autoconj import conjugacy, log_probs
def log_joint(tau, mu, x, alpha, beta, kappa, mu0):
log_p_tau = log_probs.gamma_gen_log_prob(tau, alpha, beta)
log_p_mu = log_probs.norm_gen_log_prob(mu, mu0, 1. / np.sqrt(kappa * tau))
log_p_x = log_probs.norm_gen_log_prob(x, mu, 1. / np.sqrt(tau))
return log_p_tau + log_p_mu + log_p_x
def main(argv):
del argv # Unused.
n_examples = 10
a = 1.3
b = 2.2
kappa = 1.5
mu0 = 0.3
tau = np.random.gamma(a, 1. / b)
mu = np.random.normal(mu0, 1. / np.sqrt(tau * kappa))
x = np.random.normal(mu, 1. / np.sqrt(tau), n_examples)
all_args = [tau, mu, x, a, b, kappa, mu0]
all_args_ex_mu = [tau, x, a, b, kappa, mu0]
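# Collapsed inference: first form p(mu | tau, x) directly, then marginalize
# mu out of the joint so that the tau conditional below is derived from
# log p(tau, x) rather than log p(tau, mu, x).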
mu_conditional_factory = conjugacy.complete_conditional(
log_joint, 1, conjugacy.SupportTypes.REAL, *all_args)
mu_conditional = mu_conditional_factory(*all_args_ex_mu)
log_p_tau = conjugacy.marginalize(log_joint, 1, conjugacy.SupportTypes.REAL,
*all_args)
tau_conditional_factory = conjugacy.complete_conditional(
log_p_tau, 0, conjugacy.SupportTypes.NONNEGATIVE, *all_args_ex_mu)
tau_conditional = tau_conditional_factory(*all_args_ex_mu[1:])
print('True tau: {}'.format(tau))
print('tau posterior is gamma({}, {}). Mean is {}, std. dev. is {}.'.format(
tau_conditional.args[0], 1. / tau_conditional.args[2],
tau_conditional.args[0] * tau_conditional.args[2],
np.sqrt(tau_conditional.args[0]) * tau_conditional.args[2]))
print()
print('True mu: {}'.format(mu))
print('mu posterior given tau is normal({}, {})'.format(
mu_conditional.args[0], mu_conditional.args[1]))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Gaussians with variational inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from autograd import grad
import autograd.numpy as np
import autograd.numpy.random as npr
from autoconj import pplham as ph
from autoconj.util import SupportTypes
from autoconj.meanfield import cavi
REAL = SupportTypes.REAL
NONNEGATIVE = SupportTypes.NONNEGATIVE
def main(unused_argv):
npr.seed(10001)
def make_model(alpha, beta):
"""Generates matrix of shape [num_examples, num_features]."""
def sample_model():
epsilon = ph.norm.rvs(0, 1, size=[num_examples, num_latents])
w = ph.norm.rvs(0, 1, size=[num_features, num_latents])
tau = ph.gamma.rvs(alpha, beta)
x = ph.norm.rvs(np.dot(epsilon, w.T), 1. / np.sqrt(tau))
return [epsilon, w, tau, x]
return sample_model
num_examples = 50
num_features = 10
num_latents = 5
alpha = 2.
beta = 8.
sampler = make_model(alpha, beta)
_, _, _, x = sampler()
epsilon, w, tau, _ = sampler() # initialization
log_joint_fn_ = ph.make_log_joint_fn(sampler)
log_joint_fn = lambda *args: log_joint_fn_(*(args + (x,))) # crappy partial
cavi(log_joint_fn, (epsilon, w, tau), (REAL, REAL, NONNEGATIVE), 50)
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixture of Gaussians with PPLHam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import autograd.numpy as np
from autoconj import conjugacy
from autoconj import pplham as ph
from autoconj.tracers import one_hot
def model(sigma_sq, alpha, sigma_sq_mu):
pi = ph.dirichlet.rvs(alpha)
mu = ph.norm.rvs(0., np.sqrt(sigma_sq_mu), size=[5, 2])
z = ph.categorical.rvs(pi, size=200)
z_one_hot = one_hot(z, len(pi))
mu_z = np.dot(z_one_hot, mu)
x = ph.norm.rvs(mu_z, np.sqrt(sigma_sq))
return x
def remove_arg(argnum, args):
return args[:argnum] + args[argnum + 1:]
def main(argv):
del argv
n_clusters = 5
n_dimensions = 2
n_observations = 200
alpha = 3.3 * np.ones(n_clusters)
sigma_sq_mu = 1.5 ** 2
sigma_sq = 0.5 ** 2
np.random.seed(10001)
pi = np.random.gamma(alpha)
pi /= pi.sum()
mu = np.random.normal(0, np.sqrt(sigma_sq_mu), [n_clusters, n_dimensions])
z = np.random.choice(np.arange(n_clusters), size=n_observations, p=pi)
x = np.random.normal(mu[z, :], np.sqrt(sigma_sq))
pi_est = np.ones(n_clusters) / n_clusters
z_est = np.random.choice(np.arange(n_clusters), size=n_observations, p=pi_est)
mu_est = np.random.normal(0., 0.01, [n_clusters, n_dimensions])
all_args = [sigma_sq, alpha, sigma_sq_mu, pi_est, mu_est, z_est, x]
log_joint = ph.make_log_joint_fn(model)
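# The traced log joint takes the model parameters followed by the sampled
# latents and the data: (sigma_sq, alpha, sigma_sq_mu, pi, mu, z, x). The
# argnums below therefore select pi (3), mu (4) and z (5).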
pi_posterior = conjugacy.complete_conditional(
log_joint, 3, conjugacy.SupportTypes.SIMPLEX, *all_args)
z_posterior = conjugacy.complete_conditional(
log_joint, 5, conjugacy.SupportTypes.INTEGER, *all_args)
mu_posterior = conjugacy.complete_conditional(
log_joint, 4, conjugacy.SupportTypes.REAL, *all_args)
print('iteration\tlog_joint')
for iteration in range(100):
z_est[:] = z_posterior(*remove_arg(5, all_args)).rvs()
pi_est[:] = pi_posterior(*remove_arg(3, all_args)).rvs()
mu_est[:] = mu_posterior(*remove_arg(4, all_args)).rvs()
print('{}\t\t{}'.format(iteration, log_joint(*all_args)))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Probabilistic principal components analysis with PPLHam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import time
from absl import app
from absl import flags
from autograd import grad
from autograd.misc.optimizers import adam
import autograd.numpy as np
import matplotlib
matplotlib.use('Agg')  # Select the non-interactive backend before pyplot is imported.
from matplotlib import figure
from matplotlib.backends import backend_agg
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
plt.rc('axes', labelsize='15')
import six # pylint: disable=g-import-not-at-top
from autoconj import conjugacy, log_probs
from autoconj import pplham as ph
from autoconj.meanfield import cavi
from autoconj.meanfield import elbo as elbo_fn
flags.DEFINE_list(
'inference',
default=['gibbs', 'advi', 'map', 'cavi'],
help='Comma-separated list of algorithms to perform and compare across. '
'Choices are gibbs, advi, map, cavi.')
flags.DEFINE_string(
'model_dir',
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'), 'ppca/'),
help='Directory to put the model\'s fit.')
flags.DEFINE_list(
'num_iterations',
default=['500', '7500', '15000', '1500'],
help='Comma-separated list of training steps. Aligns with each algorithm.')
flags.DEFINE_integer('num_print',
default=25,
help='Print progress every many of these steps.')
flags.DEFINE_bool('plot_only',
default=None,
help='If True, only does plotting. Defaults to running.')
FLAGS = flags.FLAGS
def run_gibbs(log_joint_fn, all_args, num_iterations):
"""Train model with Gibbs sampling."""
alpha, beta, epsilon, w, tau, x = all_args
# Form complete conditionals for Gibbs sampling.
epsilon_conditional_factory = conjugacy.complete_conditional(
log_joint_fn,
2,
conjugacy.SupportTypes.REAL,
*all_args)
w_conditional_factory = conjugacy.complete_conditional(
log_joint_fn,
3,
conjugacy.SupportTypes.REAL,
*all_args)
tau_conditional_factory = conjugacy.complete_conditional(
log_joint_fn,
4,
conjugacy.SupportTypes.NONNEGATIVE,
*all_args)
epsilon_conditional = lambda w, tau: epsilon_conditional_factory( # pylint: disable=g-long-lambda
alpha, beta, w, tau, x)
w_conditional = lambda epsilon, tau: w_conditional_factory( # pylint: disable=g-long-lambda
alpha, beta, epsilon, tau, x)
tau_conditional = lambda epsilon, w: tau_conditional_factory( # pylint: disable=g-long-lambda
alpha, beta, epsilon, w, x)
log_posterior = lambda epsilon, w, tau: log_joint_fn( # pylint: disable=g-long-lambda
alpha, beta, epsilon, w, tau, x)
# Run training loop. Track expected log joint probability, i.e.,
# E [ log p(xnew, params | xtrain) ]. It is estimated with 1 posterior sample.
print('Running Gibbs...')
epsilon = ph.norm.rvs(0, 1, size=epsilon.shape)
w = ph.norm.rvs(0, 1, size=w.shape)
tau = ph.gamma.rvs(alpha, scale=1./beta)
log_joints = []
runtimes = []
start = time.time()
for t in range(num_iterations):
epsilon = epsilon_conditional(w, tau).rvs()
w = w_conditional(epsilon, tau).rvs()
tau = tau_conditional(epsilon, w).rvs()
if t % FLAGS.num_print == 0 or (t + 1) == num_iterations:
log_joint = log_posterior(epsilon, w, tau)
runtime = time.time() - start
print('Iteration: {:>3d} Log Joint: {:.3f} '
'Runtime (s): {:.3f}'.format(t, log_joint, runtime))
log_joints.append(log_joint)
runtimes.append(runtime)
return log_joints, runtimes
def run_advi(log_joint_fn, all_args, num_iterations, run_map=False):
"""Train model with automatic differentiation variational inference.
Args:
run_map: If True, runs ADVI with `E_q [ log p(data, params) ]` as loss
function.
"""
alpha, beta, epsilon, w, tau, x = all_args
log_posterior = lambda epsilon, w, tau: log_joint_fn( # pylint: disable=g-long-lambda
alpha, beta, epsilon, w, tau, x)
def unpack_params(params):
"""Unpacks `np.ndarray` into list of variational parameters."""
param_shapes = [epsilon.shape, # loc for q(epsilon)
epsilon.shape, # log scale for q(epsilon)
w.shape, # loc for q(w)
w.shape, # log scale for q(w)
tau.shape, # loc for q(tau)
tau.shape] # log scale for q(tau)
begin = 0
end = 0
unpacked_params = []
for param_shape in param_shapes:
end += int(np.prod(param_shape)) # accumulate by number of parameters
param = params[begin:end].reshape(param_shape)
begin = end
unpacked_params.append(param)
return unpacked_params
def loss(params, t, return_marginal=False):
"""Reparameterization-based Monte Carlo estimate of negative ELBO."""
del t # unused
unpacked_params = unpack_params(params)
zs = []
log_q = 0.
# TODO(trandustin): Learn gamma latent with log transform. Currently, it is
# fixed at its true value.
# for t in range(3):
for t in range(2):
loc = unpacked_params[2 * t] # 0, 2, 4
log_scale = unpacked_params[2 * t + 1] # 1, 3, 5
z = loc + np.exp(log_scale) * np.random.normal(0, 1, size=log_scale.shape)
zs.append(z)
log_q += log_probs.norm_gen_log_prob(z, loc, np.exp(log_scale))
zs.append(tau)
log_p = log_posterior(*zs) # pylint: disable=no-value-for-parameter
if return_marginal:
return log_p
elif run_map:
return -log_p
return log_q - log_p
def callback(params, t, g):
"""Callback for use in Autograd's optimizer routine."""
del g # unused
if t % FLAGS.num_print == 0 or (t + 1) == num_iterations:
log_joint = loss(params, t, return_marginal=True)
elbo = -loss(params, t)
runtime = time.time() - start
print('Iteration: {:>3d} Log Joint: {:.3f} ELBO: {:.3f} '
'Runtime (s): {:.3f}'.format(t, log_joint, elbo, runtime))
log_joints.append(log_joint)
elbos.append(elbo)
runtimes.append(runtime)
return
grad_loss = grad(loss)
# TODO(trandustin): why is the ELBO positive?
# Run training loop. Track expected log joint probability, i.e.,
# E [ log p(xnew, params | xtrain) ]. It is estimated with 1 posterior sample.
if run_map:
print('Running MAP...')
else:
print('Running ADVI...')
num_params = int(2 * np.prod(epsilon.shape) +
2 * np.prod(w.shape) +
2 * np.prod(tau.shape))
print('Number of parameters: ', num_params)
# TODO(trandustin): use lists of params
# Initialize randomly near 0 for means and largely negative for log stddev.
params = np.concatenate([
np.random.normal(0, 1, size=int(np.prod(epsilon.shape))),
np.random.normal(-3, 1e-3, size=int(np.prod(epsilon.shape))),
np.random.normal(0, 1, size=int(np.prod(w.shape))),
np.random.normal(-3, 1e-3, size=int(np.prod(w.shape))),
np.random.normal(0, 1, size=int(np.prod(tau.shape))),
np.random.normal(-3, 1e-3, size=int(np.prod(tau.shape)))], 0)
log_joints = []
elbos = []
runtimes = []
start = time.time()
params = adam(grad_loss,
params,
callback=callback,
num_iters=num_iterations,
step_size=1e-2)
return log_joints, runtimes, elbos
def run_cavi(log_joint_fn, all_args, num_iterations):
"""Train model with coordinate-ascent variational inference."""
alpha, beta, epsilon, w, tau, x = all_args
log_posterior = lambda epsilon, w, tau: log_joint_fn( # pylint: disable=g-long-lambda
alpha, beta, epsilon, w, tau, x)
def callback(t, neg_energy, normalizers, natparams):
"""Callback for use in CAVI routine."""
if t % FLAGS.num_print == 0 or (t + 1) == num_iterations:
elbo, log_joint = elbo_fn(neg_energy, normalizers, natparams,
return_lp=True)
runtime = time.time() - start
print('Iteration: {:>3d} Log Joint: {:.3f} ELBO: {:.3f} '
'Runtime (s): {:.3f}'.format(t, log_joint, elbo, runtime))
log_joints.append(log_joint)
elbos.append(elbo)
runtimes.append(runtime)
return
# Run training loop. Track expected log joint probability, i.e.,
# E [ log p(xnew, params | xtrain) ]. It is estimated with 1 posterior sample.
print('Running CAVI...')
epsilon = ph.norm.rvs(0, 1, size=epsilon.shape)
w = ph.norm.rvs(0, 1, size=w.shape)
tau = ph.gamma.rvs(alpha, scale=1./beta)
log_joints = []
elbos = []
runtimes = []
start = time.time()
_ = cavi(log_posterior,
init_vals=(epsilon, w, tau),
supports=(conjugacy.SupportTypes.REAL,
conjugacy.SupportTypes.REAL,
conjugacy.SupportTypes.NONNEGATIVE),
num_iters=num_iterations,
callback=callback)
return log_joints, runtimes, elbos
def main(argv):
del argv # Unused.
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
FLAGS.num_iterations = [int(i) for i in FLAGS.num_iterations]
def model(alpha, beta):
"""Generates matrix of shape [num_examples, num_features]."""
epsilon = ph.norm.rvs(0, 1, size=[num_examples, num_latents])
w = ph.norm.rvs(0, 1, size=[num_features, num_latents])
tau = ph.gamma.rvs(alpha, beta)
# TODO(trandustin): try that this works
# x = ph.norm.rvs(np.dot(epsilon, w.T), 1. / np.sqrt(tau))
x = ph.norm.rvs(np.einsum('ik,jk->ij', epsilon, w), 1. / np.sqrt(tau))
return [epsilon, w, tau, x]
if FLAGS.plot_only:
# Load results from CSV.
# TODO(trandustin): refactor data structures. this is messy..
inference_algs = []
xs = []
ys = []
fname = os.path.join(FLAGS.model_dir, 'results_lp.csv')
print('Loading {}'.format(fname))
with open(fname, 'rb') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i % 3 == 0:
inference_algs.append(row)
elif i % 3 == 1:
xs.append(row)
else:
ys.append(row)
results_lp = {inference_alg[0]: [x, y]
for inference_alg, x, y in zip(inference_algs, xs, ys)}
inference_algs = []
xs = []
ys = []
fname = os.path.join(FLAGS.model_dir, 'results_elbo.csv')
print('Loading {}'.format(fname))
with open(fname, 'rb') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
if i % 3 == 0:
inference_algs.append(row)
elif i % 3 == 1:
xs.append(row)
else:
ys.append(row)
results_elbo = {inference_alg[0]: [x, y]
for inference_alg, x, y in zip(inference_algs, xs, ys)}
else:
# Use synthetic data generated from model.
num_examples = 100
num_features = 20
num_latents = 5
alpha = 2.
beta = 8.
epsilon, w, tau, x = model(alpha, beta)
all_args = [alpha, beta, epsilon, w, tau, x]
log_joint_fn = ph.make_log_joint_fn(model)
results_lp = {}
results_elbo = {}
for inference_alg, num_iters in zip(FLAGS.inference, FLAGS.num_iterations):
if inference_alg == 'gibbs':
log_joints, runtimes = run_gibbs(log_joint_fn, all_args, num_iters)
elif inference_alg == 'advi':
log_joints, runtimes, elbos = run_advi(log_joint_fn, all_args, num_iters)
results_elbo[inference_alg] = [runtimes, elbos]
elif inference_alg == 'map':
log_joints, runtimes, _ = run_advi(log_joint_fn, all_args, num_iters,
run_map=True)
elif inference_alg == 'cavi':
log_joints, runtimes, elbos = run_cavi(log_joint_fn, all_args, num_iters)
results_elbo[inference_alg] = [runtimes, elbos]
else:
raise NotImplementedError("Only 'gibbs', 'advi', 'map', 'cavi' is "
"implemented.")
results_lp[inference_alg] = [runtimes, log_joints]
# Write results to CSV to easily tweak plots and not have to rerun training.
fname = os.path.join(FLAGS.model_dir, 'results_lp.csv')
with open(fname, 'wb') as f:
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
for inference_alg, (x, y) in six.iteritems(results_lp):
writer.writerow([inference_alg])
writer.writerow(x)
writer.writerow(y)
print('Saved {}'.format(fname))
fname = os.path.join(FLAGS.model_dir, 'results_elbo.csv')
with open(fname, 'wb') as f:
writer = csv.writer(f, quoting=csv.QUOTE_ALL)
for inference_alg, (x, y) in six.iteritems(results_elbo):
writer.writerow([inference_alg])
writer.writerow(x)
writer.writerow(y)
print('Saved {}'.format(fname))
labels = {'gibbs': 'Gibbs', 'advi': 'ADVI', 'map': 'MAP', 'cavi': 'CAVI'}
# Plot ELBO by runtime (s).
figsize = (10, 5)
fig = figure.Figure(figsize=figsize)
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
for inference_alg, (x, y) in six.iteritems(results_elbo):
ax.plot(x, y, label=labels[inference_alg])
ax.set_xlabel('Runtime (s)')
ax.set_ylabel('ELBO')
ax.legend(loc='lower right')
fname = os.path.join(FLAGS.model_dir, 'elbo-over-runtime.png')
canvas.print_figure(fname, format='png')
print('Saved {}'.format(fname))
fname = os.path.join(FLAGS.model_dir, 'elbo-over-runtime.pdf')
canvas.print_figure(fname, format='pdf')
print('Saved {}'.format(fname))
# Plot expected log joint density by runtime (s).
# TODO(trandustin): calculate log posterior predictive (expected log
# likelihood), not expected log joint.
fig = figure.Figure(figsize=figsize)
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
for inference_alg, (x, y) in six.iteritems(results_lp):
ax.plot(x, y, label=labels[inference_alg])
ax.set_xlabel('Runtime (s)')
ax.set_ylabel('Expected Log Joint')
ax.legend(loc='lower right')
fname = os.path.join(FLAGS.model_dir, 'log-joint-over-runtime.png')
canvas.print_figure(fname, format='png', bbox_inches='tight')
print('Saved {}'.format(fname))
fname = os.path.join(FLAGS.model_dir, 'log-joint-over-runtime.pdf')
canvas.print_figure(fname, format='pdf', bbox_inches='tight')
print('Saved {}'.format(fname))
fig = figure.Figure(figsize=figsize)
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 1, 1)
for inference_alg, (x, y) in six.iteritems(results_lp):
if inference_alg == 'advi':
continue
ax.plot(x, y, label=labels[inference_alg])
ax.set_xlabel('Runtime (s)')
ax.set_ylabel('Expected Log Joint')
ax.set_ylim((-10000.0, -0.0))
ax.legend(loc='lower right')
fname = os.path.join(FLAGS.model_dir, 'log-joint-over-runtime-zoom.png')
canvas.print_figure(fname, format='png', bbox_inches='tight')
print('Saved {}'.format(fname))
fname = os.path.join(FLAGS.model_dir, 'log-joint-over-runtime-zoom.pdf')
canvas.print_figure(fname, format='pdf', bbox_inches='tight')
print('Saved {}'.format(fname))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from autograd import grad
import autograd.numpy as np
from absl import app
from autoconj.conjugacy import complete_conditional, marginalize, SupportTypes
from autoconj import canonicalize, conjugacy, log_probs, tracers
def log_p_x1_y1(x1, y1, x1_scale, y1_scale):
log_p_x1 = log_probs.norm_gen_log_prob(x1, 0, x1_scale)
log_p_y1_given_x1 = log_probs.norm_gen_log_prob(y1, x1, y1_scale)
return log_p_x1 + log_p_y1_given_x1
def log_p_xt_xtt_ytt(xt, xtt, ytt, xt_prior_mean, xt_prior_scale, x_scale,
y_scale):
log_p_xt = log_probs.norm_gen_log_prob(xt, xt_prior_mean, xt_prior_scale)
log_p_xtt = log_probs.norm_gen_log_prob(xtt, xt, x_scale)
log_p_ytt = log_probs.norm_gen_log_prob(ytt, xtt, y_scale)
return log_p_xt + log_p_xtt + log_p_ytt
def make_marginal_fn():
x1_given_y1_factory = complete_conditional(
log_p_x1_y1, 0, SupportTypes.REAL, *([1.] * 4))
log_p_y1 = marginalize(log_p_x1_y1, 0, SupportTypes.REAL, *([1.] * 4))
log_p_xtt_ytt = marginalize(
log_p_xt_xtt_ytt, 0, SupportTypes.REAL, *([1.] * 7))
log_p_ytt = marginalize(
log_p_xtt_ytt, 0, SupportTypes.REAL, *([1.] * 6))
xt_conditional_factory = complete_conditional(
log_p_xtt_ytt, 0, SupportTypes.REAL, *([1.] * 6))
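# Forward filtering recursion: the marginal likelihood factorizes as
# log p(y_1) + sum_t log p(y_t | y_1:t-1). Each step accumulates
# log p(y_t | y_1:t-1) using the current filtered Gaussian over the previous
# state (its mean and scale live in xt_conditional.args), then updates that
# filtered distribution by conditioning on y_t.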
def marginal(y_list, x_scale, y_scale):
log_p_y = log_p_y1(y_list[0], x_scale, y_scale)
xt_conditional = x1_given_y1_factory(y_list[0], x_scale, y_scale)
for t in range(1, len(y_list)):
log_p_y += log_p_ytt(y_list[t], xt_conditional.args[0],
xt_conditional.args[1], x_scale, y_scale)
xt_conditional = xt_conditional_factory(
y_list[t], xt_conditional.args[0], xt_conditional.args[1], x_scale,
y_scale)
return log_p_y
return marginal
def main(argv):
del argv # Unused.
x_scale = 0.1
y_scale = 1.
T = 50
x_list = np.cumsum(x_scale * np.random.randn(T))
y_list = np.array([x_list[t] + y_scale * np.random.randn() for t in range(T)])
marginal = make_marginal_fn()
marginal_grad = grad(lambda y_list, scales: marginal(y_list, *scales), 1)
x_scale_est = 0.1
y_scale_est = 1.
step_size = 0.5 / T
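# Multiplicative updates: scaling the gradient by the current estimate and
# exponentiating amounts to gradient ascent on log(scale), which keeps both
# scale parameters positive throughout optimization.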
for i in range(100):
t0 = time.time()
x_scale_grad, y_scale_grad = marginal_grad(
y_list, (x_scale_est, y_scale_est))
x_scale_est *= np.exp(step_size * x_scale_est * x_scale_grad)
y_scale_est *= np.exp(step_size * y_scale_est * y_scale_grad)
print('{}\t{}\t{}\t{}\t{}'.format(
time.time() - t0, i, marginal(y_list, x_scale_est, y_scale_est),
x_scale_est, y_scale_est))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import softmax
from objax.typing import JaxArray
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class MCD(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.c1: objax.Module = self.model[-1]
self.c2: objax.Module = objax.nn.Linear(self.c1.w.value.shape[0], nclass)
self.gen: objax.Module = self.model[:-1]
self.opt1 = objax.optimizer.Momentum(self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
self.opt2 = objax.optimizer.Momentum(self.c1.vars('c1') + self.c2.vars('c2'))
self.opt3 = objax.optimizer.Momentum(self.gen.vars())
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def get_two_outputs(v):
feat = self.gen(v, training=True)
return self.c1(feat), self.c2(feat)
def loss_function_phase1(x, y):
x1, x2 = get_two_outputs(x[:, 0])
xes = (objax.functional.loss.cross_entropy_logits(x1, y).mean() +
objax.functional.loss.cross_entropy_logits(x2, y).mean())
return xes, {'losses/xe': xes}
def loss_function_phase2(x, u, y):
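# Snapshot the generator's variables before this extra forward pass and
# restore them afterwards, presumably so that BatchNorm running statistics
# are not updated twice per training step; the phase-2 gradient itself only
# touches the classifier variables.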
saved = self.gen.vars().tensors()
x1, x2 = get_two_outputs(x[:, 0])
u1, u2 = get_two_outputs(u[:, 0])
self.gen.vars().assign(saved)
xes = (objax.functional.loss.cross_entropy_logits(x1, y).mean() +
objax.functional.loss.cross_entropy_logits(x2, y).mean())
dis = jn.abs(softmax(u1) - softmax(u2)).mean()
return xes - dis, {'losses/xe2': xes, 'losses/dis2': dis}
def loss_function_phase3(u):
u1, u2 = get_two_outputs(u[:, 0])
dis = jn.abs(softmax(u1) - softmax(u2)).mean()
return dis, {'losses/dis3': dis}
gv1 = objax.GradValues(loss_function_phase1, self.gen.vars() + self.c1.vars('c1') + self.c2.vars('c2'))
gv2 = objax.GradValues(loss_function_phase2, self.c1.vars('c1') + self.c2.vars('c2'))
gv3 = objax.GradValues(loss_function_phase3, self.gen.vars())
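# Three-phase Maximum Classifier Discrepancy update (applied once per step in
# train_op below):
#   phase 1: train generator + both classifiers on labeled cross-entropy,
#   phase 2: update only the classifiers to keep labeled accuracy while
#            maximizing their disagreement on unlabeled data (xes - dis),
#   phase 3: update only the generator to minimize that disagreement,
#            repeated `wu` times per step.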
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v1 = gv1(x, y)
self.opt1(lr, objax.functional.parallel.pmean(g))
g, v2 = gv2(x, u, y)
self.opt2(lr, objax.functional.parallel.pmean(g))
v3 = {}
for _ in range(self.params.wu):
g, v = gv3(u)
for k, val in v[1].items():
v3[k] = v3.get(k, 0) + val / self.params.wu
self.opt3(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
# Report the phase-3 metrics averaged over the `wu` inner iterations.
return objax.functional.parallel.pmean({'monitors/lr': lr, **v1[1], **v2[1], **v3}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = MCD(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('wu', 1, 'Iteration for phase3 (unlabeled weight loss for G).')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def loss_function(x, y, u):
c, h, w = x.shape[-3:]
xu = jn.concatenate((x, u)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_x = jn.split(logit, (2 * x.shape[0],))[0]
logit_x_weak, logit_x_strong = logit_x[::2], logit_x[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_x_weak, y).mean() +
objax.functional.loss.cross_entropy_logits(logit_x_strong, y).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(x, y, u)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = Baseline(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app, flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class FixMatchDA(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_model = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def loss_function(x, y, u):
c, h, w = x.shape[-3:]
xu = jn.concatenate((x, u)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_x_weak = logit[:2 * x.shape[0]:2]
logit_weak, logit_strong = logit[::2], logit[1::2]
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_weak)
p_data = self.stats.p_data(y.mean(0))
p_model = self.stats.p_model(pseudo_labels.mean(0))
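# Distribution alignment: rescale the pseudo-labels by the ratio of the labeled
# class distribution to the model's running unlabeled distribution, then
# renormalize them to sum to one (stop_gradient keeps them as fixed targets).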
pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_model)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = objax.functional.loss.cross_entropy_logits(logit_x_weak, y).mean()
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wu * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'monitors/confidence_ratio': confidence_ratio,
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_data, p_model)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(x, y, u)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = FixMatchDA(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NextMatch9.
"""
import os
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app, flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatch(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
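# Forward the labeled batch alone to get logits under source-only batch-norm
# statistics, then restore the saved variables so this extra pass does not
# pollute the BN state used by the joint (labeled + unlabeled) forward below.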
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
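# Random logit interpolation: mix the labeled logits computed with source-only
# and with joint batch-norm statistics using per-element uniform weights.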
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
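# Relative confidence threshold: when enabled, scale the threshold by the
# model's mean top-1 confidence on weakly-augmented labeled examples.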
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(x, y, u, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = AdaMatch(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NoisyStudent.
"""
import os
import sys
from typing import Callable
import jax
import numpy as np
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from tqdm import tqdm
from semi_supervised.lib.data import MixData, CTAData
from semi_supervised.lib.train import TrainableSSLModule
from shared.data.augment.augment import AugmentPoolBase
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class NoisyStudent(TrainableSSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
train_vars = self.model.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def loss_function(x, y):
c, h, w = x.shape[-3:]
logit_x = self.model(x.reshape((-1, c, h, w)), training=True)
logit_x_weak, logit_x_strong = logit_x[::2], logit_x[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_x_weak, y).mean() +
objax.functional.loss.cross_entropy_logits(logit_x_strong, y).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, x, y, u, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(x, y)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
# jit eval op used only for saving prediction files, avoids batch padding
self.eval_op_jit = objax.Jit(eval_op)
class PseudoLabeler(AugmentPoolBase):
def __init__(self, data: AugmentPoolBase, pseudo_label_file: str, threshold: float, uratio: float = 1):
with open(pseudo_label_file, 'rb') as f:
self.pseudo_label = np.load(f)
self.has_pseudo_label = self.pseudo_label.max(axis=1) > threshold
self.data = data
self.uratio = uratio
def stop(self):
self.data.stop()
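# Rebuild labeled batches by mixing the original labeled examples with unlabeled
# examples whose stored teacher prediction exceeds the threshold, using that
# (soft) prediction as the label.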
def __iter__(self) -> dict:
batch = []
batch_size = None
for d in self.data:
batch_size = batch_size or d['x']['image'].shape[0]
for p, index in enumerate(d['x']['index']):
batch.append(dict(index=index, label=d['x']['label'][p], image=d['x']['image'][p]))
for p, index in enumerate(d['u']['index']):
if self.has_pseudo_label[index]:
batch.append(dict(index=index, label=self.pseudo_label[index], image=d['u']['image'][p]))
np.random.shuffle(batch)
while len(batch) >= batch_size:
current_batch, batch = batch[:batch_size], batch[batch_size:]
d['x'] = dict(image=np.stack([x['image'] for x in current_batch]),
label=np.stack([x['label'] for x in current_batch]),
index=np.stack([x['index'] for x in current_batch]))
yield d
def main(argv):
del argv
assert FLAGS.id, 'You must specify an --id for the run.'
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
dataset_name, samples_per_class, dataset_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}')
labeled = SSL_DATASETS()[dataset_name](samples_per_class, dataset_seed)
unlabeled = FSL_DATASETS()[f'{dataset_name}-0']()
testsets = [unlabeled.test]
module = NoisyStudent(labeled.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSL/{FLAGS.dataset}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
if FLAGS.teacher_id != '':
pseudo_label_logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.teacher_id)
pseudo_label_file = os.path.join(pseudo_label_logdir, 'predictions.npy')
print('Using pseudo label file ', pseudo_label_file)
else:
pseudo_label_file = FLAGS.pseudo_label_file
logdir = os.path.join(FLAGS.logdir, logdir, FLAGS.id)
prediction_file = os.path.join(logdir, 'predictions.npy')
assert not os.path.exists(prediction_file), f'The prediction file "{prediction_file}" already exists, ' \
f'remove it if you want to overwrite it.'
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(
lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(labeled.train, unlabeled.train, labeled.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
if pseudo_label_file != '':
train = PseudoLabeler(train, pseudo_label_file, FLAGS.pseudo_label_th, FLAGS.uratio)
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
predictions = []
for batch in tqdm(unlabeled.train.parse().batch(FLAGS.batch).nchw(),
desc=f'Evaluating unlabeled data', leave=False):
predictions.append(module.eval_op_jit(batch['image']._numpy()))
predictions = np.concatenate(predictions)
with open(prediction_file, 'wb') as f:
np.save(f, predictions)
print('Saved target predictions to', prediction_file)
train.stop()
objax.util.multi_host_barrier()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('pseudo_label_th', 0.9, 'Pseudo-label threshold.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('id', '', 'Id of the experiment.')
flags.DEFINE_string('pseudo_label_file', '', 'Prediction file to read.')
flags.DEFINE_string('teacher_id', '', 'Exp id of teacher. Overrides pseudo_label_file if set.')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32_infograph(10,seed=1)', 'Data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Optional
import numpy as np
from jax import numpy as jn
from objax.jaxboard import Summary
from tqdm import tqdm
from shared.train import TrainableModule
class TrainableSSLModule(TrainableModule):
def eval(self, summary: Summary, epoch: int, test: Dict[str, Iterable], valid: Optional[Iterable] = None):
def get_accuracy(dataset: Iterable):
accuracy, total, batch = 0, 0, None
for data in tqdm(dataset, leave=False, desc='Evaluating'):
x, y = data['image']._numpy(), data['label']._numpy()
total += x.shape[0]
batch = batch or x.shape[0]
if x.shape[0] != batch:
# Pad the last batch if it's smaller than expected (must divide properly on GPUs).
x = np.concatenate([x] + [x[-1:]] * (batch - x.shape[0]))
p = self.eval_op(x)[:y.shape[0]]
accuracy += (np.argmax(p, axis=1) == data['label'].numpy()).sum()
return accuracy / total if total else 0
test_accuracy = {key: get_accuracy(value) for key, value in test.items()}
to_print = []
for key, value in sorted(test_accuracy.items()):
summary.scalar('accuracy/%s' % key, 100 * value)
to_print.append('Accuracy/%s %.2f' % (key, summary['accuracy/%s' % key]()))
print('Epoch %-4d Loss %.2f %s' % (epoch + 1, summary['losses/xe'](), ' '.join(to_print)))
def train_step(self, summary, data, step):
kv, probe = self.train_op(step,
data['x']['image'], data['x']['label'],
data['u']['image'], data.get('probe'))
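# Probe predictions (one shard per device) are concatenated and handed back to
# the augmentation pool so CTAugment can update its policy rates.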
if 'probe_callback' in data:
data['probe_callback'](jn.concatenate(probe))
for k, v in kv.items():
v = v[0]
if jn.isnan(v):
raise ValueError('NaN', k)
summary.scalar(k, float(v))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import multiprocessing
from time import sleep
from typing import List, Tuple
import numpy as np
import objax
import tensorflow as tf
from absl.flags import FLAGS
from shared.data.augment import ctaugment
from shared.data.augment.augment import AugmentPoolBase
from shared.data.augment.core import get_tf_augment
from shared.data.augment.ctaugment import CTAugment
from shared.data.core import DataSet
from shared.data.core import Numpyfier
from shared.data.merger import DataMerger
from shared.util import StoppableThread
class MixData(AugmentPoolBase):
def __init__(self,
labeled: DataSet,
unlabeled: DataSet,
nclass: int, batch: int, uratio: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
weak, strong = tuple(get_tf_augment(ai, size=labeled.image_shape[0]) for ai in a)
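# Each example is augmented twice and stacked so image[0] is the weak view and
# image[1] is the strong view; downstream code de-interleaves them after reshape.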
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
x, u = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
for v in (labeled, unlabeled))
x = Numpyfier(x.batch(batch).one_hot(nclass).prefetch(16))
u = Numpyfier(u.batch(batch * uratio).prefetch(16))
self.train = DataMerger((x, u))
def __iter__(self) -> dict:
for x, u in self.train:
yield dict(x=x, u=u)
class CTAData(AugmentPoolBase):
def __init__(self,
labeled: DataSet,
unlabeled: DataSet,
nclass: int, batch: int, uratio: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
a, kwargs = a[:2], {k: v for k, v in (x.split('=') for x in a[2:])}
h, w, c = labeled.image_shape
para = FLAGS.para_augment
probe_shape = (para, batch, c, h, w)
x_shape = (para, batch, 2, c, h, w)
u_shape = (para, batch * uratio, 2, c, h, w)
del h, w, c
self.shapes = probe_shape, x_shape, u_shape
self.check_mem_requirements()
self.probe, self.x, self.u = self.get_np_arrays(self.shapes)
self.pool = multiprocessing.Pool(para)
self.probe_period = int(kwargs.get('probe', 1))
self.cta = CTAugment(int(kwargs.get('depth', 2)),
float(kwargs.get('th', 0.8)),
float(kwargs.get('decay', 0.99)))
self.to_schedule = collections.deque()
self.deque = collections.deque()
self.free = list(range(para))
self.thread = StoppableThread(target=self.scheduler)
self.thread.start()
weak, strong = tuple(get_tf_augment(ai, size=labeled.image_shape[0]) for ai in a)
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
x, u = (v.repeat().shuffle(FLAGS.shuffle).parse().map(bi_augment, FLAGS.para_augment).nchw()
for v in (labeled, unlabeled))
x = Numpyfier(x.batch(batch).one_hot(nclass).prefetch(16))
u = Numpyfier(u.batch(batch * uratio).prefetch(16))
self.train = DataMerger((x, u))
def stop(self):
self.thread.stop()
def scheduler(self):
while not self.thread.stopped():
sleep(0.0001)
while self.to_schedule:
d, idx, do_probe = self.to_schedule.popleft()
self.x[idx] = d['x']['image']
self.u[idx] = d['u']['image']
worker_args = (idx, self.shapes, do_probe, self.cta)
self.deque.append((d, idx, self.pool.apply_async(self.worker, worker_args)))
@staticmethod
def worker(idx: int, shapes: List[Tuple[int, ...]], do_probe: bool, cta: CTAugment):
def cutout_policy():
return cta.policy(probe=False) + [ctaugment.OP('cutout', (1,))]
probe, x_array, u_array = (arr[idx] for arr in CTAData.get_np_arrays(shapes))
x = objax.util.image.nhwc(x_array)
u = objax.util.image.nhwc(u_array)
nchw = objax.util.image.nchw
sx_strong = np.stack([ctaugment.apply(x[i, 1], cutout_policy()) for i in range(x.shape[0])])
tu_strong = np.stack([ctaugment.apply(u[i, 1], cutout_policy()) for i in range(u.shape[0])])
x_array[:] = nchw(np.stack([x[:, 0], sx_strong], axis=1))
u_array[:] = nchw(np.stack([u[:, 0], tu_strong], axis=1))
if not do_probe:
return
policy_list = [cta.policy(probe=True) for _ in range(x.shape[0])]
probe[:] = nchw(np.stack([ctaugment.apply(x[i, 0], policy)
for i, policy in enumerate(policy_list)]))
return policy_list
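# CTAugment feedback: each probe is a weakly-augmented labeled image transformed
# with a sampled policy; the agreement between the EMA model's prediction and the
# true label (1 - half the L1 distance) weights the policy-rate update.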
def update_rates(self, policy, label, y_probe):
w1 = 1 - 0.5 * np.abs(y_probe - label).sum(1)
for p in range(w1.shape[0]):
self.cta.update_rates(policy[p], w1[p])
def __iter__(self):
for i, (x, u) in enumerate(self.train):
if not self.free:
while not self.deque:
sleep(0.0001)
d, idx, pd = self.deque.popleft()
self.free.append(idx)
policy = pd.get()
if policy:
d['probe'] = np.copy(self.probe[idx])
d['policy'] = policy
d['probe_callback'] = functools.partial(self.update_rates, d['policy'], d['x']['label'])
d['x']['image'][:] = self.x[idx]
d['u']['image'][:] = self.u[idx]
yield d
self.to_schedule.append((dict(x=x, u=u), self.free.pop(), (i % self.probe_period) == 0))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fully supervised on a pair of domains.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from fully_supervised.lib.data import MixData, CTAData
from fully_supervised.lib.train import TrainableFSLModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableFSLModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
train_vars = self.model.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False))
def loss_function(sx, sy, tx, ty):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tx)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tx = jn.split(logit, (2 * sx.shape[0],))
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tx_weak, logit_tx_strong = logit_tx[::2], logit_tx[1::2]
xe = 0.25 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_tx_weak, ty).mean() +
objax.functional.loss.cross_entropy_logits(logit_tx_strong, ty).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, probe=None):
y_probe = eval_op(probe) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tx, ty)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op)
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
# In the fully supervised setting, source and target are both labeled; the names are kept to reuse the code.
if FLAGS.target == '':
FLAGS.target = FLAGS.source
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
module = Baseline(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch)
logdir = f'FSL/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {
f'{FLAGS.source}_to_{FLAGS.target}': (list(target.test.values())[0].parse().batch(FLAGS.batch).nchw()
.map(lambda d: {**d, 'domain': 0}).prefetch(16)),
f'{FLAGS.target}_to_{FLAGS.source}': (list(source.test.values())[0].parse().batch(FLAGS.batch).nchw()
.map(lambda d: {**d, 'domain': 1}).prefetch(16)),
}
if FLAGS.test_extra:
for ds_name in FLAGS.test_extra.split(','):
ds = FSL_DATASETS()[f'{FLAGS.dataset}_{ds_name}-0']()
test[f'{FLAGS.target}_to_{ds_name}'] = (list(ds.test.values())[0].parse().batch(FLAGS.batch).nchw()
.map(lambda d: {**d, 'domain': 1}).prefetch(16))
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', '', 'Target data to train on.')
flags.DEFINE_string('test_extra', 'clipart,infograph,quickdraw,real,sketch,painting',
'Comma-separated list of datasets on which to report test accuracy.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Iterable, Optional
import numpy as np
from jax import numpy as jn
from objax.jaxboard import Summary
from tqdm import tqdm
from shared.train import TrainableModule
class TrainableFSLModule(TrainableModule):
def eval(self, summary: Summary, epoch: int, test: Dict[str, Iterable], valid: Optional[Iterable] = None):
def get_accuracy(dataset: Iterable):
accuracy, total, batch = 0, 0, None
for data in tqdm(dataset, leave=False, desc='Evaluating'):
x, y = data['image']._numpy(), data['label']._numpy()
total += x.shape[0]
batch = batch or x.shape[0]
if x.shape[0] != batch:
# Pad the last batch if it's smaller than expected (must divide properly on GPUs).
x = np.concatenate([x] + [x[-1:]] * (batch - x.shape[0]))
p = self.eval_op(x)[:y.shape[0]]
accuracy += (np.argmax(p, axis=1) == data['label'].numpy()).sum()
return accuracy / total if total else 0
test_accuracy = {key: get_accuracy(value) for key, value in test.items()}
to_print = []
for key, value in sorted(test_accuracy.items()):
summary.scalar('accuracy/%s' % key, 100 * value)
to_print.append('Accuracy/%s %.2f' % (key, summary['accuracy/%s' % key]()))
print('Epoch %-4d Loss %.2f %s' % (epoch + 1, summary['losses/xe'](), ' '.join(to_print)))
def train_step(self, summary, data, step):
kv, probe = self.train_op(step,
data['sx']['image'], data['sx']['label'],
data['tx']['image'], data['tx']['label'], data.get('probe'))
if 'probe_callback' in data:
data['probe_callback'](jn.concatenate(probe))
for k, v in kv.items():
v = v[0]
if jn.isnan(v):
raise ValueError('NaN', k)
summary.scalar(k, float(v))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import multiprocessing
from time import sleep
from typing import List, Tuple
import numpy as np
import objax
import tensorflow as tf
from absl.flags import FLAGS
from shared.data.augment import ctaugment
from shared.data.augment.augment import AugmentPoolBase
from shared.data.augment.core import get_tf_augment
from shared.data.augment.ctaugment import CTAugment
from shared.data.core import DataSet
from shared.data.core import Numpyfier
from shared.data.merger import DataMerger
from shared.util import StoppableThread
class MixData(AugmentPoolBase):
def __init__(self,
source: DataSet,
target: DataSet,
nclass: int, batch: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
weak, strong = tuple(get_tf_augment(ai, size=source.image_shape[0]) for ai in a)
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
sx, tx = (Numpyfier(v.repeat().shuffle(FLAGS.shuffle).parse()
.map(bi_augment, FLAGS.para_augment).nchw()
.batch(batch).one_hot(nclass).prefetch(16))
for v in (source, target))
self.train = DataMerger((sx, tx))
def __iter__(self) -> dict:
for sx, tx in self.train:
yield dict(sx=sx, tx=tx)
class CTAData(AugmentPoolBase):
def __init__(self,
source: DataSet,
target: DataSet,
nclass: int, batch: int):
a = FLAGS.augment.split('(')[-1]
a = a[:-1].split(',')
a, kwargs = a[:2], {k: v for k, v in (x.split('=') for x in a[2:])}
h, w, c = source.image_shape
para = FLAGS.para_augment
probe_shape = (para, 2 * batch, c, h, w)
sx_shape = (para, batch, 2, c, h, w)
tx_shape = (para, batch, 2, c, h, w)
del h, w, c
self.shapes = probe_shape, sx_shape, tx_shape
self.check_mem_requirements()
self.probe, self.sx, self.tx = self.get_np_arrays(self.shapes)
self.pool = multiprocessing.Pool(para)
self.probe_period = int(kwargs.get('probe', 1))
self.cta = CTAugment(int(kwargs.get('depth', 2)),
float(kwargs.get('th', 0.8)),
float(kwargs.get('decay', 0.99)))
self.to_schedule = collections.deque()
self.deque = collections.deque()
self.free = list(range(para))
self.thread = StoppableThread(target=self.scheduler)
self.thread.start()
weak, strong = tuple(get_tf_augment(ai, size=source.image_shape[0]) for ai in a)
def bi_augment(d):
return dict(image=tf.stack([weak(d)['image'], strong(d)['image']]), index=d['index'], label=d['label'])
sx, tx = (Numpyfier(v.repeat().shuffle(FLAGS.shuffle).parse()
.map(bi_augment, FLAGS.para_augment).nchw()
.batch(batch).one_hot(nclass).prefetch(16))
for v in (source, target))
self.train = DataMerger((sx, tx))
def stop(self):
self.thread.stop()
def scheduler(self):
while not self.thread.stopped():
sleep(0.0001)
while self.to_schedule:
d, idx, do_probe = self.to_schedule.popleft()
self.sx[idx] = d['sx']['image']
self.tx[idx] = d['tx']['image']
worker_args = (idx, self.shapes, do_probe, self.cta)
self.deque.append((d, idx, self.pool.apply_async(self.worker, worker_args)))
@staticmethod
def worker(idx: int, shapes: List[Tuple[int, ...]], do_probe: bool, cta: CTAugment):
def cutout_policy():
return cta.policy(probe=False) + [ctaugment.OP('cutout', (1,))]
probe, sx_array, tx_array = (arr[idx] for arr in CTAData.get_np_arrays(shapes))
sx = objax.util.image.nhwc(sx_array)
tx = objax.util.image.nhwc(tx_array)
nchw = objax.util.image.nchw
sx_strong = np.stack([ctaugment.apply(sx[i, 1], cutout_policy()) for i in range(sx.shape[0])])
tu_strong = np.stack([ctaugment.apply(tx[i, 1], cutout_policy()) for i in range(tx.shape[0])])
sx_array[:] = nchw(np.stack([sx[:, 0], sx_strong], axis=1))
tx_array[:] = nchw(np.stack([tx[:, 0], tu_strong], axis=1))
if not do_probe:
return
policy_list_s = [cta.policy(probe=True) for _ in range(sx.shape[0])]
policy_list_t = [cta.policy(probe=True) for _ in range(tx.shape[0])]
probe[:] = nchw(np.stack([ctaugment.apply(sx[i, 0], policy) for i, policy in enumerate(policy_list_s)] +
[ctaugment.apply(tx[i, 0], policy) for i, policy in enumerate(policy_list_t)]))
return policy_list_s + policy_list_t
def update_rates(self, policy, label, y_probe):
w1 = 1 - 0.5 * np.abs(y_probe - label).sum(1)
for p in range(w1.shape[0]):
self.cta.update_rates(policy[p], w1[p])
def __iter__(self):
for i, (sx, tx) in enumerate(self.train):
if not self.free:
while not self.deque:
sleep(0.0001)
d, idx, pd = self.deque.popleft()
self.free.append(idx)
policy = pd.get()
if policy:
d['probe'] = np.copy(self.probe[idx])
d['policy'] = policy
d['probe_callback'] = functools.partial(self.update_rates, d['policy'],
np.concatenate((d['sx']['label'], d['tx']['label'])))
d['sx']['image'][:] = self.sx[idx]
d['tx']['image'][:] = self.tx[idx]
yield d
self.to_schedule.append((dict(sx=sx, tx=tx), self.free.pop(), (i % self.probe_period) == 0))
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import numpy as np
import objax
import tensorflow as tf
from objax.typing import JaxArray
class StoppableThread(threading.Thread):
def __init__(self, *args, **kwargs):
super(StoppableThread, self).__init__(*args, **kwargs)
self._stop_event = threading.Event()
def stop(self):
self._stop_event.set()
def stopped(self):
return self._stop_event.is_set()
class MyParallel(objax.Parallel):
def device_reshape(self, x: JaxArray) -> JaxArray:
"""Utility to reshape an input array in order to broadcast to multiple devices."""
assert hasattr(x, 'ndim'), f'Expected JaxArray, got {type(x)}. If you are trying to pass a scalar to ' \
f'parallel, first convert it to a JaxArray, for example np.float32(0.5).'
if x.ndim == 0:
return np.broadcast_to(x, [self.ndevices]) # Use np instead of jnp, 2x faster.
assert x.shape[0] % self.ndevices == 0, f'Must be able to equally divide batch {x.shape} among ' \
f'{self.ndevices} devices, but it does not divide evenly.'
return x.reshape((self.ndevices, x.shape[0] // self.ndevices) + x.shape[1:])
def setup_tf():
tf.config.experimental.set_visible_devices([], "GPU")
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional, Dict, Iterable, List, Tuple, Callable
import jax
import numpy as np
import objax
from jax import numpy as jn
from objax.jaxboard import SummaryWriter, Summary
from objax.util import EasyDict
from tqdm import tqdm
from tqdm.std import trange
from shared.data import core
class TrainableModule(objax.Module):
model: objax.Module
model_ema: objax.Module
eval_op: Callable
train_op: Callable
def __init__(self, nclass: int, params: dict):
self.nclass = nclass
self.params = EasyDict(params)
def __str__(self):
text = [str(self.model.vars()), 'Parameters'.center(79, '-')]
for kv in sorted(self.params.items()):
text.append('%-32s %s' % kv)
return '\n'.join(text)
def train_step(self, summary: objax.jaxboard.Summary, data: dict, step: int):
kv = self.train_op(step, data['image'], data['label'])
for k, v in kv.items():
if jn.isnan(v):
raise ValueError('NaN', k)
summary.scalar(k, float(v))
def eval(self, summary: Summary, epoch: int, test: Dict[str, Iterable], valid: Optional[Iterable] = None):
def get_accuracy(dataset: Iterable):
accuracy, total, batch = 0, 0, None
for data in tqdm(dataset, leave=False, desc='Evaluating'):
x, y = data['image'].numpy(), data['label'].numpy()
total += x.shape[0]
batch = batch or x.shape[0]
if x.shape[0] != batch:
# Pad the last batch if it's smaller than expected (must divide properly on GPUs).
x = np.concatenate([x] + [x[-1:]] * (batch - x.shape[0]))
p = self.eval_op(x)[:y.shape[0]]
accuracy += (np.argmax(p, axis=1) == data['label'].numpy()).sum()
return accuracy / total if total else 0
if valid:
valid_accuracy = get_accuracy(valid)
summary.scalar('accuracy/valid', 100 * valid_accuracy)
else:
valid_accuracy = 0
test_accuracy = {key: get_accuracy(value) for key, value in test.items()}
to_print = []
for key, value in sorted(test_accuracy.items()):
summary.scalar(f'accuracy/{key}', 100 * value)
to_print.append('Accuracy/%s %.2f' % (key, summary[f'accuracy/{key}']()))
print(f'Epoch {epoch + 1:<4d} Loss {summary["losses/xe"]():.2f} '
f'{" ".join(to_print)} (Valid {valid_accuracy * 100:.2f})')
def auto_gpu(self):
if isinstance(self.train_op, objax.Parallel):
return self.train_op.vars().replicate()
return objax.util.dummy_context_mgr()
def train(self, train_kimg: int, report_kimg: int, train: Iterable,
test: Dict[str, Iterable], logdir: str, keep_ckpts: int, verbose: bool = True):
if verbose:
print(self)
print()
print('Training config'.center(79, '-'))
print('%-20s %s' % ('Project:', os.path.basename(core.DATA_DIR)))
print('%-20s %s' % ('Test sets:', test.keys()))
print('%-20s %s' % ('Work directory:', logdir))
print()
ckpt = objax.io.Checkpoint(logdir=logdir, keep_ckpts=keep_ckpts)
start_epoch = ckpt.restore(self.vars())[0]
tensorboard = SummaryWriter(os.path.join(logdir, 'tb'))
train_iter = iter(train)
for epoch in range(start_epoch, train_kimg // report_kimg):
summary = Summary()
loop = trange(0, report_kimg << 10, self.params.batch,
leave=False, unit='img', unit_scale=self.params.batch,
desc='Epoch %d/%d' % (1 + epoch, train_kimg // report_kimg))
with self.auto_gpu():
for step in loop:
self.train_step(summary, next(train_iter), step=np.uint32(step + (epoch * (report_kimg << 10))))
self.eval(summary, epoch, test)
tensorboard.write(summary, step=(epoch + 1) * report_kimg * 1024)
ckpt.save(self.vars(), epoch + 1)
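# Piecewise cosine schedule: each (end, value) phase cosine-interpolates the gain
# from the previous phase's value to the new one, and the result is scaled by `v`.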
class ScheduleCosPhases:
def __init__(self, v: float, phases: List[Tuple[float, float]], start_value: float = 1): # (Phase end, value)
self.v = v
self.start_value = float(start_value)
self.phases = tuple(tuple(map(float, x)) for x in phases)
def __call__(self, progress: float):
start, gain, cur_gain = 0., self.start_value, self.start_value
for stop, next_gain in self.phases:
assert stop > start
scale = 0.5 - 0.5 * jn.cos(jn.pi * (progress - start) / (stop - start))
gain = jax.lax.cond(jn.logical_and(jn.greater_equal(progress, start), jn.less_equal(progress, stop)),
lambda gain: cur_gain + (next_gain - cur_gain) * scale,
lambda gain: gain,
operand=gain)
cur_gain, start = next_gain, stop
return self.v * gain
class ScheduleCos:
def __init__(self, v: float, decay: float):
self.v = float(v)
self.slope = jn.arccos(decay)
def __call__(self, progress: float):
return self.v * jn.cos(self.slope * progress)
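# Example (sketch): with ScheduleCos(v=0.03, decay=0.25), progress=0 gives 0.03 and
# progress=1 gives 0.03 * cos(arccos(0.25)) = 0.03 * 0.25 = 0.0075, i.e. the value
# follows a cosine from v down to v * decay as progress goes from 0 to 1.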
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import jax.numpy as jn
import objax
from objax.typing import JaxArray
from objax.zoo.resnet_v2 import ResNet101, load_pretrained_weights_from_keras
from shared.zoo.wideresnet import WideResNet
ARCHS = 'wrn28-2 wrn28-4 wrn34-2 wrn40-2 resnet101 resnet101pretrain'.split()
class preprocess(objax.nn.Sequential):
@staticmethod
def _swap_channel(x):
return x[:, ::-1, :, :]
@staticmethod
def _scale_back(x):
return (x * 128) + 127.5
def _subtract_mean(self, x):
return x - jn.array([103.939, 116.779, 123.68])[None, :, None, None]
def __init__(self):
ops = [self._swap_channel, self._scale_back, self._subtract_mean]
super().__init__(ops)
def resnet(cls, colors, nclass, bn=objax.nn.BatchNorm2D, **kwargs):
return cls(colors, nclass, normalization_fn=bn, **objax.util.local_kwargs(kwargs, ResNet101))
def resnet_pretrained(cls, colors, nclass, bn=objax.nn.BatchNorm2D, **kwargs):
preprocess_input = preprocess()
model = cls(include_top=False, num_classes=nclass)
return objax.nn.Sequential(preprocess_input + model)
def network(arch: str):
if arch == 'wrn28-2':
return functools.partial(WideResNet, scales=3, filters=32, repeat=4)
elif arch == 'wrn28-4':
return functools.partial(WideResNet, scales=3, filters=64, repeat=4)
elif arch == 'wrn34-2':
return functools.partial(WideResNet, scales=4, filters=32, repeat=4)
elif arch == 'wrn40-2':
return functools.partial(WideResNet, scales=5, filters=32, repeat=4)
elif arch == 'resnet101':
return functools.partial(resnet, cls=ResNet101)
elif arch == 'resnet101pretrain':
return functools.partial(resnet_pretrained, cls=functools.partial(load_pretrained_weights_from_keras,
arch='ResNet101'))
raise ValueError('Architecture not recognized', arch)
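# Usage sketch (assuming 32x32 RGB inputs and 10 classes):
#   model_fn = network('wrn28-2')
#   model = model_fn(colors=3, nclass=10)  # WideResNet with scales=3, filters=32, repeat=4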
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import inspect
from typing import Callable
import jax
import objax
from objax.typing import JaxArray
__all__ = ['WideResNetBlock', 'WideResNet']
def local_kwargs(f, **kwargs):
s = inspect.signature(f).parameters
if s:
if next(reversed(s.values())).kind == inspect.Parameter.VAR_KEYWORD:
return kwargs
else:
return {k: v for k, v in kwargs.items() if k in s}
return {}
def leaky_relu(x):
return objax.functional.leaky_relu(x, 0.1)
def conv_args(k, f):
# Scaled-normal initializer: stddev = sqrt(2 / (k * k * f)) for a k x k convolution with f output filters.
return dict(w_init=functools.partial(objax.random.normal, stddev=objax.functional.rsqrt(0.5 * k * k * f)))
class WideResNetBlock(objax.Module):
def __init__(self, nin: int, nout: int, stride: int = 1, activate_before_residual: bool = False,
bn: Callable = objax.nn.BatchNorm2D):
self.activate_before_residual = activate_before_residual
self.bn = bn(nin, momentum=0.999)
self.residual = objax.nn.Sequential([objax.nn.Conv2D(nin, nout, 3, strides=stride, **conv_args(3, nout)),
bn(nout, momentum=0.999), leaky_relu,
objax.nn.Conv2D(nout, nout, 3, **conv_args(3, nout))])
self.passthrough = objax.nn.Conv2D(nin, nout, 1, strides=stride, **conv_args(1, nout)) if nin != nout else None
def __call__(self, x: JaxArray, **kwargs) -> JaxArray:
y = leaky_relu(self.bn(x, **local_kwargs(self.bn, **kwargs)))
if self.activate_before_residual:
x = y
if self.passthrough:
x = self.passthrough(x)
return x + self.residual(y, **kwargs)
class WideResNet(objax.nn.Sequential):
@staticmethod
def mean_reduce(x: JaxArray) -> JaxArray:
return x.mean((2, 3))
def __init__(self, colors: int, nclass: int, scales: int, filters: int, repeat: int, dropout: int = 0,
bn: Callable = objax.nn.BatchNorm2D, **kwargs):
del kwargs
n = 16
ops = [objax.nn.Conv2D(colors, n, 3, **conv_args(3, n))]
for scale in range(scales):
last_n, n = n, filters << scale
ops.append(WideResNetBlock(last_n, n, stride=2 if scale else 1, activate_before_residual=scale == 0, bn=bn))
ops.extend([WideResNetBlock(n, n, bn=bn) for _ in range(repeat - 1)])
ops.extend([bn(n, momentum=0.999), leaky_relu, self.mean_reduce,
objax.nn.Dropout(1 - dropout),
objax.nn.Linear(n, nclass, w_init=objax.nn.init.xavier_truncated_normal)])
super().__init__(ops)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import *
from .merger import *
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Callable, Optional, Tuple, List, Iterable
import numpy as np
import tensorflow as tf
from absl import app
from absl import flags
from absl.flags import FLAGS
# Data directory. Value is initialized in _data_setup
#
# Note that if you need to use DATA_DIR outside of this module then
# you should do following:
# from shared.data import core as shared_data
# ...
# dir = shared_data.DATA_DIR
#
# If you directly import DATA_DIR:
# from shared.data.core import DATA_DIR
# then None will be imported.
DATA_DIR = None
_DATA_CACHE = None
flags.DEFINE_integer('para_parse', 8, 'Parallel parsing.')
flags.DEFINE_integer('para_augment', 8, 'Parallel augmentation.')
flags.DEFINE_integer('shuffle', 8192, 'Size of dataset shuffling.')
flags.DEFINE_string('data_dir', '',
'Data directory. '
'If None then environment variable ML_DATA '
'will be used as a data directory.')
DATA_DIR = os.path.join(os.environ['ML_DATA'], 'NextMatch') if 'ML_DATA' in os.environ else ''
def _data_setup():
# set up data directory
global DATA_DIR
if FLAGS.data_dir != '':
DATA_DIR = os.path.join(FLAGS.data_dir, 'NextMatch')
assert DATA_DIR
app.call_after_init(_data_setup)
def from_uint8(image):
return (tf.cast(image, tf.float32) - 127.5) / 128
def to_uint8(image):
return tf.cast(128 * image + 127.5, tf.uint8)
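# Worked example: from_uint8(255) = (255 - 127.5) / 128 ~= 0.996 and from_uint8(0) ~= -0.996,
# so pixel values are mapped to roughly [-1, 1]; to_uint8 is the approximate inverse.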
def label_parse(index: int, record: str, *args):
features = tf.io.parse_single_example(record, features={'label': tf.io.FixedLenFeature([], tf.int64)})
return dict(index=index, label=features['label'])
def record_parse(index: int, record: str, image_shape: Tuple[int, int, int]):
features = tf.io.parse_single_example(record, features={'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64)})
image = tf.image.decode_image(features['image'])
image.set_shape(image_shape)
image = from_uint8(image)
return dict(index=index, image=image, label=features['label'])
def record_parse_mnist(index: int, record: str, image_shape: Tuple[int, int, int]):
del image_shape
features = tf.io.parse_single_example(record, features={'image': tf.io.FixedLenFeature([], tf.string),
'label': tf.io.FixedLenFeature([], tf.int64)})
image = tf.image.decode_image(features['image'])
image = tf.pad(image, [(2, 2), (2, 2), (0, 0)])
image.set_shape((32, 32, 3))
image = from_uint8(image)
return dict(index=index, image=image, label=features['label'])
class DataSet:
"""Wrapper for tf.data.Dataset to permit extensions."""
def __init__(self, data: tf.data.Dataset,
image_shape: Tuple[int, int, int],
parse_fn: Optional[Callable] = record_parse):
self.data = data
self.parse_fn = parse_fn
self.image_shape = image_shape
@classmethod
def from_arrays(cls, images: np.ndarray, labels: np.ndarray):
return cls(tf.data.Dataset.from_tensor_slices(dict(image=images, label=labels,
index=np.arange(images.shape[0], dtype=np.int64))),
images.shape[1:], parse_fn=None)
@classmethod
def from_files(cls, filenames: List[str],
image_shape: Tuple[int, int, int],
parse_fn: Optional[Callable] = record_parse,
cache: bool = False):
filenames_in = filenames
filenames = sorted(sum([tf.io.gfile.glob(x) for x in filenames], []))
if not filenames:
raise ValueError('Empty dataset, files not found:', filenames_in)
d = tf.data.TFRecordDataset(filenames, num_parallel_reads=len(filenames))
d = d.cache() if cache else d
return cls(d.enumerate(), image_shape, parse_fn=parse_fn)
@classmethod
def from_tfds(cls, dataset: tf.data.Dataset, image_shape: Tuple[int, int, int], cache: bool = False):
d = dataset.cache() if cache else dataset.prefetch(16384)
d = d.enumerate()
d = d.map(lambda index, x: dict(image=tf.cast(x['image'], tf.float32) / 127.5 - 1, label=x['label'], index=index))
return cls(d, image_shape, parse_fn=None)
def __iter__(self):
return iter(self.data)
def __getattr__(self, item):
if item in self.__dict__:
return self.__dict__[item]
def call_and_update(*args, **kwargs):
v = getattr(self.__dict__['data'], item)(*args, **kwargs)
if isinstance(v, tf.data.Dataset):
return self.__class__(v, self.image_shape, parse_fn=self.parse_fn)
return v
return call_and_update
def dmap(self, f: Callable, para: int = 0):
return self.map(lambda x: f(**x), para or FLAGS.para_parse)
def nchw(self, key: str = 'image'):
def apply(d_in):
return {**d_in, key: tf.transpose(d_in[key], [0, 3, 1, 2])}
return self.map(apply, FLAGS.para_parse)
def one_hot(self, nclass):
return self.dmap(lambda label, **kw: dict(label=tf.one_hot(label, nclass), **kw))
def parse(self, para: int = 0):
if not self.parse_fn:
return self
para = para or FLAGS.para_parse
if self.image_shape:
return self.map(lambda index, record: self.parse_fn(index, record, self.image_shape), para)
return self.map(lambda index, record: self.parse_fn(index, record), para)
def __len__(self):
count = 0
for _ in self.data:
count += 1
return count
class Numpyfier:
def __init__(self, dataset: Iterable):
self.dataset = dataset
def __iter__(self):
for d in self.dataset:
yield {k: v.numpy() for k, v in d.items()}
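# Usage sketch (file name is hypothetical): iterate an evaluation pipeline as numpy dicts:
#   ds = DataSet.from_files(['cifar10-test.tfrecord'], (32, 32, 3)).parse().batch(256)
#   for batch in Numpyfier(ds):
#       images, labels = batch['image'], batch['label']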
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from typing import List, Callable, Dict
import numpy as np
from shared.data import core
from shared.data.core import DataSet, record_parse, record_parse_mnist
class DataSetsFSL:
def __init__(self, name: str, train: DataSet, test: Dict[str, DataSet], valid: DataSet, nclass: int = 10):
self.name = name
self.train = train
self.test = test
self.valid = valid
self.nclass = nclass
@property
def image_shape(self):
return self.train.image_shape
@property
def colors(self):
return self.image_shape[2]
@property
def height(self):
return self.image_shape[0]
@property
def width(self):
return self.image_shape[1]
@classmethod
def creator(cls, name: str, train_files: List[str], test_files: Dict[str, List[str]], valid: int,
parse_fn: Callable = record_parse,
nclass: int = 10, height: int = 32, width: int = 32, colors: int = 3, cache: bool = False):
train_files = [os.path.join(core.DATA_DIR, x) for x in train_files]
test_files = {key: [os.path.join(core.DATA_DIR, x) for x in value] for key, value in test_files.items()}
def create():
image_shape = height, width, colors
kw = dict(parse_fn=parse_fn)
datasets = dict(train=DataSet.from_files(train_files, image_shape, cache=cache, **kw).skip(valid),
valid=DataSet.from_files(train_files, image_shape, cache=cache, **kw).take(valid),
test={key: DataSet.from_files(value, image_shape, cache=cache, **kw)
for key, value in test_files.items()})
return cls(name + '-' + str(valid), nclass=nclass, **datasets)
return name + '-' + str(valid), create
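# Usage sketch: creator returns a (key, factory) pair; the factory builds the splits lazily:
#   key, make = DataSetsFSL.creator('cifar10', ['cifar10-train.tfrecord'],
#                                   {'cifar10': ['cifar10-test.tfrecord']}, 5000, cache=True)
#   datasets = make()  # DataSetsFSL with train/valid/test DataSet objects; key == 'cifar10-5000'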
def create_datasets(samples_per_class=(1, 2, 3, 4, 5, 10, 25, 100, 400)):
samples_per_class = np.array(samples_per_class, np.uint32)
d = {}
d.update([DataSetsFSL.creator('mnist', ['mnist-train.tfrecord'], {'mnist': ['mnist-test.tfrecord']}, valid,
cache=True, parse_fn=record_parse_mnist) for valid in [0, 5000]])
d.update([DataSetsFSL.creator('cifar10', ['cifar10-train.tfrecord'], {'cifar10': ['cifar10-test.tfrecord']}, valid,
cache=True) for valid in [0, 5000]])
d.update(
[DataSetsFSL.creator('cifar100', ['cifar100-train.tfrecord'], {'cifar100': ['cifar100-test.tfrecord']}, valid,
nclass=100, cache=True) for valid in [0, 5000]])
d.update([DataSetsFSL.creator('svhn', ['svhn-train.tfrecord'], {'svhn': ['svhn-test.tfrecord']},
valid) for valid in [0, 5000]])
d.update([DataSetsFSL.creator('svhnx', ['svhn-train.tfrecord', 'svhn-extra.tfrecord'],
{'svhn': ['svhn-test.tfrecord']}, valid) for valid in [0, 5000]])
d.update([DataSetsFSL.creator('cifar10_1', ['cifar10-train.tfrecord'],
{'cifar10': ['cifar10-test.tfrecord'], 'cifar10.1': ['cifar10.1-test.tfrecord']},
valid, cache=True) for valid in [0, 5000]])
d.update([DataSetsFSL.creator('cifar10.%d@%d' % (seed, sz), ['SSL/cifar10.%d@%d-label.tfrecord' % (seed, sz)],
{'cifar10': ['cifar10-test.tfrecord']}, valid, cache=True)
for valid, seed, sz in itertools.product([0, 5000], range(6), 10 * samples_per_class)])
d.update([DataSetsFSL.creator('cifar100.%d@%d' % (seed, sz), ['SSL/cifar100.%d@%d-label.tfrecord' % (seed, sz)],
{'cifar100': ['cifar100-test.tfrecord']}, valid, nclass=100)
for valid, seed, sz in itertools.product([0, 5000], range(6), 100 * samples_per_class)])
d.update([DataSetsFSL.creator('svhn.%d@%d' % (seed, sz), ['SSL/svhn.%d@%d-label.tfrecord' % (seed, sz)],
{'svhn': ['svhn-test.tfrecord']}, valid)
for valid, seed, sz in itertools.product([0, 5000], range(6), 10 * samples_per_class)])
d.update([DataSetsFSL.creator('svhnx.%d@%d' % (seed, sz), ['SSL/svhnx.%d@%d-label.tfrecord' % (seed, sz)],
{'svhn': ['svhn-test.tfrecord']}, valid)
for valid, seed, sz in itertools.product([0, 5000], range(6), 10 * samples_per_class)])
d.update([DataSetsFSL.creator('stl10.%d@%d' % (seed, sz), ['SSL/stl10.%d@%d-label.tfrecord' % (seed, sz)],
{'stl10': ['stl10-test.tfrecord']}, valid, height=96, width=96)
for valid, seed, sz in itertools.product([0, 5000], range(6), 10 * samples_per_class)])
# DomainNet datasets
categories = ['clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch']
for category, size in itertools.product(categories, [32, 64, 128, 224]):
d.update([DataSetsFSL.creator(f'domainnet{size}_{category}',
[f'domainnet{size}_{category}-train.tfrecord'],
{category: [f'domainnet{size}_{category}-test.tfrecord']},
valid, height=size, width=size, nclass=345,
cache=size <= 64) for valid in [0, 5000]])
d.update([DataSetsFSL.creator(
f'domainnet{size}_no_{category}',
[f'domainnet{size}_{t}-train.tfrecord' for t in categories if t != category],
{f'no_{category}': [f'domainnet{size}_{t}-test.tfrecord' for t in categories if t != category]},
valid, height=size, width=size, nclass=345, cache=size <= 64) for valid in [0, 5000]])
# Office31 datasets
categories = ['webcam', 'dslr', 'amazon']
for category, size in itertools.product(categories, [32, 64, 128, 224]):
d.update([DataSetsFSL.creator(f'office31{size}_{category}',
[f'office31{size}_{category}-train.tfrecord'],
{category: [f'office31{size}_{category}-test.tfrecord']},
valid, height=size, width=size, nclass=31,
cache=size <= 64) for valid in [0, 5000]])
# DigitFive datasets
categories = ['usps', 'mnist', 'mnistm', 'svhn', 'syndigit']
for category, size in itertools.product(categories, [32]):
d.update([DataSetsFSL.creator(f'digitfive{size}_{category}',
[f'digitfive{size}_{category}-train.tfrecord'],
{category: [f'digitfive{size}_{category}-test.tfrecord']},
valid, height=size, width=size, nclass=10,
cache=size <= 64) for valid in [0, 5000]])
return d
DATASETS = create_datasets
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import time
from typing import List, Callable, Tuple
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from shared.data import core
from shared.data.core import record_parse_mnist, DataSet, record_parse, label_parse
class DataSetSSL:
def __init__(self, name: str, train: DataSet, nclass: int = 10):
self.name = name
self.train = train
self.nclass = nclass
@property
def image_shape(self):
return self.train.image_shape
@property
def colors(self):
return self.image_shape[2]
@property
def height(self):
return self.image_shape[0]
@property
def width(self):
return self.image_shape[1]
@classmethod
def creator(cls, name: str, train_files: List[str], parse_fn: Callable = record_parse,
nclass: int = 10, height: int = 32, width: int = 32, colors: int = 3, cache: bool = False):
train_files = [os.path.join(core.DATA_DIR, x) for x in train_files]
def create(samples_per_class: int, seed: int):
target_file = os.path.join(core.DATA_DIR, f'{name}({samples_per_class},seed={seed}).tfrecord')
if not os.path.exists(target_file):
cls.materialize_subset(target_file, train_files, samples_per_class, seed, nclass)
image_shape = height, width, colors
train = DataSet.from_files([target_file], image_shape, cache=cache, parse_fn=parse_fn)
return cls(name, nclass=nclass, train=train)
return name, create
@staticmethod
def parse_name(name: str) -> Tuple[str, int, int]:
try:
name, params = name.split('(')
params = params.split(',')
samples_per_class = int(params[0])
seed = int(params[1][5:-1])
except (ValueError, IndexError):
raise ValueError(f'Name "{name}" must be of the form name(int,seed=int).')
return name, samples_per_class, seed
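# Worked example: parse_name('cifar10(40,seed=3)') returns ('cifar10', 40, 3).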
@staticmethod
def materialize_subset(target_file: str, train_files: List[str], samples_per_class: int, seed: int, nclass: int):
print(f'Materializing subset {target_file}')
print(f'\015 {"Samples per class":32s}', samples_per_class)
print(f'\015 {"Random seed":32s}', seed)
t0 = time.time()
train = DataSet.from_files(train_files, (0, 0, 0), parse_fn=label_parse)
class_to_idx = [[] for _ in range(nclass)]
for batch in tqdm(train.parse().batch(1024), leave=False, desc='Building class map'):
for idx, label in zip(batch['index']._numpy(), batch['label']._numpy()):
class_to_idx[label].append(idx)
print(f'\015 {"Number of source samples":32s}', sum(len(x) for x in class_to_idx))
np.random.seed(seed)
class_to_idx = [np.random.choice(x, samples_per_class, replace=True) for x in class_to_idx]
keep_idx = set()
for x in class_to_idx:
keep_idx |= set(x)
print(f'\015 {"Number of target samples":32s}', sum(len(x) for x in class_to_idx))
with tf.io.TFRecordWriter(target_file + '.tmp') as writer:
for index, record in tqdm(train, leave=False, desc=f'Saving dataset {target_file}'):
if index._numpy() not in keep_idx:
continue
writer.write(record._numpy())
os.rename(target_file + '.tmp', target_file)
print(f'\015 {"File size":32s}', os.path.getsize(target_file))
print(f'\015 Completed in {int(time.time() - t0)}s')
def create_datasets():
d = {}
d.update([DataSetSSL.creator('cifar10', ['cifar10-train.tfrecord'], cache=True)])
d.update([DataSetSSL.creator('mnist', ['mnist-train.tfrecord'], cache=True, parse_fn=record_parse_mnist)])
d.update([DataSetSSL.creator('svhn', ['svhn-train.tfrecord'])])
d.update([DataSetSSL.creator('svhnx', ['svhn-train.tfrecord', 'svhn-extra.tfrecord'])])
# DomainNet datasets
categories = ['clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch']
for category, size in itertools.product(categories, [32, 64, 128, 224]):
d.update([DataSetSSL.creator(f'domainnet{size}_{category}', [f'domainnet{size}_{category}-train.tfrecord'],
height=size, width=size, nclass=345, cache=size <= 64)])
# Office31 datasets
categories = ['webcam', 'dslr', 'amazon']
for category, size in itertools.product(categories, [32, 64, 128, 224]):
d.update([DataSetSSL.creator(f'office31{size}_{category}', [f'office31{size}_{category}-train.tfrecord'],
height=size, width=size, nclass=31, cache=size <= 64)])
# DigitFive datasets
categories = ['usps', 'mnist', 'mnistm', 'svhn', 'syndigit']
for category, size in itertools.product(categories, [32]):
d.update([DataSetSSL.creator(f'digitfive{size}_{category}', [f'digitfive{size}_{category}-train.tfrecord'],
height=size, width=size, nclass=10, cache=size <= 64)])
return d
DATASETS = create_datasets
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable
import numpy as np
class DataMerger:
def __init__(self, datasets: Iterable[Iterable]):
self.datasets = list(datasets)
def __iter__(self):
iterators = [iter(x) for x in self.datasets]
while 1:
yield [next(x) for x in iterators]
class MergeBalanced(DataMerger):
def __init__(self, datasets: Iterable[Iterable], sizes: Iterable[int]):
del sizes
super().__init__(datasets)
def __iter__(self):
count = len(self.datasets)
iterators = [iter(x) for x in self.datasets]
while 1:
data = [next(x) for x in iterators]
source = np.zeros([count, data[0]['index'].shape[0]], np.uint32)
source += np.arange(count, dtype=np.uint32)[:, None]
mixed = {k: np.concatenate([v[k] for v in data]) for k in ('index', 'label', 'image')}
mixed['source'] = source.ravel()
for x in range(len(data)):
yield {k: v[x::len(data)] for k, v in mixed.items()}
class MergeProportional(DataMerger):
def __init__(self, datasets: Iterable[Iterable], sizes: Iterable[int]):
super().__init__(datasets)
self.ratios = np.array(list(sizes), 'f')
self.ratios /= self.ratios.sum()
assert len(self.datasets) == len(self.ratios)
@staticmethod
def collect(buffer, iterator, count):
available = buffer['index'].shape[0]
if available < count:
entry = next(iterator)
for k in buffer:
buffer[k] = np.concatenate([buffer[k], entry[k]])
vout = {}
for k, v in buffer.items():
vout[k] = v[:count]
buffer[k] = v[count:]
return vout
def __iter__(self):
count = len(self.datasets)
p = np.zeros(count, np.uint32)
iterators = [iter(x) for x in self.datasets]
buffers = [next(x) for x in iterators]
batch = buffers[0]['index'].shape[0]
order = np.arange(batch, dtype=np.uint32)
while True:
fsizes = self.ratios * (p.sum() + batch) - p
sizes = fsizes.astype(np.uint32)
delta = batch - int(sizes.sum())
if delta:
high_p = np.argsort(fsizes - sizes)[-delta:]
sizes[high_p] += 1
data = [self.collect(buffers[i], iterators[i], n) for i, n in enumerate(sizes)]
data = {k: np.concatenate([d[k] for d in data]) for k in data[0]}
data['source'] = np.concatenate([np.zeros(s, dtype=np.uint32) + i for i, s in enumerate(sizes)])
np.random.shuffle(order)
yield {k: v[order] for k, v in data.items()}
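# Example (sketch): with sizes=(1000, 3000) the ratios are (0.25, 0.75), so for a batch of 64
# each merged batch draws about 16 samples from the first dataset and 48 from the second,
# with rounding corrections so the per-dataset counts always sum to the batch size.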
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmentations for images.
"""
import tensorflow as tf
def cutout(x, w):
offsets = tf.random.uniform([2], 0, 1)
s = tf.shape(x)
y0 = tf.cast(tf.round(offsets[0] * (tf.cast(s[0], tf.float32) - w)), tf.int32)
x0 = tf.cast(tf.round(offsets[1] * (tf.cast(s[1], tf.float32) - w)), tf.int32)
hr, wr = tf.range(s[0])[:, None, None], tf.range(s[1])[None, :, None]
mask = 1 - tf.cast((hr >= y0) & (hr < y0 + w) & (wr >= x0) & (wr < x0 + w), tf.float32)
return mask * x
def mirror(x):
return tf.image.random_flip_left_right(x)
def shift(x, w):
y = tf.pad(x, [[w] * 2, [w] * 2, [0] * 2], mode='REFLECT')
return tf.image.random_crop(y, tf.shape(x))
def noise(x, std):
return x + std * tf.random.normal(tf.shape(x), dtype=x.dtype)
def get_tf_augment(augment, size=32):
aug = dict(
x=lambda **kw: kw,
s=lambda image, **kw: dict(image=shift(image, size >> 3), **kw),
sc=lambda image, **kw: dict(image=cutout(shift(image, size >> 3), size >> 1), **kw),
sm=lambda image, **kw: dict(image=mirror(shift(image, size >> 3)), **kw),
smc=lambda image, **kw: dict(image=cutout(mirror(shift(image, size >> 3)), size >> 1), **kw))
return lambda x: aug[augment](**x)
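# Example (sketch): get_tf_augment('smc', size=32) applies a shift of size >> 3 = 4 pixels,
# a random horizontal mirror, then cutout with a size >> 1 = 16 pixel square, e.g.
#   dataset = dataset.map(get_tf_augment('smc'))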
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import numpy as np
from absl import flags
SHARED_MEMORY_SIZE = int(os.environ.get('SHARED_MEMORY_SIZE', 1 << 30))
SHARED_MEMORY = multiprocessing.Array('f', SHARED_MEMORY_SIZE, lock=False)
SHARED_MEMORY_NP = np.ctypeslib.as_array(SHARED_MEMORY)
flags.DEFINE_string('augment', 'MixUp(sm,smc)',
'Dataset augmentation method:\n'
' Augmentations primitives:\n'
' x = identity\n'
' m = mirror\n'
' s = shift\n'
' sc = shift+cutout\n'
' sm = shift+mirror\n'
' smc = shift+mirror+cutout\n'
' Pair augmentations:\n'
' (weak,strong) = Standard augmentation\n'
' CTA(weak,strong,depth:int=2,th:float=0.8,decay:float=0.99) = CTAugment\n')
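# Example flag values (sketch, following the syntax above): --augment='(sm,smc)' for the
# standard weak/strong pair, or --augment='CTA(sm,smc)' for CTAugment with default settings.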
class AugmentPoolBase:
@staticmethod
def round_mem(v: int, align: int = 16):
return align * ((v + align - 1) // align)
@staticmethod
def get_np_arrays(shapes):
offsets = np.cumsum([0] + [AugmentPoolBase.round_mem(np.prod(s)) for s in shapes[:-1]])
return [SHARED_MEMORY_NP[o:o + np.prod(s)].reshape(s) for o, s in zip(offsets, shapes)]
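# Worked example: round_mem(100) == 112 (rounded up to the next multiple of 16), and
# get_np_arrays([(64, 3, 32, 32), (64,)]) returns two numpy views into SHARED_MEMORY_NP
# starting at offsets 0 and round_mem(64 * 3 * 32 * 32).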
def check_mem_requirements(self):
total = sum(self.round_mem(np.prod(v)) for v in self.shapes)
assert total <= SHARED_MEMORY_SIZE, f'Too little shared memory, do: export SHARED_MEMORY_SIZE={total}'
def stop(self):
pass
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control Theory based self-augmentation."""
import random
from collections import namedtuple
import numpy as np
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
OPS = {}
OP = namedtuple('OP', ('f', 'bins'))
Sample = namedtuple('Sample', ('train', 'probe'))
def register(*bins):
def wrap(f):
OPS[f.__name__] = OP(f, bins)
return f
return wrap
def apply(x, ops):
if ops is None:
return x
y = Image.fromarray(np.round(127.5 + 128 * x).clip(0, 255).astype('uint8'))
for op, args in ops:
y = OPS[op].f(y, *args)
return (np.asarray(y).astype('f') - 127.5) / 128
class CTAugment:
def __init__(self, depth: int = 2, th: float = 0.85, decay: float = 0.99):
self.decay = decay
self.depth = depth
self.th = th
self.rates = {}
for k, op in OPS.items():
self.rates[k] = tuple([np.ones(x, 'f') for x in op.bins])
def rate_to_p(self, rate):
p = rate + (1 - self.decay)  # Avoid an all-zero distribution.
p = p / p.max()
p[p < self.th] = 0
return p
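# Worked example: with decay=0.99 and th=0.85, rate_to_p([1.0, 0.5, 0.1]) adds 0.01,
# normalizes by the max to roughly [1.0, 0.50, 0.11], and zeroes entries below th,
# giving [1.0, 0.0, 0.0].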
def policy(self, probe):
kl = list(OPS.keys())
v = []
if probe:
for _ in range(self.depth):
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
v.append(OP(k, rnd.tolist()))
return v
for _ in range(self.depth):
vt = []
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
for r, bin in zip(rnd, bins):
p = self.rate_to_p(bin)
value = np.random.choice(p.shape[0], p=p / p.sum())
vt.append((value + r) / p.shape[0])
v.append(OP(k, vt))
return v
def update_rates(self, policy, proximity):
for k, bins in policy:
for p, rate in zip(bins, self.rates[k]):
p = int(p * len(rate) * 0.999)
rate[p] = rate[p] * self.decay + proximity * (1 - self.decay)
def stats(self):
return '\n'.join('%-16s %s' % (k, ' / '.join(' '.join('%.2f' % x for x in self.rate_to_p(rate))
for rate in self.rates[k]))
for k in sorted(OPS.keys()))
def _enhance(x, op, level):
return op(x).enhance(0.1 + 1.9 * level)
def _imageop(x, op, level):
return Image.blend(x, op(x), level)
def _filter(x, op, level):
return Image.blend(x, x.filter(op), level)
@register(17)
def autocontrast(x, level):
return _imageop(x, ImageOps.autocontrast, level)
@register(17)
def blur(x, level):
return _filter(x, ImageFilter.BLUR, level)
@register(17)
def brightness(x, brightness):
return _enhance(x, ImageEnhance.Brightness, brightness)
@register(17)
def color(x, color):
return _enhance(x, ImageEnhance.Color, color)
@register(17)
def contrast(x, contrast):
return _enhance(x, ImageEnhance.Contrast, contrast)
@register(17)
def cutout(x, level):
"""Apply cutout to pil_img at the specified level."""
size = 1 + int(level * min(x.size) * 0.499)
img_height, img_width = x.size
height_loc = np.random.randint(low=0, high=img_height)
width_loc = np.random.randint(low=0, high=img_width)
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (min(img_height, height_loc + size // 2), min(img_width, width_loc + size // 2))
pixels = x.load() # create the pixel map
for i in range(upper_coord[0], lower_coord[0]): # for every col:
for j in range(upper_coord[1], lower_coord[1]): # For every row
pixels[i, j] = (127, 127, 127) # set the color accordingly
return x
@register(17)
def equalize(x, level):
return _imageop(x, ImageOps.equalize, level)
@register(17)
def invert(x, level):
return _imageop(x, ImageOps.invert, level)
@register()
def identity(x):
return x
@register(8)
def posterize(x, level):
level = 1 + int(level * 7.999)
return ImageOps.posterize(x, level)
@register(17, 6)
def rescale(x, scale, method):
s = x.size
scale *= 0.25
crop = (scale * s[0], scale * s[1], s[0] * (1 - scale), s[1] * (1 - scale))
methods = (Image.ANTIALIAS, Image.BICUBIC, Image.BILINEAR, Image.BOX, Image.HAMMING, Image.NEAREST)
method = methods[int(method * 5.99)]
return x.crop(crop).resize(x.size, method)
@register(17)
def rotate(x, angle):
angle = int(np.round((2 * angle - 1) * 45))
return x.rotate(angle)
@register(17)
def sharpness(x, sharpness):
return _enhance(x, ImageEnhance.Sharpness, sharpness)
@register(17)
def shear_x(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, shear, 0, 0, 1, 0))
@register(17)
def shear_y(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, shear, 1, 0))
@register(17)
def smooth(x, level):
return _filter(x, ImageFilter.SMOOTH, level)
@register(17)
def solarize(x, th):
th = int(th * 255.999)
return ImageOps.solarize(x, th)
@register(17)
def translate_x(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, delta, 0, 1, 0))
@register(17)
def translate_y(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, delta))
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchNoDistAlign(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
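# Random logit interpolation (sketch of the idea): logits are first computed on source
# images alone (so BatchNorm sees only source statistics), the variables are restored,
# and the combined source+target pass is run; the source logits from the two passes are
# then mixed with a per-element uniform weight below.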
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels = stop_gradient(pseudo_labels)
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchNoDistAlign(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchSourceAlign(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_data = self.stats.p_data(sy.mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_data, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchSourceAlign(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchNoLogitReg(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchNoLogitReg(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
from google3.pyglib import gfile
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from google3.learning.deepmind.xmanager2.client import google as xm # xm module is needed for flags.
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from google3.experimental.brain.red_team.nextmatch.domain_adaptation.lib.data import MixData, CTAData
from google3.experimental.brain.red_team.nextmatch.domain_adaptation.lib.train import TrainableDAModule
from google3.experimental.brain.red_team.nextmatch.shared.data.fsl import DATASETS as FSL_DATASETS
from google3.experimental.brain.red_team.nextmatch.shared.train import ScheduleCos, ScheduleCosPhases
from google3.experimental.brain.red_team.nextmatch.shared.util import setup_tf, MyParallel
from google3.experimental.brain.red_team.nextmatch.shared.zoo.models import network, ARCHS
class AdaMatchPretrainNoDistAlign(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
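# Forward pass on labeled source images only, so batch norm sees source-only statistics; model
# variables (including BN running averages) are saved and restored so this pass has no side effects.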
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
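# Random logit interpolation: blend the source logits from the combined source+target pass with the
# logits computed under source-only BN statistics, using per-element weights drawn from U[0, 1].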
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels = stop_gradient(pseudo_labels)
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchPretrainNoDistAlign(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
jax.config.config_with_absl()
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchDistAlignRMM(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_data = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
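# Distribution alignment: rescale pseudo-labels by the ratio of the moving-average labeled class
# distribution (estimated here from the ground-truth labels sy) to the moving-average pseudo-label
# distribution, then renormalize to sum to 1.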
p_data = self.stats.p_data(sy.mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_data) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_data, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchDistAlignRMM(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
from google3.pyglib import gfile
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from google3.learning.deepmind.xmanager2.client import google as xm # xm module is needed for flags.
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from google3.experimental.brain.red_team.nextmatch.domain_adaptation.lib.data import MixData, CTAData
from google3.experimental.brain.red_team.nextmatch.domain_adaptation.lib.train import TrainableDAModule
from google3.experimental.brain.red_team.nextmatch.shared.data.fsl import DATASETS as FSL_DATASETS
from google3.experimental.brain.red_team.nextmatch.shared.train import ScheduleCos, ScheduleCosPhases
from google3.experimental.brain.red_team.nextmatch.shared.util import setup_tf, MyParallel
from google3.experimental.brain.red_team.nextmatch.shared.zoo.models import network, ARCHS
class AdaMatchPretrainFixedWu(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = self.params.wu
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
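# Relative confidence threshold: when use_cr is set, the base threshold is scaled by the mean
# top-1 softmax probability of weakly augmented labeled source examples.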
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu,
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchPretrainFixedWu(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
jax.config.config_with_absl()
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatch
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchFixedConfidence(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
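# Unlabeled-loss weight schedule; ScheduleCosPhases is assumed to ramp the weight from 0 toward 1
# by mid-training and then toward params.wu (see shared.train for the exact semantics).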
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchFixedConfidence(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaMatchPretrain
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.functional import stop_gradient
from objax.typing import JaxArray
from domain_adaptation.lib.data import MixData, CTAData
from domain_adaptation.lib.train import TrainableDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.train import ScheduleCos, ScheduleCosPhases
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class AdaMatchPretrain(TrainableDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.wu = ScheduleCosPhases(1, [(0.5, 1), (1, self.params.wu)], start_value=0)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
def loss_function(sx, sy, tu, progress):
c, h, w = sx.shape[-3:]
saved_vars = self.model.vars().tensors()
logit_bn_x = self.model(sx.reshape((-1, c, h, w)), training=True)
self.model.vars().assign(saved_vars)
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx, logit_tu = jn.split(logit, (2 * sx.shape[0],))
logit_sx += (logit_bn_x - logit_sx) * objax.random.uniform(logit_sx.shape)
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
logit_tu_weak, logit_tu_strong = logit_tu[::2], logit_tu[1::2]
if self.params.use_cr:
real_confidence = objax.functional.softmax(stop_gradient(logit_sx_weak))
confidence_ratio = real_confidence.max(1).mean(0) * self.params.confidence
else:
confidence_ratio = self.params.confidence
pseudo_labels = objax.functional.softmax(logit_tu_weak)
p_labeled = self.stats.p_labeled(objax.functional.softmax(logit_sx_weak).mean(0))
p_unlabeled = self.stats.p_unlabeled(pseudo_labels.mean(0))
pseudo_labels *= (1e-6 + p_labeled) / (1e-6 + p_unlabeled)
pseudo_labels = stop_gradient(pseudo_labels / pseudo_labels.sum(1, keepdims=True))
pseudo_mask = (pseudo_labels.max(axis=1) >= confidence_ratio).astype(pseudo_labels.dtype)
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
xeu = objax.functional.loss.cross_entropy_logits_sparse(logit_tu_strong, pseudo_labels.argmax(axis=1))
xeu = (xeu * pseudo_mask).mean()
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.wu(progress) * xeu + self.params.wd * wd
return loss, {'losses/xe': xe,
'losses/xeu': xeu,
'losses/wd': wd,
'losses/hregbn': jn.square(logit_sx - logit_bn_x).mean(),
'monitors/confidence_ratio': confidence_ratio,
'monitors/wu': self.wu(progress),
'monitors/mask': pseudo_mask.mean(),
'monitors/klmodel': objax.functional.divergence.kl(p_labeled, p_unlabeled)}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(sx, sy, tu, p)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.target}-0']()
testsets = [target.test, source.test] # Ordered by domain (target always first)
module = AdaMatchPretrain(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
wu=FLAGS.wu,
confidence=FLAGS.confidence,
use_cr=FLAGS.use_cr,
uratio=FLAGS.uratio)
logdir = f'DA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/%s' % (
'_'.join(sorted('%s%s' % k for k in module.params.items())))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((f'{FLAGS.source}_to_{k}',
v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target.train, source.nclass, FLAGS.batch, FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_bool('use_cr', True, 'Make confidence threshold proportional to real data.')
flags.DEFINE_float('confidence', 0.9, 'Confidence threshold.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_float('wu', 1, 'Unlabeled loss weight.')
flags.DEFINE_integer('batch', 64, 'Batch size.')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio.')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Dataset to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm)')
FLAGS.set_default('para_augment', 8)
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
from shared.data.fsl import DATASETS
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
dataset = DATASETS()[FLAGS.dataset]()
data = dataset.train.parse(1)
# Iterate through data
for it in data:
print(it['image'][0])
break
if __name__ == '__main__':
flags.DEFINE_string('dataset', 'domainnet32_infograph-0', 'Data to check.')
app.run(main)
|
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to download all datasets and create .tfrecord files.
"""
import collections
import gzip
import itertools
import os
import tarfile
import tempfile
import zipfile
from functools import partial, reduce
from urllib import request
import wget
import h5py
import numpy as np
import scipy.io
import tensorflow as tf
from PIL import Image
from absl import app
from google_drive_downloader import GoogleDriveDownloader as gdd
from objax.util.image import to_png
from tqdm import trange, tqdm
from shared.data import core as libml_data
if 'NEXTMATCH_DOWNLOAD_PATH' in os.environ:
DOWNLOAD_DIR = os.environ['NEXTMATCH_DOWNLOAD_PATH']
else:
DOWNLOAD_DIR = os.path.join(libml_data.DATA_DIR, 'Downloads')
URLS = {
'cifar10': 'https://www.cs.toronto.edu/~kriz/cifar-10-matlab.tar.gz',
'cifar100': 'https://www.cs.toronto.edu/~kriz/cifar-100-matlab.tar.gz',
'domainnet': {
'clipart': 'http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/%sclipart%s',
'infograph': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%sinfograph%s',
'painting': 'http://csr.bu.edu/ftp/visda/2019/multi-source/groundtruth/%spainting%s',
'quickdraw': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%squickdraw%s',
'real': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%sreal%s',
'sketch': 'http://csr.bu.edu/ftp/visda/2019/multi-source/%ssketch%s'
},
'mnist': 'http://yann.lecun.com/exdb/mnist/{}',
'office31': dict(images='0B4IapRTv9pJ1WGZVd1VDMmhwdlE'),
'svhn': 'http://ufldl.stanford.edu/housenumbers/{}_32x32.mat',
'mnistm': 'https://www.dropbox.com/s/rb7pr65fo26h9lh/mnist_m.tar.gz?dl=1',
'syndigit': 'https://storage.googleapis.com/kihyuks-0001/SynDigits/synth_{}_32x32.mat',
'usps': 'https://storage.googleapis.com/kihyuks-0001/usps.h5',
}
def _encode_png(images):
return [to_png(images[x]) for x in trange(images.shape[0], desc='PNG Encoding', leave=False)]
def _image_resize(x, size: int):
"""Resizing that tries to minimize artifacts."""
original = max(x.size)
if original < size:
return x.resize((size, size), Image.BICUBIC)
nearest = original - (original % size)
if nearest != original:
x = x.resize((nearest, nearest), Image.BILINEAR)
if nearest != size:
x = x.resize((size, size), Image.BOX)
if x.size[0] != x.size[1]:
x = x.resize((size, size), Image.BICUBIC)
return x
def _load_cifar10():
def unflatten(images):
return np.transpose(images.reshape((images.shape[0], 3, 32, 32)), [0, 2, 3, 1])
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['cifar10'], f.name)
tar = tarfile.open(fileobj=f)
train_data_batches, train_data_labels = [], []
for batch in range(1, 6):
data_dict = scipy.io.loadmat(tar.extractfile('cifar-10-batches-mat/data_batch_{}.mat'.format(batch)))
train_data_batches.append(data_dict['data'])
train_data_labels.append(data_dict['labels'].flatten())
train_set = {'images': np.concatenate(train_data_batches, axis=0),
'labels': np.concatenate(train_data_labels, axis=0)}
data_dict = scipy.io.loadmat(tar.extractfile('cifar-10-batches-mat/test_batch.mat'))
test_set = {'images': data_dict['data'],
'labels': data_dict['labels'].flatten()}
train_set['images'] = _encode_png(unflatten(train_set['images']))
test_set['images'] = _encode_png(unflatten(test_set['images']))
return dict(train=train_set, test=test_set)
def _load_cifar100():
def unflatten(images):
return np.transpose(images.reshape((images.shape[0], 3, 32, 32)), [0, 2, 3, 1])
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['cifar100'], f.name)
tar = tarfile.open(fileobj=f)
data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/train.mat'))
train_set = {'images': data_dict['data'],
'labels': data_dict['fine_labels'].flatten()}
data_dict = scipy.io.loadmat(tar.extractfile('cifar-100-matlab/test.mat'))
test_set = {'images': data_dict['data'],
'labels': data_dict['fine_labels'].flatten()}
train_set['images'] = _encode_png(unflatten(train_set['images']))
test_set['images'] = _encode_png(unflatten(test_set['images']))
return dict(train=train_set, test=test_set)
def _load_domainnet(domain: str, size: int) -> dict:
assert domain in ('clipart', 'infograph', 'painting', 'quickdraw', 'real', 'sketch')
path = os.path.join(DOWNLOAD_DIR, 'DomainNet')
os.makedirs(path, exist_ok=True)
prefixes = '', 'txt/', 'txt/'
suffixes = '.zip', '_train.txt', '_test.txt'
files = [os.path.join(path, f'{domain}{suffix}') for suffix in suffixes]
for f, prefix, suffix in zip(files, prefixes, suffixes):
if not os.path.exists(f):
print(f'Downloading {URLS["domainnet"][domain] % (prefix, suffix)}')
request.urlretrieve(URLS['domainnet'][domain] % (prefix, suffix), f)
train = [(k, int(v)) for k, v in [x.split() for x in open(files[1], 'r').readlines()]]
test = [(k, int(v)) for k, v in [x.split() for x in open(files[2], 'r').readlines()]]
zipped = zipfile.ZipFile(files[0])
image = {}
for info in tqdm(zipped.infolist(), 'Resizing images', leave=False):
if info.is_dir():
continue
with zipped.open(info) as f:
x = np.array(_image_resize(Image.open(f), size))
image[info.filename] = to_png(x)
np.random.seed(0)
np.random.shuffle(train)
return dict(all=dict(images=[image[k] for k, _ in train + test], labels=np.array([v for _, v in train + test])),
test=dict(images=[image[k] for k, _ in test], labels=np.array([v for _, v in test])),
train=dict(images=[image[k] for k, _ in train], labels=np.array([v for _, v in train])))
def _load_mnist():
image_filename = '{}-images-idx3-ubyte.gz'
label_filename = '{}-labels-idx1-ubyte.gz'
split_files = [('train', 'train'), ('test', 't10k')]
splits = {}
for split, split_file in split_files:
with tempfile.NamedTemporaryFile() as f:
url = URLS['mnist'].format(image_filename.format(split_file))
print(url)
request.urlretrieve(url, f.name)
with gzip.GzipFile(fileobj=f, mode='r') as data:
assert _read32(data) == 2051
n_images = _read32(data)
row = _read32(data)
col = _read32(data)
images = np.frombuffer(data.read(n_images * row * col), dtype=np.uint8)
images = images.reshape((n_images, row, col, 1))
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['mnist'].format(label_filename.format(split_file)), f.name)
with gzip.GzipFile(fileobj=f, mode='r') as data:
assert _read32(data) == 2049
n_labels = _read32(data)
labels = np.frombuffer(data.read(n_labels), dtype=np.uint8)
splits[split] = {'images': _encode_png(images), 'labels': labels}
return splits
def _load_mnist32():
image_filename = '{}-images-idx3-ubyte.gz'
label_filename = '{}-labels-idx1-ubyte.gz'
split_files = [('train', 'train'), ('test', 't10k')]
splits = {}
for split, split_file in split_files:
with tempfile.NamedTemporaryFile() as f:
url = URLS['mnist'].format(image_filename.format(split_file))
print(url)
request.urlretrieve(url, f.name)
with gzip.GzipFile(fileobj=f, mode='r') as data:
assert _read32(data) == 2051
n_images = _read32(data)
row = _read32(data)
col = _read32(data)
images = np.frombuffer(data.read(n_images * row * col), dtype=np.uint8)
images = images.reshape((n_images, row, col, 1))
# Pad 2x2 so that it becomes 32x32
images_pad = np.zeros((images.shape[0],
images.shape[1] + 4,
images.shape[2] + 4,
images.shape[3])).astype(np.uint8)
images_pad[:, 2:-2, 2:-2, :] = images
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['mnist'].format(label_filename.format(split_file)), f.name)
with gzip.GzipFile(fileobj=f, mode='r') as data:
assert _read32(data) == 2049
n_labels = _read32(data)
labels = np.frombuffer(data.read(n_labels), dtype=np.uint8)
splits[split] = {'images': _encode_png(images_pad), 'labels': labels}
return splits
def _load_mnistm():
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['mnistm'], f.name)
tar = tarfile.open(fileobj=f)
splits = {}
for split in ['train', 'test']:
prefix = f'mnist_m/mnist_m_{split}'
img_list = tar.extractfile(f'{prefix}_labels.txt').readlines()
images = []
labels = []
for img_path in tqdm(img_list, f'Loading mnistm {split} images and labels', leave=False):
images.append(np.array(Image.open(tar.extractfile(os.path.join(
prefix, img_path.split()[0].decode('utf-8'))))))
labels.append(int(img_path.split()[1].decode('utf-8')))
images = np.stack(images, axis=0)
splits[split] = {'images': _encode_png(images), 'labels': labels}
return splits
def _load_syndigit():
splits = {}
for split in ['train', 'test']:
filename = 'synth_{}_32x32.mat'.format(split)
if not os.path.exists(filename):
wget.download(URLS['syndigit'].format(split), out=filename)
data_dict = scipy.io.loadmat(filename)
images = np.transpose(data_dict['X'], (3, 0, 1, 2))
labels = data_dict['y'].flatten()
splits[split] = {'images': _encode_png(images), 'labels': labels}
return splits
def _load_usps():
def _hdf5(path, data_key="data", target_key="target", flatten=True):
"""Loads data from an HDF5 file.
- The HDF5 file should have 'train' and 'test' groups.
- Each group should have 'data' and 'target' datasets, or specify the keys.
- flatten reshapes images from N * (C * H * W) into an N * D array.
Code from: https://www.kaggle.com/bistaumanga/usps-getting-started?scriptVersionId=3215146&cellId=3
"""
with h5py.File(path, 'r') as hf:
train = hf.get('train')
X_tr = train.get(data_key)[:]
y_tr = train.get(target_key)[:]
test = hf.get('test')
X_te = test.get(data_key)[:]
y_te = test.get(target_key)[:]
if flatten:
X_tr = X_tr.reshape(X_tr.shape[0], reduce(lambda a, b: a * b, X_tr.shape[1:]))
X_te = X_te.reshape(X_te.shape[0], reduce(lambda a, b: a * b, X_te.shape[1:]))
return X_tr, y_tr, X_te, y_te
filename = 'usps.h5'
if not os.path.exists(filename):
wget.download(URLS['usps'], out=filename)
X_tr, y_tr, X_te, y_te = _hdf5(filename)
X_tr = np.concatenate([(255.0 * X_tr).astype(np.uint8).reshape(-1, 16, 16, 1)] * 3, axis=-1)
X_tr = np.stack([np.array(_image_resize(Image.fromarray(x), 32)) for x in X_tr], axis=0)
X_te = np.concatenate([(255.0 * X_te).astype(np.uint8).reshape(-1, 16, 16, 1)] * 3, axis=-1)
X_te = np.stack([np.array(_image_resize(Image.fromarray(x), 32)) for x in X_te], axis=0)
splits = {'train': {'images': _encode_png(X_tr), 'labels': y_tr},
'test': {'images': _encode_png(X_te), 'labels': y_te}}
return splits
def _load_digitfive(domain: str, size: int) -> dict:
assert size == 32
assert domain in 'mnist svhn usps mnistm syndigit'.split()
if domain == 'mnist':
return _load_mnist32()
elif domain == 'svhn':
return _load_svhn()
elif domain == 'usps':
return _load_usps()
elif domain == 'mnistm':
return _load_mnistm()
elif domain == 'syndigit':
return _load_syndigit()
def _load_office31(domain: str, size: int) -> dict:
assert domain in 'amazon dslr webcam'.split()
path = os.path.join(DOWNLOAD_DIR, 'office31_images.tgz')
if not os.path.exists(path):
gdd.download_file_from_google_drive(file_id=URLS['office31']['images'], dest_path=path, overwrite=True)
if b'Quota exceeded' in open(path, 'rb').read(1024):
os.remove(path)
raise FileNotFoundError('Quota exceeded: File office31_images.tgz for Office31 could not be downloaded from'
' Google drive. Try again later.')
data = collections.defaultdict(list)
with tarfile.open(name=path, mode='r:gz') as tar:
for entry in tar.getmembers():
domain_, _, class_, name = entry.name.split('/')
if domain == domain_:
data[class_].append((class_, name, entry))
np.random.seed(0)
train, test = [], []
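# Per-class 80% / 20% train/test split; classes are shuffled with a fixed seed for reproducibility.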
for class_ in data.keys():
np.random.shuffle(data[class_])
total_num_frames = len(data[class_])
num_train_frames = int(0.8*total_num_frames)
train_frames = data[class_][:num_train_frames]
test_frames = data[class_][num_train_frames:]
assert len(train_frames) + len(test_frames) == total_num_frames
train += train_frames
test += test_frames
train_images, train_labels, train_label_set = [], [], set()
for class_, name, entry in tqdm(train, leave=False, desc='Resizing train images'):
train_images.append(np.array(_image_resize(Image.open(tar.extractfile(entry)), size)))
assert train_images[-1].shape == (size, size, 3)
train_labels.append(class_)
train_label_set.add(class_)
train_label_id = {x: p for p, x in enumerate(sorted(train_label_set))}
test_images, test_labels, test_label_set = [], [], set()
for class_, name, entry in tqdm(test, leave=False, desc='Resizing test images'):
test_images.append(np.array(_image_resize(Image.open(tar.extractfile(entry)), size)))
assert test_images[-1].shape == (size, size, 3)
test_labels.append(class_)
test_label_set.add(class_)
test_label_id = {x: p for p, x in enumerate(sorted(test_label_set))}
return dict(train=dict(images=_encode_png(np.stack(train_images)),
labels=np.array([train_label_id[x] for x in train_labels], 'int32')),
test=dict(images=_encode_png(np.stack(test_images)),
labels=np.array([test_label_id[x] for x in test_labels], 'int32')))
def _load_svhn():
splits = collections.OrderedDict()
for split in ['train', 'test', 'extra']:
with tempfile.NamedTemporaryFile() as f:
request.urlretrieve(URLS['svhn'].format(split), f.name)
data_dict = scipy.io.loadmat(f.name)
dataset = {}
dataset['images'] = np.transpose(data_dict['X'], [3, 0, 1, 2])
dataset['images'] = _encode_png(dataset['images'])
dataset['labels'] = data_dict['y'].reshape((-1))
# SVHN raw data uses labels from 1 to 10; use 0 to 9 instead.
dataset['labels'] %= 10 # Label number 10 is for 0.
splits[split] = dataset
return splits
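# Reads one big-endian uint32 from a binary stream (IDX header fields used by the MNIST files).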
def _read32(data):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(data.read(4), dtype=dt)[0]
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _save_as_tfrecord(data, filename):
assert len(data['images']) == len(data['labels'])
filename = os.path.join(libml_data.DATA_DIR, filename + '.tfrecord')
print('Saving dataset:', filename)
with tf.io.TFRecordWriter(filename) as writer:
for x in trange(len(data['images']), desc='Building records'):
feat = dict(image=_bytes_feature(data['images'][x]),
label=_int64_feature(data['labels'][x]))
record = tf.train.Example(features=tf.train.Features(feature=feat))
writer.write(record.SerializeToString())
print('Saved:', filename)
def _is_installed(name, checksums):
for subset, checksum in checksums.items():
filename = os.path.join(libml_data.DATA_DIR, '%s-%s.tfrecord' % (name, subset))
if not tf.io.gfile.exists(filename):
return False
return True
def _save_files(files, *args, **kwargs):
del args, kwargs
for folder in frozenset(os.path.dirname(x) for x in files):
tf.io.gfile.makedirs(os.path.join(libml_data.DATA_DIR, folder))
for filename, contents in files.items():
with tf.io.gfile.GFile(os.path.join(libml_data.DATA_DIR, filename), 'w') as f:
f.write(contents)
def _is_installed_folder(name, folder):
return tf.io.gfile.exists(os.path.join(libml_data.DATA_DIR, name, folder))
CONFIGS = {
'cifar10': dict(loader=_load_cifar10, checksums=dict(train=None, test=None)),
'cifar100': dict(loader=_load_cifar100, checksums=dict(train=None, test=None)),
'mnist': dict(loader=_load_mnist, checksums=dict(train=None, test=None)),
'svhn': dict(loader=_load_svhn, checksums=dict(train=None, test=None, extra=None)),
}
CONFIGS.update({
f'domainnet{size}_{domain}': dict(loader=partial(_load_domainnet, domain=domain, size=size),
checksums=dict(train=None, test=None, all=None))
for size, domain in
itertools.product((32, 64, 128, 224), 'clipart infograph painting quickdraw real sketch'.split())
})
CONFIGS.update({
f'office31{size}_{domain}': dict(loader=partial(_load_office31, domain=domain, size=size),
checksums=dict(train=None))
for size, domain in itertools.product((32, 64, 128, 224), 'amazon dslr webcam'.split())
})
CONFIGS.update({
f'digitfive{size}_{domain}': dict(loader=partial(_load_digitfive, domain=domain, size=size),
checksums=dict(train=None))
for size, domain in itertools.product((32,), 'mnist svhn usps mnistm syndigit'.split())
})
def main(argv):
if len(argv[1:]):
subset = set(argv[1:])
else:
subset = set(CONFIGS.keys())
tf.io.gfile.makedirs(libml_data.DATA_DIR)
for name in subset:
assert name in CONFIGS, f'Dataset not recognized {name}'
for name, config in CONFIGS.items():
if name not in subset:
continue
if 'is_installed' in config:
if config['is_installed']():
print('Skipping already installed:', name)
continue
elif _is_installed(name, config['checksums']):
print('Skipping already installed:', name)
continue
print('Preparing', name)
datas = config['loader']()
saver = config.get('saver', _save_as_tfrecord)
for sub_name, data in datas.items():
if sub_name == 'readme':
filename = os.path.join(libml_data.DATA_DIR, '%s-%s.txt' % (name, sub_name))
with tf.io.gfile.GFile(filename, 'w') as f:
f.write(data)
elif sub_name == 'files':
for file_and_data in data:
path = os.path.join(libml_data.DATA_DIR, file_and_data.filename)
with tf.io.gfile.GFile(path, "wb") as f:
f.write(file_and_data.data)
else:
saver(data, '%s-%s' % (name, sub_name))
if __name__ == '__main__':
app.run(main)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import app
from absl import flags
from fully_supervised.libml.data.fsl import DATASETS
FLAGS = flags.FLAGS
def main(argv):
del argv # Unused.
dataset = DATASETS()[FLAGS.dataset]()
train = dataset.labeled.repeat().shuffle(FLAGS.shuffle).parse().augment().batch(64).prefetch(16)
for it in train:
print(it)
break
if __name__ == '__main__':
app.run(main)
|