# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Date processing functions."""
import numpy as np
def date_num_bins(date_min, date_max, date_interval, unknown_bin=True):
  """Returns the number of date bins, optionally adding one for unknown dates."""
num_bins = (date_max - date_min - 1) // date_interval
if unknown_bin:
num_bins += 1 # +1 for unk
return num_bins
def date_to_bin(date_cur, date_min, date_max, date_interval, date_bins):
  """Maps a date to its bin index; out-of-range dates go to the last (unknown) bin."""
if date_cur >= date_min and date_cur < date_max:
date_bin = np.digitize(
date_cur,
list(range(date_min + date_interval, date_max, date_interval)))
else:
date_bin = date_bins - 1
return date_bin
def bin_to_date(date_cur_bin, date_min, date_interval):
  """Maps a bin index back to the midpoint date of that bin."""
return date_min + date_cur_bin * date_interval + date_interval // 2
def date_range_to_dist(date_min_cur,
date_max_cur,
date_min,
date_max,
date_interval,
date_bins,
return_logits=True):
"""Converts a date range to a uniform distribution."""
dist = np.zeros(date_bins)
if (date_min_cur and date_max_cur and date_min_cur >= date_min and
date_max_cur < date_max and date_min_cur <= date_max_cur):
date_min_cur_bin = date_to_bin(date_min_cur, date_min, date_max,
date_interval, date_bins)
date_max_cur_bin = date_to_bin(date_max_cur, date_min, date_max,
date_interval, date_bins)
else:
date_min_cur_bin = date_bins - 1
date_max_cur_bin = date_bins - 1
date_bins_cur = date_max_cur_bin - date_min_cur_bin + 1
dist[date_min_cur_bin:date_max_cur_bin + 1] = 1. / date_bins_cur
if return_logits:
eps = 1e-6
dist = np.clip(dist, eps, 1. - eps)
dist = np.log(dist)
return dist
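# Illustrative usage sketch, not part of the original module: how the binning
# helpers above fit together. The date range and interval below are arbitrary
# example values, not the ones used by Ithaca.
def _demo_date_binning():
  date_min, date_max, date_interval = -800, 800, 10
  date_bins = date_num_bins(date_min, date_max, date_interval)
  dist = date_range_to_dist(-350, -300, date_min, date_max, date_interval,
                            date_bins, return_logits=False)
  # The most likely bin maps back to the midpoint of its interval.
  return date_bins, bin_to_date(int(dist.argmax()), date_min, date_interval)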
# ithaca-main | ithaca/util/dates.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval utils."""
from typing import List, NamedTuple
import jax
import jax.numpy as jnp
import numpy as np
from .text import idx_to_text
from .text import text_to_idx
from .text import text_to_word_idx
import tqdm
def date_loss_l1(pred, target_min, target_max):
"""L1 loss function for dates."""
loss = 0.
loss += np.abs(pred - target_min) * np.less(pred, target_min).astype(
pred.dtype)
loss += np.abs(pred - target_max) * np.greater(pred, target_max).astype(
pred.dtype)
return loss
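# Illustrative sketch, not part of the original module: date_loss_l1 is zero
# inside [target_min, target_max] and grows linearly with the distance outside
# that interval. The values below are arbitrary examples.
def _demo_date_loss_l1():
  preds = np.array([-120.0, -80.0, -40.0])
  # Expected: [20., 0., 10.] -- penalised only outside the target interval.
  return date_loss_l1(preds, target_min=-100.0, target_max=-50.0)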
def grad_to_saliency_char(gradient_char, text_char_onehot, text_len, alphabet):
"""Generates saliency map."""
saliency_char = np.linalg.norm(gradient_char, axis=2)[0, :text_len[0]]
text_char = np.array(text_char_onehot).argmax(axis=-1)
idx_mask = np.logical_or(
text_char[0, :text_len[0]] > alphabet.alphabet_end_idx,
text_char[0, :text_len[0]] < alphabet.alphabet_start_idx)
idx_unmask = np.logical_not(idx_mask)
saliency_char_tmp = saliency_char.copy()
saliency_char_tmp[idx_mask] = 0.
if idx_unmask.any():
saliency_char_tmp[idx_unmask] = (saliency_char[idx_unmask] -
saliency_char[idx_unmask].min()) / (
saliency_char[idx_unmask].max() -
saliency_char[idx_unmask].min() + 1e-8)
return saliency_char_tmp
def grad_to_saliency_word(gradient_word, text_word_onehot, text_len, alphabet):
"""Generates saliency map."""
saliency_word = np.linalg.norm(gradient_word, axis=2)[0, :text_len[0]]
text_word = np.array(text_word_onehot).argmax(axis=-1)
saliency_word = saliency_word.copy()
start_idx = None
for i in range(text_len[0]):
if text_word[0, i] == alphabet.unk_idx:
if start_idx is not None:
saliency_word[start_idx:i] = np.sum(saliency_word[start_idx:i])
start_idx = None
elif start_idx is None:
start_idx = i
idx_mask = text_word[0, :text_len[0]] == alphabet.unk_idx
idx_unmask = np.logical_not(idx_mask)
saliency_word_tmp = saliency_word.copy()
saliency_word_tmp[idx_mask] = 0.
if idx_unmask.any():
saliency_word_tmp[idx_unmask] = (
saliency_word[idx_unmask] - saliency_word[idx_unmask].min())
saliency_word_tmp[idx_unmask] = saliency_word_tmp[idx_unmask] / (
saliency_word[idx_unmask].max() - saliency_word[idx_unmask].min() +
1e-8)
return saliency_word_tmp
def softmax(x, axis=-1):
"""Compute softmax values for each sets of scores in x."""
unnormalized = np.exp(x - x.max(axis, keepdims=True))
return unnormalized / unnormalized.sum(axis, keepdims=True)
def log_softmax(x, axis=-1):
"""Log-Softmax function."""
shifted = x - x.max(axis, keepdims=True)
return shifted - np.log(np.sum(np.exp(shifted), axis, keepdims=True))
def nucleus_sample_inner(logits, top_p=0.95, temp=1.0):
"""Samples from the most likely tokens whose probability sums to top_p."""
sorted_logits = np.sort(logits)
sorted_probs = softmax(sorted_logits)
threshold_idx = np.argmax(np.cumsum(sorted_probs, -1) >= 1 - top_p)
threshold_largest_logits = sorted_logits[..., [threshold_idx]]
assert threshold_largest_logits.shape == logits.shape[:-1] + (1,)
mask = logits >= threshold_largest_logits
logits += (1 - mask) * -1e12 # Set unused logits to -inf.
logits /= np.maximum(temp, 1e-12)
return logits
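# Illustrative sketch, not part of the original module: nucleus filtering keeps
# the most likely tokens whose probability mass reaches top_p and pushes the
# remaining logits towards -inf. The toy logits below are arbitrary.
def _demo_nucleus_sample_inner():
  toy_logits = np.array([2.0, 1.0, 0.5, -3.0])
  # Copy because nucleus_sample_inner modifies the logits array in place.
  filtered = nucleus_sample_inner(toy_logits.copy(), top_p=0.9)
  return softmax(filtered)  # the -3.0 token ends up with ~zero probability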
class BeamEntry(NamedTuple):
text_pred: str
mask_idx: int
pred_len: int
pred_logprob: float
def beam_search_batch_2d(forward,
alphabet,
text_pred,
mask_idx,
rng=None,
beam_width=20,
temperature=1.,
nucleus=False,
nucleus_top_p=0.8,
display_progress=False) -> List[BeamEntry]:
"""Non-sequential beam search."""
beam = [BeamEntry(text_pred, mask_idx, 0, 0.)]
beam_top = {}
text_len = len(text_pred.rstrip(alphabet.pad))
# Initialise tqdm bar
if display_progress:
pbar = tqdm.tqdm(total=len(mask_idx))
while beam:
beam_tmp = []
beam_batch = []
text_chars = []
text_words = []
for text_pred, mask_idx, pred_len, pred_logprob in beam:
mask_idx = mask_idx.copy() # pytype: disable=attribute-error # strict_namedtuple_checks
text_char = text_to_idx(text_pred, alphabet).reshape(1, -1)
text_word = text_to_word_idx(text_pred, alphabet).reshape(1, -1)
text_chars.append(text_char)
text_words.append(text_word)
beam_batch.append(BeamEntry(text_pred, mask_idx, pred_len, pred_logprob))
text_chars = np.vstack(text_chars)
text_words = np.vstack(text_words)
_, _, mask_logits, _ = forward(
text_char=text_chars,
text_word=text_words,
text_char_onehot=None,
text_word_onehot=None,
rngs={'dropout': rng},
is_training=False)
mask_logits = mask_logits / temperature
mask_logits = np.array(mask_logits)
for batch_i in range(mask_logits.shape[0]):
text_pred, mask_idx, pred_len, pred_logprob = beam_batch[batch_i]
mask_logprob = log_softmax(mask_logits)[batch_i, :text_len]
mask_pred = softmax(mask_logits)[batch_i, :text_len]
mask_pred_argmax = np.dstack(
np.unravel_index(np.argsort(-mask_pred.ravel()), mask_pred.shape))[0]
# Keep only predictions for mask
for i in range(mask_pred_argmax.shape[0]):
if (mask_pred_argmax[i][0] in mask_idx and # pytype: disable=unsupported-operands # strict_namedtuple_checks
(mask_pred_argmax[i][1] == alphabet.char2idx[alphabet.space] or
(mask_pred_argmax[i][1] >= alphabet.alphabet_start_idx and
mask_pred_argmax[i][1] <=
alphabet.char2idx[alphabet.punctuation[-1]]))):
text_char_i = text_chars.copy()
text_char_i[batch_i, mask_pred_argmax[i][0]] = mask_pred_argmax[i][1]
text_pred_i = idx_to_text(
text_char_i[batch_i], alphabet, strip_sos=False, strip_pad=False)
mask_idx_i = mask_idx.copy() # pytype: disable=attribute-error # strict_namedtuple_checks
mask_idx_i.remove(mask_pred_argmax[i][0])
if nucleus:
mask_logits_i = mask_logits[batch_i, mask_pred_argmax[i][0]]
mask_logits_i = nucleus_sample_inner(mask_logits_i, nucleus_top_p)
mask_logprob_i = log_softmax(mask_logits_i)
# Skip expanding the beam if logprob too small
if mask_logits_i[mask_pred_argmax[i][1]] < -1e12:
continue
pred_logprob_i = pred_logprob + mask_logprob_i[mask_pred_argmax[i]
[1]]
else:
pred_logprob_i = pred_logprob + mask_logprob[mask_pred_argmax[i][0],
mask_pred_argmax[i][1]]
if not mask_idx_i:
if (text_pred_i
not in beam_top) or (text_pred_i in beam_top and
beam_top[text_pred_i][3] > pred_logprob_i):
beam_top[text_pred_i] = BeamEntry(text_pred_i, mask_idx_i,
pred_len + 1, pred_logprob_i)
else:
beam_tmp.append(
BeamEntry(text_pred_i, mask_idx_i, pred_len + 1,
pred_logprob_i))
# order all candidates by score
beam_tmp_kv = {}
for text_pred, mask_idx, pred_len, pred_logprob in beam_tmp:
if (text_pred not in beam_tmp_kv) or (
text_pred in beam_tmp_kv and
beam_tmp_kv[text_pred].pred_logprob > pred_logprob):
beam_tmp_kv[text_pred] = BeamEntry(text_pred, mask_idx, pred_len,
pred_logprob)
beam_tmp = sorted(
beam_tmp_kv.values(),
key=lambda entry: entry.pred_logprob,
reverse=True)
# select k best
beam = beam_tmp[:beam_width]
# update progress bar
if display_progress:
pbar.update(1)
# order all candidates by score
return sorted(
beam_top.values(), key=lambda entry: entry.pred_logprob,
reverse=True)[:beam_width]
def beam_search_batch_1d(forward,
alphabet,
text_pred,
mask_idx,
rng=None,
beam_width=20,
temperature=1.,
nucleus=False,
nucleus_top_p=0.8,
display_progress=False) -> List[BeamEntry]:
"""Sequential beam search."""
beam = [BeamEntry(text_pred, mask_idx, 0, 0.)]
beam_top = {}
# Initialise tqdm bar
if display_progress:
pbar = tqdm.tqdm(total=len(mask_idx))
while beam:
beam_tmp = []
beam_batch = []
text_chars = []
text_words = []
for text_pred, mask_idx, pred_len, pred_logprob in beam:
mask_idx = mask_idx.copy() # pytype: disable=attribute-error # strict_namedtuple_checks
text_char = text_to_idx(text_pred, alphabet).reshape(1, -1)
text_word = text_to_word_idx(text_pred, alphabet).reshape(1, -1)
text_chars.append(text_char)
text_words.append(text_word)
beam_batch.append(BeamEntry(text_pred, mask_idx, pred_len, pred_logprob))
text_chars = np.vstack(text_chars)
text_words = np.vstack(text_words)
_, _, mask_logits, _ = forward(
text_char=text_chars,
text_word=text_words,
text_char_onehot=None,
text_word_onehot=None,
rngs={'dropout': rng},
is_training=False)
mask_logits = mask_logits / temperature
mask_logits = np.array(mask_logits)
for batch_i in range(mask_logits.shape[0]):
text_pred, mask_idx, pred_len, pred_logprob = beam_batch[batch_i]
mask_logits_i = mask_logits[batch_i, mask_idx[0]] # pytype: disable=unsupported-operands # strict_namedtuple_checks
if nucleus:
mask_logits_i = nucleus_sample_inner(mask_logits_i, nucleus_top_p)
mask_logprob = log_softmax(mask_logits_i)
# Keep only predictions for mask
alphabet_chars = [alphabet.char2idx[alphabet.space]]
alphabet_chars += list(
range(alphabet.alphabet_start_idx,
alphabet.char2idx[alphabet.punctuation[-1]]))
for char_i in alphabet_chars:
# Skip expanding the beam if logprob too small
if nucleus and mask_logits_i[char_i] < -1e12:
continue
text_char_i = text_chars.copy()
text_char_i[batch_i, mask_idx[0]] = char_i # pytype: disable=unsupported-operands # strict_namedtuple_checks
text_pred_i = idx_to_text(
text_char_i[batch_i], alphabet, strip_sos=False, strip_pad=False)
mask_idx_i = mask_idx.copy() # pytype: disable=attribute-error # strict_namedtuple_checks
mask_idx_i.pop(0)
pred_logprob_i = pred_logprob + mask_logprob[char_i]
if not mask_idx_i:
if (text_pred_i
not in beam_top) or (text_pred_i in beam_top and
beam_top[text_pred_i][3] > pred_logprob_i):
beam_top[text_pred_i] = BeamEntry(text_pred_i, mask_idx_i,
pred_len + 1, pred_logprob_i)
else:
beam_tmp.append(
BeamEntry(text_pred_i, mask_idx_i, pred_len + 1, pred_logprob_i))
# order all candidates by score
beam_tmp_kv = {}
for text_pred, mask_idx, pred_len, pred_logprob in beam_tmp:
if (text_pred
not in beam_tmp_kv) or (text_pred in beam_tmp_kv and
beam_tmp_kv[text_pred][3] > pred_logprob):
beam_tmp_kv[text_pred] = BeamEntry(text_pred, mask_idx, pred_len,
pred_logprob)
beam_tmp = sorted(
beam_tmp_kv.values(),
key=lambda entry: entry.pred_logprob,
reverse=True)
# select k best
beam = beam_tmp[:beam_width]
# update progress bar
if display_progress:
pbar.update(1)
# order all candidates by score
return sorted(
beam_top.values(), key=lambda entry: entry.pred_logprob,
reverse=True)[:beam_width]
def saliency_loss_subregion(forward,
text_char_emb,
text_word_emb,
padding,
rng,
subregion=None):
"""Saliency map for subregion."""
_, subregion_logits, _, _ = forward(
text_char_emb=text_char_emb,
text_word_emb=text_word_emb,
padding=padding,
rngs={'dropout': rng},
is_training=False)
if subregion is None:
subregion = subregion_logits.argmax(axis=-1)[0]
return subregion_logits[0, subregion]
def saliency_loss_date(forward, text_char_emb, text_word_emb, padding, rng):
"""saliency_loss_date."""
date_pred, _, _, _ = forward(
text_char_emb=text_char_emb,
text_word_emb=text_word_emb,
padding=padding,
rngs={'dropout': rng},
is_training=False)
date_pred_argmax = date_pred.argmax(axis=-1)
return date_pred[0, date_pred_argmax[0]]
def predicted_dates(date_pred_probs, date_min, date_max, date_interval):
"""Returns mode and mean prediction."""
date_years = np.arange(date_min + date_interval / 2,
date_max + date_interval / 2, date_interval)
# Compute mode:
date_pred_argmax = (
date_pred_probs.argmax() * date_interval + date_min + date_interval // 2)
# Compute mean:
date_pred_avg = np.dot(date_pred_probs, date_years)
return date_pred_argmax, date_pred_avg
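# Illustrative sketch, not part of the original module: turning a toy
# distribution over five date bins into mode and mean date predictions. The bin
# configuration below is an arbitrary example.
def _demo_predicted_dates():
  probs = np.array([0.1, 0.2, 0.4, 0.2, 0.1])
  # With date_min=-800, date_max=-750, date_interval=10 the bin centres are
  # -795, -785, ..., -755, so both mode and mean land on -775 here.
  return predicted_dates(probs, date_min=-800, date_max=-750, date_interval=10)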
def compute_attribution_saliency_maps(text_char,
text_word,
text_len,
padding,
forward,
params,
rng,
alphabet,
vocab_char_size,
vocab_word_size,
subregion_loss_kwargs=None):
"""Compute saliency maps for subregions and dates."""
if subregion_loss_kwargs is None:
subregion_loss_kwargs = {}
# Get saliency gradients
dtype = params['params']['char_embeddings']['embedding'].dtype
text_char_onehot = jax.nn.one_hot(text_char, vocab_char_size).astype(dtype)
text_word_onehot = jax.nn.one_hot(text_word, vocab_word_size).astype(dtype)
text_char_emb = jnp.matmul(text_char_onehot,
params['params']['char_embeddings']['embedding'])
text_word_emb = jnp.matmul(text_word_onehot,
params['params']['word_embeddings']['embedding'])
gradient_subregion_char, gradient_subregion_word = jax.grad(
saliency_loss_subregion, (1, 2))(
forward,
text_char_emb,
text_word_emb,
padding,
rng=rng,
**subregion_loss_kwargs)
gradient_date_char, gradient_date_word = jax.grad(saliency_loss_date, (1, 2))(
forward, text_char_emb, text_word_emb, padding=padding, rng=rng)
# Generate saliency maps for subregions
input_grad_subregion_char = np.multiply(gradient_subregion_char,
text_char_emb) # grad x input
input_grad_subregion_word = np.multiply(gradient_subregion_word,
text_word_emb)
grad_char = grad_to_saliency_char(
input_grad_subregion_char,
text_char_onehot,
text_len=text_len,
alphabet=alphabet)
grad_word = grad_to_saliency_word(
input_grad_subregion_word,
text_word_onehot,
text_len=text_len,
alphabet=alphabet)
subregion_saliency = np.clip(grad_char + grad_word, 0, 1)
# Generate saliency maps for dates
input_grad_date_char = np.multiply(gradient_date_char,
text_char_emb) # grad x input
input_grad_date_word = np.multiply(gradient_date_word, text_word_emb)
grad_char = grad_to_saliency_char(
input_grad_date_char,
text_char_onehot,
text_len=text_len,
alphabet=alphabet)
grad_word = grad_to_saliency_word(
input_grad_date_word,
text_word_onehot,
text_len=text_len,
alphabet=alphabet)
date_saliency = np.clip(grad_char + grad_word, 0, 1)
return date_saliency, subregion_saliency
def saliency_loss_mask(forward, text_char_emb, text_word_emb, padding, rng,
char_pos, char_idx):
"""Saliency map for mask."""
_, _, mask_logits, _ = forward(
text_char_emb=text_char_emb,
text_word_emb=text_word_emb,
text_char_onehot=None,
text_word_onehot=None,
padding=padding,
rngs={'dropout': rng},
is_training=False)
return mask_logits[0, char_pos, char_idx]
class SequentialRestorationSaliencyResult(NamedTuple):
text: str # predicted text string so far
pred_char_pos: int # newly restored character's position
saliency_map: np.ndarray # saliency map for the newly added character
def sequential_restoration_saliency(text_str, text_len, forward, params,
alphabet, mask_idx, vocab_char_size,
vocab_word_size):
"""Greedily, non-sequentially restores, producing per-step saliency maps."""
text_len = text_len[0] if not isinstance(text_len, int) else text_len
rng = jax.random.PRNGKey(0) # dummy, no randomness in model
mask_idx = set(mask_idx)
while mask_idx:
text_char = text_to_idx(text_str, alphabet).reshape(1, -1)
padding = jnp.where(text_char > 0, 1, 0)
text_word = text_to_word_idx(text_str, alphabet).reshape(1, -1)
_, _, mask_logits, _ = forward(
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
rngs={'dropout': rng},
is_training=False)
mask_pred = jax.nn.softmax(mask_logits)[0, :text_len]
mask_pred_argmax = np.dstack(
np.unravel_index(np.argsort(-mask_pred.ravel()), mask_pred.shape))[0]
# Greedily, non-sequentially take the next highest probability prediction
# out of the characters that are to be restored
for i in range(mask_pred_argmax.shape[0]):
pred_char_pos, pred_char_idx = mask_pred_argmax[i]
if pred_char_pos in mask_idx:
break
# Update sequence
text_char[0, pred_char_pos] = pred_char_idx
text_str = idx_to_text(
text_char[0], alphabet, strip_sos=False, strip_pad=False)
mask_idx.remove(pred_char_pos)
# Gradients for saliency map
text_char_onehot = jax.nn.one_hot(text_char,
vocab_char_size).astype(jnp.float32)
text_word_onehot = jax.nn.one_hot(text_word,
vocab_word_size).astype(jnp.float32)
text_char_emb = jnp.matmul(text_char_onehot,
params['params']['char_embeddings']['embedding'])
text_word_emb = jnp.matmul(text_word_onehot,
params['params']['word_embeddings']['embedding'])
gradient_mask_char, gradient_mask_word = jax.grad(
saliency_loss_mask, (1, 2))(
forward,
text_char_emb,
text_word_emb,
padding,
rng=rng,
char_pos=pred_char_pos,
char_idx=pred_char_idx)
# Use gradient x input for visualizing saliency
input_grad_mask_char = np.multiply(gradient_mask_char, text_char_emb)
input_grad_mask_word = np.multiply(gradient_mask_word, text_word_emb)
# Return visualization-ready saliency maps
saliency_map = grad_to_saliency_char(
np.clip(input_grad_mask_char + input_grad_mask_word, 0, 1),
text_char_onehot, [text_len], alphabet) # normalize, etc.
result_text = idx_to_text(text_char[0], alphabet, strip_sos=False) # no pad
yield SequentialRestorationSaliencyResult(
text=result_text[1:],
pred_char_pos=pred_char_pos - 1,
saliency_map=saliency_map[1:])
# ithaca-main | ithaca/util/eval.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer utilities."""
from typing import Any, Callable, NamedTuple, Optional, Sequence, Tuple, Union
import jax
import jax.numpy as jnp
def linear_weight(global_step, start, end):
"""Linear weight increase."""
if end <= 0:
return 1.
t = jnp.maximum(0., global_step - start)
w = t / (end - start)
w = jnp.minimum(w, 1.)
return w
def linear_warmup_and_sqrt_decay(global_step, max_lr, warmup_steps):
"""Linear warmup and then an inverse square root decay of learning rate."""
linear_ratio = max_lr / warmup_steps
decay_ratio = jnp.power(warmup_steps * 1.0, 0.5) * max_lr
return jnp.minimum(linear_ratio * global_step,
decay_ratio * jnp.power(global_step, -0.5))
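# Illustrative sketch, not part of the original module: the schedule climbs
# linearly to max_lr at warmup_steps and then decays as 1/sqrt(step). The
# values below are arbitrary examples.
def _demo_linear_warmup_and_sqrt_decay():
  steps = jnp.array([1.0, 500.0, 1000.0, 4000.0])
  return linear_warmup_and_sqrt_decay(steps, max_lr=3e-4, warmup_steps=1000)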
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by '*' that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
    a function learning_rate(step): int -> float, the step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
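# Usage sketch, not part of the original module: building the default
# warmup-plus-rsqrt-decay schedule and querying it at a few (arbitrary) steps.
def _demo_create_learning_rate_scheduler():
  lr_fn = create_learning_rate_scheduler(
      factors='constant * linear_warmup * rsqrt_decay',
      base_learning_rate=3e-4,
      warmup_steps=1000)
  return [float(lr_fn(step)) for step in (10, 1000, 10000)]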
# pylint:disable=no-value-for-parameter
OptState = NamedTuple # Transformation states are (possibly empty) namedtuples.
Params = Any # Parameters are arbitrary nests of `jnp.ndarrays`.
Updates = Params # Gradient updates are of the same type as parameters.
class GradientTransformation(NamedTuple):
"""Optax transformations consists of a function pair: (initialise, update)."""
init: Callable[ # Function used to initialise the transformation's state.
[Params], Union[OptState, Sequence[OptState]]]
update: Callable[ # Function used to apply a transformation.
[Updates, OptState, Optional[Params]], Tuple[Updates, OptState]]
class ClipByGlobalNormState(OptState):
"""The `clip_by_global_norm` transformation is stateless."""
def unitwise_norm(x):
"""Computes norms of each output unit separately."""
if len(jnp.squeeze(x).shape) <= 1: # Scalars and vectors
axis = None
keepdims = False
elif len(x.shape) in [2, 3]: # Linear layers of shape IO or multihead linear
axis = 0
keepdims = True
elif len(x.shape) == 4: # Conv kernels of shape HWIO
axis = [0, 1, 2,]
keepdims = True
else:
raise ValueError(f'Got a parameter with shape not in [1, 2, 3, 4]! {x}')
return jnp.sum(x ** 2, axis=axis, keepdims=keepdims) ** 0.5
def unitwise_clip(g_norm, max_norm, grad):
"""Applies gradient clipping unit-wise."""
trigger = g_norm < max_norm
# This little max(., 1e-6) is distinct from the normal eps and just prevents
# division by zero. It technically should be impossible to engage.
clipped_grad = grad * (max_norm / jnp.maximum(g_norm, 1e-6))
return jnp.where(trigger, grad, clipped_grad)
def adaptive_grad_clip(clipping, eps=1e-3) -> GradientTransformation:
"""Clip updates to be at most clipping * parameter_norm, unit-wise.
References:
[Brock, Smith, De, Simonyan 2021] High-Performance Large-Scale Image
Recognition Without Normalization. (https://arxiv.org/abs/2102.06171)
Args:
clipping: Maximum allowed ratio of update norm to parameter norm.
eps: epsilon term to prevent clipping of zero-initialized params.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return ClipByGlobalNormState()
def update_fn(updates, state, params):
g_norm = jax.tree_map(unitwise_norm, updates)
p_norm = jax.tree_map(unitwise_norm, params)
# Maximum allowable norm
max_norm = jax.tree_map(lambda x: clipping * jnp.maximum(x, eps), p_norm)
# If grad norm > clipping * param_norm, rescale
updates = jax.tree_map(unitwise_clip, g_norm, max_norm, updates)
return updates, state
return GradientTransformation(init_fn, update_fn)
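# Usage sketch, not part of the original module: applying adaptive gradient
# clipping to a toy parameter/gradient tree. Shapes and values are arbitrary.
def _demo_adaptive_grad_clip():
  params = {'w': jnp.ones((3, 4)), 'b': jnp.zeros((4,))}
  grads = {'w': 10.0 * jnp.ones((3, 4)), 'b': jnp.ones((4,))}
  tx = adaptive_grad_clip(clipping=0.01)
  state = tx.init(params)
  clipped_grads, _ = tx.update(grads, state, params)
  # Per-unit gradient norms are clipped to roughly clipping * max(param_norm, eps).
  return jax.tree_map(jnp.linalg.norm, clipped_grads)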
# ithaca-main | ithaca/util/optim.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alphabet classes."""
import re
import numpy as np
class Alphabet:
"""Generic alphabet class."""
def __init__(self,
alphabet,
numerals='0',
punctuation='.',
space=' ',
missing='-',
pad='#',
unk='^',
sos='<',
sog='[',
eog=']',
wordlist_file=None,
wordlist_size=100000):
self.alphabet = list(alphabet) # alph
self.numerals = list(numerals) # num
    self.punctuation = list(punctuation)  # punctuation
self.space = space # spacing
self.missing = missing # missing char
self.pad = pad # padding (spaces to right of string)
self.unk = unk # unknown char
self.sos = sos # start of sentence
self.sog = sog # start of guess
self.eog = eog # end of guess
# Define wordlist mapping
idx2word = [self.pad, self.sos, self.unk]
if wordlist_file:
idx2word += [
w_c.split(';')[0]
for w_c in wordlist_file.read().strip().split('\n')[:wordlist_size]
]
self.idx2word = np.array(idx2word)
self.word2idx = {self.idx2word[i]: i for i in range(len(self.idx2word))}
# Define vocab mapping
self.idx2char = np.array(
[self.pad, self.sos, self.unk, self.space, self.missing] +
self.alphabet + self.numerals + self.punctuation)
self.char2idx = {self.idx2char[i]: i for i in range(len(self.idx2char))}
# Define special character indices
self.pad_idx = self.char2idx[pad]
self.sos_idx = self.char2idx[sos]
self.unk_idx = self.char2idx[unk]
self.alphabet_start_idx = self.char2idx[self.alphabet[0]]
self.alphabet_end_idx = self.char2idx[self.numerals[-1]]
def filter(self, t):
return t
def size_char(self):
return len(self.idx2char)
def size_word(self):
return len(self.idx2word)
class GreekAlphabet(Alphabet):
"""Greek alphabet class."""
def __init__(self, wordlist_file=None, wordlist_size=100000):
greek_alphabet = 'αβγδεζηθικλμνξοπρςστυφχψωϙϛ'
super().__init__(
alphabet=greek_alphabet,
wordlist_file=wordlist_file,
wordlist_size=wordlist_size)
self.tonos_to_oxia = {
# tonos : #oxia
u'\u0386': u'\u1FBB', # capital letter alpha
u'\u0388': u'\u1FC9', # capital letter epsilon
u'\u0389': u'\u1FCB', # capital letter eta
u'\u038C': u'\u1FF9', # capital letter omicron
u'\u038A': u'\u1FDB', # capital letter iota
u'\u038E': u'\u1FF9', # capital letter upsilon
u'\u038F': u'\u1FFB', # capital letter omega
u'\u03AC': u'\u1F71', # small letter alpha
u'\u03AD': u'\u1F73', # small letter epsilon
u'\u03AE': u'\u1F75', # small letter eta
u'\u0390': u'\u1FD3', # small letter iota with dialytika and tonos/oxia
u'\u03AF': u'\u1F77', # small letter iota
u'\u03CC': u'\u1F79', # small letter omicron
u'\u03B0': u'\u1FE3',
# small letter upsilon with dialytika and tonos/oxia
u'\u03CD': u'\u1F7B', # small letter upsilon
u'\u03CE': u'\u1F7D' # small letter omega
}
self.oxia_to_tonos = {v: k for k, v in self.tonos_to_oxia.items()}
def filter(self, t): # override previous filter function
# lowercase
t = t.lower()
# replace dot below
t = t.replace(u'\u0323', '')
# replace perispomeni
t = t.replace(u'\u0342', '')
t = t.replace(u'\u02C9', '')
# replace ending sigma
t = re.sub(r'([\w\[\]])σ(?![\[\]])(\b)', r'\1ς\2', t)
# replace oxia with tonos
for oxia, tonos in self.oxia_to_tonos.items():
t = t.replace(oxia, tonos)
# replace h
h_patterns = {
# input: #target
'ε': 'ἑ',
'ὲ': 'ἓ',
'έ': 'ἕ',
'α': 'ἁ',
'ὰ': 'ἃ',
'ά': 'ἅ',
'ᾶ': 'ἇ',
'ι': 'ἱ',
'ὶ': 'ἳ',
'ί': 'ἵ',
'ῖ': 'ἷ',
'ο': 'ὁ',
'ό': 'ὅ',
'ὸ': 'ὃ',
'υ': 'ὑ',
'ὺ': 'ὓ',
'ύ': 'ὕ',
'ῦ': 'ὗ',
'ὴ': 'ἣ',
'η': 'ἡ',
'ή': 'ἥ',
'ῆ': 'ἧ',
'ὼ': 'ὣ',
'ώ': 'ὥ',
'ω': 'ὡ',
'ῶ': 'ὧ'
}
# iterate by keys
for h_in, h_tar in h_patterns.items():
# look up and replace h[ and h]
t = re.sub(r'ℎ(\[?){}'.format(h_in), r'\1{}'.format(h_tar), t)
t = re.sub(r'ℎ(\]?){}'.format(h_in), r'{}\1'.format(h_tar), t)
# any h left is an ἡ
t = re.sub(r'(\[?)ℎ(\]?)', r'\1ἡ\2', t)
return t
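# Illustrative sketch, not part of the original module: the filter lowercases,
# strips stray diacritics and rewrites word-final sigmas to ς. The input string
# below is an arbitrary example.
def _demo_greek_alphabet():
  alphabet = GreekAlphabet()
  filtered = alphabet.filter('ΜΗΝΙΣ')
  return filtered, alphabet.size_char(), alphabet.size_word()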
# ithaca-main | ithaca/util/alphabet.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common layers used in models.
This implementation is from the Long Range Arena:
https://github.com/google-research/long-range-arena/tree/main/lra_benchmarks/models/bigbird
"""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
from typing import Any, Callable, Iterable, Optional
from flax import linen as nn
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
import numpy as np
PRNGKey = Any
Array = Any
Shape = Iterable[int]
Dtype = Any # this could be a real type?
ACTIVATION_FN_DICT = {
'relu': nn.relu,
'gelu': nn.gelu,
}
def grid_restack(all_vecs):
"""Grid restack for meta-performer.
Given multiple sequences (lists) of batch x len x dim,
reshape this such that all positions are side by side.
for example (for illustrative purposes):
inputs: [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]
outputs: [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]
Args:
all_vecs: list of sequences of batch x len x dim
Returns:
Array of batch x (length x num_items) x dim.
"""
cat_output = []
for pos in range(all_vecs[0].shape[1]):
pos_vecs = [x[:, None, pos, :] for x in all_vecs]
cat_output += pos_vecs
x2 = jnp.concatenate(cat_output, 1)
return x2
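# Illustrative sketch, not part of the original module: interleaving three toy
# sequences position by position, matching the example in the docstring above.
def _demo_grid_restack():
  a = jnp.arange(4).reshape(1, 4, 1) + 1   # [1, 2, 3, 4]
  b = jnp.arange(4).reshape(1, 4, 1) + 5   # [5, 6, 7, 8]
  c = jnp.arange(4).reshape(1, 4, 1) + 9   # [9, 10, 11, 12]
  # Result along the middle axis: [1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12]
  return grid_restack([a, b, c])[0, :, 0]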
def shift_right(x):
"""Shift the input to the right by padding on axis 1."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[1] = (1, 0) # Padding on axis=1
padded = jnp.pad(
x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
return padded[:, :-1]
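# Illustrative sketch, not part of the original module: shift_right prepends a
# zero along the time axis and drops the last step (teacher-forcing style).
def _demo_shift_right():
  x = jnp.arange(1, 7).reshape(1, 6)
  return shift_right(x)  # [[0, 1, 2, 3, 4, 5]]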
class Embed(nn.Module):
"""Embedding Module.
A parameterized function from integers [0, n) to d-dimensional vectors.
Attributes:
mode: either 'input' or 'output' -> to share input/output embedding
emb_init: embedding initializer
"""
mode: str = 'input'
emb_init: Callable = nn.initializers.normal(stddev=1.0)
@nn.compact
def __call__(self, inputs, num_embeddings, features):
"""Applies Embed module.
Args:
inputs: input data
      num_embeddings: number of embeddings
features: size of the embedding dimension
Returns:
output which is embedded input data
"""
embedding = self.param('embedding', self.emb_init,
(num_embeddings, features))
if self.mode == 'input':
if inputs.dtype not in [jnp.int32, jnp.int64, jnp.uint32, jnp.uint64]:
raise ValueError('Input type must be an integer or unsigned integer.')
return jnp.take(embedding, inputs, axis=0)
if self.mode == 'output':
return jnp.einsum('bld,vd->blv', inputs, embedding)
def sinusoidal_init(max_len=2048, replicate_tf=False):
"""1D Sinusoidal Position Embedding Initializer.
Args:
max_len: maximum possible length for the input
replicate_tf: replicate TF periodic encoding exactly
Returns:
output: init function returning `(1, max_len, d_feature)`
"""
def init(key, shape, dtype=np.float32):
"""Sinusoidal init."""
del key, dtype
d_feature = shape[-1]
pe = np.zeros((max_len, d_feature), dtype=np.float32)
position = np.arange(0, max_len)[:, np.newaxis]
if replicate_tf:
half_d_feature = d_feature // 2
div_term = np.exp(
np.arange(half_d_feature) * -(np.log(10000.0) / (half_d_feature - 1)))
pe[:, :half_d_feature] = np.sin(position * div_term)
pe[:, half_d_feature:] = np.cos(position * div_term)
else:
div_term = np.exp(
np.arange(0, d_feature, 2) * -(np.log(10000.0) / d_feature))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
pe = pe[np.newaxis, :, :] # [1, max_len, d_feature]
return jnp.array(pe)
return init
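# Illustrative sketch, not part of the original module: the initializer ignores
# the key and dtype arguments and returns a fixed table of shape
# (1, max_len, d_feature). The sizes below are arbitrary.
def _demo_sinusoidal_init():
  init_fn = sinusoidal_init(max_len=16)
  table = init_fn(None, (1, 16, 8))
  return table.shape  # (1, 16, 8)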
class AddPositionEmbs(nn.Module):
"""Adds (optionally learned) positional embeddings to the inputs.
Attributes:
posemb_init: positional embedding initializer, if None, then use a fixed
(non-learned) sinusoidal embedding table.
max_len: maximum possible length for the input.
    replicate_tf: replicate the TF periodic encoding exactly
"""
posemb_init: Optional[Callable] = None
posemb_dim: Optional[int] = None
max_len: int = 512
combine_type: str = 'concat'
replicate_tf: bool = False
@nn.compact
def __call__(self, inputs, inputs_positions=None, cache=None):
"""Applies AddPositionEmbs module.
By default this layer uses a fixed sinusoidal embedding table. If a
learned position embedding is desired, pass an initializer to
posemb_init.
Args:
inputs: input data.
inputs_positions: input position indices for packed sequences.
cache: flax attention cache for fast decoding.
Returns:
output: `(bs, timesteps, in_dim)`
"""
# inputs.shape is (batch_size, seq_len, emb_dim)
assert inputs.ndim == 3, ('Number of dimensions should be 3,'
' but it is: %d' % inputs.ndim)
batch_size = inputs.shape[0]
length = inputs.shape[1]
if self.posemb_dim is None or self.combine_type == 'add':
self.posemb_dim = inputs.shape[-1]
pos_emb_shape = (1, self.max_len, self.posemb_dim)
if self.posemb_init is None:
# Use a fixed (non-learned) sinusoidal position embedding.
pos_embedding = sinusoidal_init(
max_len=self.max_len,
replicate_tf=self.replicate_tf,
)(None, pos_emb_shape, None)
else:
pos_embedding = self.param('pos_embedding', self.posemb_init,
pos_emb_shape)
pe = pos_embedding[:, :length, :]
# We abuse the same attention Cache mechanism to run positional embeddings
# in fast predict mode. We could use state variables instead, but this
# simplifies invocation with a single top-level cache context manager.
# We only use the cache's position index for tracking decoding position.
if cache:
if self.is_initializing():
cache.store(np.array((4, 1, 1), dtype=np.int32))
else:
cache_entry = cache.retrieve(None)
i = cache_entry.i
cache.store(cache_entry.replace(i=cache_entry.i + 1))
_, _, df = pos_embedding.shape
pe = lax.dynamic_slice(pos_embedding, jnp.array((0, i, 0)),
jnp.array((1, 1, df)))
if inputs_positions is None:
# normal unpacked case:
if self.combine_type == 'add':
return inputs + pe
elif self.combine_type == 'concat':
pe_broadcast = np.repeat(pe, batch_size, axis=0)
return lax.concatenate([inputs, pe_broadcast], 2)
else:
raise ValueError('Wrong type value.')
else:
# for packed data we need to use known position indices:
return inputs + jnp.take(pe[0], inputs_positions, axis=0)
class MlpBlock(nn.Module):
"""Transformer MLP block."""
mlp_dim: int
dtype: Any = jnp.float32
out_dim: Optional[int] = None
out_dropout: bool = True
dropout_rate: float = 0.1
deterministic: bool = False
kernel_init: Callable = nn.initializers.xavier_uniform()
bias_init: Callable = nn.initializers.normal(stddev=1e-6)
activation_fn: str = 'gelu'
@nn.compact
def __call__(self, inputs):
"""Applies Transformer MlpBlock module."""
actual_out_dim = inputs.shape[-1] if self.out_dim is None else self.out_dim
x = nn.Dense(
self.mlp_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init)(
inputs)
x = ACTIVATION_FN_DICT[self.activation_fn](x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=self.deterministic)
output = nn.Dense(
actual_out_dim,
dtype=self.dtype,
kernel_init=self.kernel_init,
bias_init=self.bias_init)(
x)
if self.out_dropout:
output = nn.Dropout(rate=self.dropout_rate)(
output, deterministic=self.deterministic)
return output
def classifier_head(encoded, num_classes, mlp_dim, pooling_mode='MEAN'):
"""Classifier head.
We put this here just so that all models consistently call the same function.
Args:
encoded: tensor inputs are shape of [bs, len, dim].
num_classes: int, number of classes
mlp_dim: int, dim of intermediate MLP.
    pooling_mode: str, pooling op in {MEAN, SUM, FLATTEN, CLS}
Returns:
tensor of shape [bs, num_classes]
"""
if pooling_mode == 'MEAN':
encoded = jnp.mean(encoded, axis=1)
elif pooling_mode == 'SUM':
encoded = jnp.sum(encoded, axis=1)
elif pooling_mode == 'FLATTEN':
encoded = encoded.reshape((encoded.shape[0], -1))
elif pooling_mode == 'CLS':
encoded = encoded[:, 0]
else:
raise NotImplementedError('Pooling not supported yet.')
encoded = nn.Dense(mlp_dim, name='mlp')(encoded)
encoded = nn.relu(encoded)
encoded = nn.Dense(num_classes, name='logits')(encoded)
return encoded
class LayerNorm(nn.Module):
"""Layer Norm to replicate tf.contrib."""
epsilon: Optional[float] = None
dtype: Any = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
@nn.compact
def __call__(self, x):
if self.epsilon is None:
epsilon = 1e-12 if self.dtype != jnp.float16 else 1e-3
else:
epsilon = self.epsilon
x = jnp.asarray(x, jnp.float32)
features = x.shape[-1]
mean = jnp.mean(x, axis=-1, keepdims=True)
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
var = mean2 - lax.square(mean)
mul = lax.rsqrt(var + epsilon)
if self.use_scale:
mul = mul * jnp.asarray(
self.param('scale', self.scale_init, (features,)), self.dtype)
y = x * mul
if self.use_bias:
y = y + jnp.asarray(
self.param('bias', self.bias_init, (features,)), self.dtype)
y -= mean * mul
return jnp.asarray(y, self.dtype)
# ithaca-main | ithaca/models/common_layers.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer using BigBird (https://arxiv.org/abs/2007.14062).
This implementation is from the Long Range Arena:
https://github.com/google-research/long-range-arena/tree/main/lra_benchmarks/models/bigbird
"""
from typing import Any, Optional
from . import bigbird_attention
from . import common_layers
from flax import linen as nn
import jax.numpy as jnp
_DEFAULT_BLOCK_SIZE = 64
_DEFAULT_NUM_RAND_BLOCKS = 3
class BigBirdBlock(nn.Module):
"""BigBird layer (https://arxiv.org/abs/2007.14062).
Attributes:
qkv_dim: dimension of the query/key/value
mlp_dim: dimension of the mlp on top of attention block
num_heads: number of heads
dtype: the dtype of the computation (default: float32).
causal_mask: bool, mask future or not
dropout_rate: dropout rate
attention_dropout_rate: dropout rate for attention weights
deterministic: bool, deterministic or not (to apply dropout)
activation_fn: Activation function ("relu", "gelu")
block_size: Size of attention blocks.
num_rand_blocks: Number of random blocks.
connectivity_seed: Optional seed for random block sparse attention.
"""
qkv_dim: Any
mlp_dim: Any
num_heads: Any
dtype: Any = jnp.float32
causal_mask: bool = False
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
deterministic: bool = False
activation_fn: str = 'relu'
block_size: int = _DEFAULT_BLOCK_SIZE
num_rand_blocks: int = _DEFAULT_NUM_RAND_BLOCKS
connectivity_seed: Optional[int] = None
@nn.compact
def __call__(self, inputs, inputs_segmentation=None, padding_mask=None):
"""Applies BigBirdBlock module.
Args:
inputs: input data
inputs_segmentation: input segmentation info for packed examples.
padding_mask: bool, mask padding tokens, [b, l, 1]
Returns:
output after transformer block.
"""
# Attention block.
assert inputs.ndim == 3
x = common_layers.LayerNorm(dtype=self.dtype)(inputs)
x = bigbird_attention.BigBirdSelfAttention(
num_heads=self.num_heads,
dtype=self.dtype,
qkv_features=self.qkv_dim,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6),
use_bias=False,
broadcast_dropout=False,
dropout_rate=self.attention_dropout_rate,
deterministic=self.deterministic,
block_size=self.block_size,
num_rand_blocks=self.num_rand_blocks,
connectivity_seed=self.connectivity_seed)(
x,
segmentation=inputs_segmentation,
padding_mask=padding_mask,
)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=self.deterministic)
x = x + inputs
# MLP block.
y = common_layers.LayerNorm(dtype=self.dtype)(x)
y = common_layers.MlpBlock(
mlp_dim=self.mlp_dim,
dtype=self.dtype,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic,
activation_fn=self.activation_fn)(
y)
return x + y
# ithaca-main | ithaca/models/bigbird.py
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Big Bird attention mechanism.
See https://arxiv.org/abs/2007.14062.
This implementation is from the Long Range Arena:
https://github.com/google-research/long-range-arena/tree/main/lra_benchmarks/models/bigbird
"""
# pylint: disable=attribute-defined-outside-init,g-bare-generic
import functools
from typing import Any, Callable, Optional
from absl import logging
from flax import linen as nn
import jax
import jax.numpy as jnp
import numpy as np
def get_block_rand_mask(m, n, wm, wn, r, last_idx=-1):
"""This function creates the m by n mask for random block sparse mask.
Args:
m: input size
n: output size
wm: block input size
wn: block output size
r: number of random block per row
    last_idx: if -1 then r blocks are chosen throughout the n space; if
      positive then r blocks are chosen at random up to last_idx
Returns:
blocked mask of size m//wm -2 by r
"""
if (m // wm) != (n // wn):
    logging.info('Error: the number of blocks needs to be the same.')
rand_attn = np.zeros((m // wm - 2, r), dtype=jnp.int64)
a = np.array(range(1, n // wn - 1))
last = (m // wn) - 1
if last_idx > (2 * wn):
last = (last_idx // wn) - 1
for i in range(1, m // wm - 1):
start = i - 2
end = i
if i == 1:
rand_attn[i - 1, :] = np.random.permutation(a[2:last])[:r]
elif i == 2:
rand_attn[i - 1, :] = np.random.permutation(a[3:last])[:r]
elif i == m // wm - 3:
rand_attn[i - 1, :] = np.random.permutation(a[:last - 4])[:r]
elif i == m // wm - 2:
rand_attn[i - 1, :] = np.random.permutation(a[:last - 3])[:r]
else:
if start > last:
start = last
rand_attn[i - 1, :] = np.random.permutation(a[:start])[:r]
elif (end + 1) == last:
rand_attn[i - 1, :] = np.random.permutation(a[:start])[:r]
else:
rand_attn[i - 1, :] = np.random.permutation(
np.concatenate((a[:start], a[end + 1:last])))[:r]
return rand_attn
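# Illustrative sketch, not part of the original module: for a toy 512-token
# sequence split into 64-token blocks, each non-boundary query block gets r
# random key-block indices. The sizes below are arbitrary examples.
def _demo_get_block_rand_mask():
  np.random.seed(0)  # fixed seed for the sketch only
  rand_mask = get_block_rand_mask(512, 512, 64, 64, r=3)
  return rand_mask.shape  # (512 // 64 - 2, 3) == (6, 3)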
def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
Returns:
float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4,
from_block_size, 3*to_block_size].
"""
exp_blocked_to_pad = jnp.concatenate([
to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:,
3:-1]
], 2)
band_pad = jnp.einsum('BLQ,BLK->BLQK', from_blocked_mask[:, 2:-2],
exp_blocked_to_pad)
band_pad = jnp.expand_dims(band_pad, 1)
return band_pad
def create_rand_mask_from_inputs(from_blocked_mask, to_blocked_mask, rand_attn):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, rsize]
Returns:
float Tensor of shape [batch_size, num_attention_heads,
from_seq_length//from_block_size-2,
from_block_size, 3*to_block_size].
"""
# batch_size, num_attention_heads, num_windows, _ = get_shape_list(
# rand_attn, expected_rank=4)
batch_size, num_attention_heads, num_windows, _ = rand_attn.shape
rand_pad = jnp.reshape(
# Equivalent to tf.gather(to_blocked_mask, rand_attn, batch_dims=1)
gather_1(to_blocked_mask, rand_attn),
[batch_size, num_attention_heads, num_windows, -1])
rand_pad = jnp.einsum('BLQ,BHLK->BHLQK', from_blocked_mask[:, 1:-1], rand_pad)
return rand_pad
@jax.vmap
def gather_1(params, indices):
return jnp.take(params, indices, axis=0)
@jax.vmap
def gather_2(params, indices):
return gather_1(params, indices)
def band_start_block_rand_multi_attention_pad(query_matrix, key_matrix,
value_matrix, rand_attn, band_pad,
rand_pad, seq_m_pad, seq_n_pad, b,
h, m, wm, n, wn, r, d):
"""Applies sparse block band rand attention in hopefully efficient way.
Args:
query_matrix: b, h, n, d
key_matrix: b, h, n, d
value_matrix: b, h, n, d
rand_attn: b, h, m//wm-2, r
band_pad: b, 1, m//wm-4, wm, 3*wn
rand_pad: b, h, m//wm-2, wm, r*wn
seq_m_pad: b, 1, m, 1
seq_n_pad: b, 1, 1, n
b: batch size
h: number of head
m: from_length
wm: from window size
n: to length
wn: to window size
r: number of rand blocks
d: hidden dimension
Returns:
context layer. b, m, h, -1
attention weights. [b, h, m//wm-4, wm, (5+r)*wn]
"""
blocked_query_matrix = jnp.reshape(query_matrix, (b, h, m // wm, wm, -1))
blocked_key_matrix = jnp.reshape(key_matrix, (b, h, n // wn, wn, -1))
blocked_value_matrix = jnp.reshape(value_matrix, (b, h, n // wn, wn, -1))
# tf.gather(blocked_key_matrix, rand_attn, batch_dims=2, name='gather_key'),
gathered_key = jnp.reshape(
gather_2(blocked_key_matrix, rand_attn),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
# tf.gather(
# blocked_value_matrix, rand_attn, batch_dims=2, name='gather_value')
gathered_value = jnp.reshape(
gather_2(blocked_value_matrix, rand_attn),
(b, h, m // wm - 2, r * wn, -1)) # [b, h, n//wn-2, r, wn, -1]
first_product = jnp.einsum(
'BHQD,BHKD->BHQK', blocked_query_matrix[:, :, 0],
key_matrix) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
first_product = first_product / jnp.sqrt(d)
first_product += (1.0 - seq_n_pad) * -10000.0
first_attn_weights = jax.nn.softmax(first_product) # [b, h, wm, n]
first_context_layer = jnp.einsum(
'BHQK,BHKD->BHQD', first_attn_weights,
value_matrix) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
first_context_layer = jnp.expand_dims(first_context_layer, 2)
second_key_mat = jnp.concatenate([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :,
-1], gathered_key[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_value_mat = jnp.concatenate([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0]
], 2) # [b, h, (4+r)*wn, -1]
second_product = jnp.einsum(
'BHQD,BHKD->BHQK', blocked_query_matrix[:, :, 1], second_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_seq_pad = jnp.concatenate([
seq_n_pad[:, :, :, :3 * wn], seq_n_pad[:, :, :, -wn:],
jnp.ones([b, 1, 1, r * wn], dtype=jnp.float32)
], 3)
second_rand_pad = jnp.concatenate(
[jnp.ones([b, h, wm, 4 * wn], dtype=jnp.float32), rand_pad[:, :, 0]], 3)
second_product = second_product / jnp.sqrt(d)
second_product += (1.0 -
jnp.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = jax.nn.softmax(second_product) # [b , h, wm, (4+r)*wn]
second_context_layer = jnp.einsum(
'BHQK,BHKD->BHQD', second_attn_weights, second_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_context_layer = jnp.expand_dims(second_context_layer, 2)
exp_blocked_key_matrix = jnp.concatenate([
blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2],
blocked_key_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
exp_blocked_value_matrix = jnp.concatenate([
blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2],
blocked_value_matrix[:, :, 3:-1]
], 3) # [b, h, m//wm-4, 3*wn, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
inner_band_product = jnp.einsum(
'BHLQD,BHLKD->BHLQK', middle_query_matrix, exp_blocked_key_matrix
) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, 3*wn]
inner_band_product = inner_band_product / jnp.sqrt(d)
rand_band_product = jnp.einsum(
'BHLQD,BHLKD->BHLQK', middle_query_matrix,
gathered_key[:, :,
1:-1]) # [b, h, m//wm-4, wm, -1] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, r*wn]
rand_band_product = rand_band_product / jnp.sqrt(d)
first_band_product = jnp.einsum(
'BHLQD,BHKD->BHLQK', middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
first_band_product = first_band_product / jnp.sqrt(d)
last_band_product = jnp.einsum(
'BHLQD,BHKD->BHLQK', middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, -1] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, wn]
last_band_product = last_band_product / jnp.sqrt(d)
inner_band_product += (1.0 - band_pad) * -10000.0
first_band_product += (1.0 -
jnp.expand_dims(seq_n_pad[:, :, :, :wn], 3)) * -10000.0
last_band_product += (1.0 -
jnp.expand_dims(seq_n_pad[:, :, :, -wn:], 3)) * -10000.0
rand_band_product += (1.0 - rand_pad[:, :, 1:-1]) * -10000.0
band_product = jnp.concatenate([
first_band_product, inner_band_product, rand_band_product,
last_band_product
], -1) # [b, h, m//wm-4, wm, (5+r)*wn]
attn_weights = jax.nn.softmax(band_product) # [b, h, m//wm-4, wm, (5+r)*wn]
context_layer = jnp.einsum(
'BHLQK,BHLKD->BHLQD', attn_weights[:, :, :, :,
wn:4 * wn], exp_blocked_value_matrix
) # [b, h, m//wm-4, wm, 3*wn] x [b, h, m//wm-4, 3*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += jnp.einsum(
'BHLQK,BHLKD->BHLQD', attn_weights[:, :, :, :,
4 * wn:-wn], gathered_value[:, :, 1:-1]
) # [b, h, m//wm-4, wm, r*wn] x [b, h, m//wm-4, r*wn, -1]
# ==> [b, h, m//wm-4, wm, -1]
context_layer += jnp.einsum(
'BHLQK,BHKD->BHLQD', attn_weights[:, :, :, :, :wn],
blocked_value_matrix[:, :, 0]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
context_layer += jnp.einsum(
'BHLQK,BHKD->BHLQD', attn_weights[:, :, :, :,
-wn:], blocked_value_matrix[:, :, -1]
) # [b, h, m//wm-4, wm, wn] x [b, h, wn, -1] ==> [b, h, m//wm-4, wm, -1]
second_last_key_mat = jnp.concatenate([
blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_value_mat = jnp.concatenate([
blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1]
], 2) # [b, h, (4+r)*wn, -1]
second_last_product = jnp.einsum(
'BHQD,BHKD->BHQK', blocked_query_matrix[:, :, -2], second_last_key_mat
) # [b, h, wm, -1] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, (4+r)*wn]
second_last_seq_pad = jnp.concatenate([
seq_n_pad[:, :, :, :wn], seq_n_pad[:, :, :, -3 * wn:],
jnp.ones([b, 1, 1, r * wn], dtype=jnp.float32)
], 3)
second_last_rand_pad = jnp.concatenate(
[jnp.ones([b, h, wm, 4 * wn], dtype=jnp.float32), rand_pad[:, :, -1]], 3)
second_last_product = second_last_product / jnp.sqrt(d)
second_last_product += (
1.0 - jnp.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = jax.nn.softmax(
second_last_product) # [b, h, wm, (4+r)*wn]
second_last_context_layer = jnp.einsum(
'BHQK,BHKD->BHQD', second_last_attn_weights, second_last_value_mat
) # [b, h, wm, (4+r)*wn] x [b, h, (4+r)*wn, -1] ==> [b, h, wm, -1]
second_last_context_layer = jnp.expand_dims(second_last_context_layer, 2)
last_product = jnp.einsum(
'BHQD,BHKD->BHQK', blocked_query_matrix[:, :, -1],
key_matrix) # [b, h, wm, -1] x [b, h, n, -1] ==> [b, h, wm, n]
last_product = last_product / jnp.sqrt(d)
last_product += (1.0 - seq_n_pad) * -10000.0
last_attn_weights = jax.nn.softmax(last_product) # [b, h, wm, n]
last_context_layer = jnp.einsum(
'BHQK,BHKD->BHQD', last_attn_weights,
value_matrix) # [b, h, wm, n] x [b, h, n, -1] ==> [b, h, wm, -1]
last_context_layer = jnp.expand_dims(last_context_layer, 2)
context_layer = jnp.concatenate([
first_context_layer, second_context_layer, context_layer,
second_last_context_layer, last_context_layer
], 2)
context_layer = jnp.reshape(context_layer, (b, h, m, -1)) * seq_m_pad
context_layer = jnp.transpose(context_layer, (0, 2, 1, 3))
return context_layer, attn_weights
def sparse_dot_product_attention(queries,
keys,
values,
connectivity_seed,
input_mask=None,
block_size=64,
num_rand_blocks=3):
"""Implements sparse dot product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights. This
function supports multi-dimensional inputs.
Args:
queries: queries for calculating attention with shape of `[batch_size,
length, num_heads, mem_channels]`.
keys: keys for calculating attention with shape of `[batch_size, length,
num_heads, mem_channels]`.
values: values to be used in attention with shape of `[batch_size, length,
num_heads, value_channels]`.
connectivity_seed: Integer seed for generating connectivity graph.
input_mask: Optional mask for keys/values with shape `[batch_size, length]`
and the same dtype.
block_size: Size for local attention around diagonal of attention.
num_rand_blocks: int. Number of random chunks per row.
Returns:
Output of shape `[bs, length, num_heads, value_channels]`.
"""
(batch_size, to_seq_length, num_attention_heads, hidden_size) = keys.shape
from_seq_length = queries.shape[1]
seq_length = max(to_seq_length, from_seq_length)
queries = jnp.pad(queries,
((0, 0), (0, seq_length - from_seq_length), (0, 0), (0, 0)))
keys = jnp.pad(keys,
((0, 0), (0, seq_length - to_seq_length), (0, 0), (0, 0)))
values = jnp.pad(values,
((0, 0), (0, seq_length - to_seq_length), (0, 0), (0, 0)))
if input_mask is None:
input_mask = jnp.ones((batch_size, seq_length), dtype=keys.dtype)
else:
input_mask = jnp.pad(
input_mask,
tuple((0, seq_length - size) if i == 1 else (0, 0)
for i, size in enumerate(input_mask.shape)))
np.random.seed(connectivity_seed)
# pylint: disable=g-complex-comprehension
rand_attn = [
get_block_rand_mask(
seq_length,
seq_length,
block_size,
block_size,
num_rand_blocks,
last_idx=min(seq_length, 1024)) for _ in range(num_attention_heads)
]
# pylint: enable=g-complex-comprehension
rand_attn = jnp.stack(rand_attn, axis=0)
rand_attn = jnp.expand_dims(rand_attn, 0)
rand_attn = jnp.repeat(rand_attn, batch_size, 0)
# reshape and cast for blocking
blocked_input_mask = jnp.reshape(
input_mask, (batch_size, seq_length // block_size, block_size))
input_mask = jnp.reshape(input_mask, (batch_size, 1, seq_length, 1))
output_mask = jnp.reshape(input_mask, (batch_size, 1, 1, seq_length))
# create band padding
band_pad = create_band_mask_from_inputs(blocked_input_mask,
blocked_input_mask)
rand_pad = create_rand_mask_from_inputs(blocked_input_mask,
blocked_input_mask, rand_attn)
queries = jnp.transpose(queries, (0, 2, 1, 3))
keys = jnp.transpose(keys, (0, 2, 1, 3))
values = jnp.transpose(values, (0, 2, 1, 3))
# sparse mask
context_layer, _ = band_start_block_rand_multi_attention_pad(
queries, keys, values, rand_attn, band_pad, rand_pad, input_mask,
output_mask, batch_size, num_attention_heads, seq_length, block_size,
seq_length, block_size, num_rand_blocks, hidden_size)
return context_layer[:, :from_seq_length, ...]
class BigBirdAttention(nn.Module):
"""Multi-head dot-product attention.
Attributes:
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
block_size: Size for local attention around diagonal of attention.
num_rand_blocks: int. Number of random chunks per row.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
use_bias: bool: whether pointwise QKVO dense transforms use bias.
connectivity_seed: Seed for random block sparse attention.
"""
num_heads: int
block_size: int = 64
num_rand_blocks: int = 3
dtype: Any = jnp.float32
qkv_features: Optional[int] = None
out_features: Optional[int] = None
broadcast_dropout: bool = True
dropout_rate: float = 0.
deterministic: bool = False
precision: Any = None
kernel_init: Callable = nn.linear.default_kernel_init
bias_init: Callable = nn.initializers.zeros
use_bias: bool = True
connectivity_seed: Optional[int] = None
@nn.compact
def __call__(self,
inputs_q,
inputs_kv,
padding_mask=None,
segmentation=None,
dropout_rng=None):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and projects the results to an output vector.
    This can be used for encoder-decoder attention by specifying both `inputs_q`
    and `inputs_kv`, or for self-attention by only specifying `inputs_q` and
setting `inputs_kv` to None.
Args:
inputs_q: input queries of shape `[bs, length, features]`.
inputs_kv: key/values of shape `[bs, length, features]` or None for
        self-attention, in which case key/values will be derived from inputs_q.
      padding_mask: boolean mask specifying which query tokens are padding, of
        shape `[b, l, 1]`.
segmentation: segment indices for packed inputs_q data.
dropout_rng: JAX PRNGKey: to be used for dropout
Returns:
output of shape `[bs, length, features]`.
"""
orig_seqlen = inputs_q.shape[-2]
extra_len = self.block_size - (orig_seqlen % self.block_size)
pad_width = np.array([[0, 0], [0, extra_len], [0, 0]])
mask_pad = np.array([[0, 0], [0, extra_len], [0, 0]])
padding_mask = jnp.pad(padding_mask, mask_pad, constant_values=-1e9)
inputs_q = jnp.pad(inputs_q, pad_width)
if inputs_kv is not None:
inputs_kv = jnp.pad(inputs_kv, pad_width)
if inputs_kv is None:
inputs_kv = inputs_q
features = self.out_features or inputs_q.shape[-1]
qkv_features = self.qkv_features or inputs_q.shape[-1]
assert qkv_features % self.num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // self.num_heads
dense = functools.partial(
nn.DenseGeneral,
axis=-1,
features=(self.num_heads, head_dim),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
precision=self.precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [bs, dims..., n_heads, n_features_per_head]
query, key, value = (dense(dtype=self.dtype, name='query')(inputs_q),
dense(dtype=self.dtype, name='key')(inputs_kv),
dense(dtype=self.dtype, name='value')(inputs_kv))
if self.connectivity_seed is None:
path = self._get_construction_frame().path
connectivity_seed = hash(path) % 2**32
else:
connectivity_seed = self.connectivity_seed
# apply attention
input_mask = None
if padding_mask is not None:
input_mask = padding_mask.astype(key.dtype)
x = sparse_dot_product_attention(
query,
key,
value,
connectivity_seed=connectivity_seed,
input_mask=input_mask,
block_size=self.block_size,
num_rand_blocks=self.num_rand_blocks)
# back to the original inputs dimensions
out = nn.DenseGeneral(
features=features,
axis=(-2, -1),
kernel_init=self.kernel_init,
bias_init=self.bias_init,
use_bias=self.use_bias,
dtype=self.dtype,
precision=self.precision,
name='out')(
x)
out = out[:, :orig_seqlen, :]
return out
class BigBirdSelfAttention(BigBirdAttention):
"""Multi-head dot-product self-attention.
Attributes:
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
block_size: Size for local attention around diagonal of attention.
num_rand_blocks: int. Number of random chunks per row.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
use_bias: bool: whether pointwise QKVO dense transforms use bias.
connectivity_seed: Seed for random block sparse attention.
"""
@nn.compact
def __call__(self,
inputs_q,
padding_mask=None,
segmentation=None,
dropout_rng=None):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
    applies dot-product attention and projects the results to an output vector.
    This is the self-attention variant: key and value vectors are always
    derived from `inputs_q`.
Args:
inputs_q: input queries of shape `[bs, length, features]`.
      padding_mask: boolean mask specifying which query tokens are padding.
segmentation: segment indices for packed inputs_q data.
dropout_rng: JAX PRNGKey: to be used for dropout
Returns:
output of shape `[bs, length, features]`.
"""
return super().__call__(
inputs_q=inputs_q,
inputs_kv=None,
padding_mask=padding_mask,
segmentation=segmentation,
dropout_rng=dropout_rng,
)
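# Illustrative usage sketch (hypothetical, not part of the original library):
# initialises and applies the `BigBirdSelfAttention` module on dummy
# activations. Shapes, seed and hyper-parameters are arbitrary assumptions.
def _example_bigbird_self_attention():
  """Applies `BigBirdSelfAttention` to dummy inputs and checks the shape."""
  import jax  # local import to keep the sketch self-contained
  batch, length, features = 2, 512, 256
  module = BigBirdSelfAttention(
      num_heads=4,
      block_size=64,
      num_rand_blocks=3,
      deterministic=True,
      connectivity_seed=0)
  inputs_q = jnp.ones((batch, length, features), dtype=jnp.float32)
  padding_mask = jnp.ones((batch, length, 1), dtype=jnp.float32)
  params = module.init(
      jax.random.PRNGKey(0), inputs_q, padding_mask=padding_mask)
  out = module.apply(params, inputs_q, padding_mask=padding_mask)
  assert out.shape == (batch, length, features)
  return out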
| ithaca-main | ithaca/models/bigbird_attention.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ithaca-main | ithaca/models/__init__.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ithaca model."""
from . import bigbird
from . import common_layers
import flax.linen as nn
import jax
import jax.numpy as jnp
class Model(nn.Module):
"""Transformer Model for sequence tagging."""
vocab_char_size: int = 164
vocab_word_size: int = 100004
output_subregions: int = 85
output_date: int = 160
output_date_dist: bool = True
output_return_emb: bool = False
use_output_mlp: bool = True
num_heads: int = 8
num_layers: int = 6
word_char_emb_dim: int = 192
emb_dim: int = 512
qkv_dim: int = 512
mlp_dim: int = 2048
max_len: int = 1024
causal_mask: bool = False
feature_combine_type: str = 'concat'
posemb_combine_type: str = 'add'
region_date_pooling: str = 'first'
learn_pos_emb: bool = True
use_bfloat16: bool = False
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
activation_fn: str = 'gelu'
model_type: str = 'bigbird'
def setup(self):
self.text_char_emb = nn.Embed(
num_embeddings=self.vocab_char_size,
features=self.word_char_emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0),
name='char_embeddings')
self.text_word_emb = nn.Embed(
num_embeddings=self.vocab_word_size,
features=self.word_char_emb_dim,
embedding_init=nn.initializers.normal(stddev=1.0),
name='word_embeddings')
@nn.compact
def __call__(self,
text_char=None,
text_word=None,
text_char_onehot=None,
text_word_onehot=None,
text_char_emb=None,
text_word_emb=None,
padding=None,
is_training=True):
"""Applies Ithaca model on the inputs."""
if text_char is not None and padding is None:
padding = jnp.where(text_char > 0, 1, 0)
elif text_char_onehot is not None and padding is None:
padding = jnp.where(text_char_onehot.argmax(-1) > 0, 1, 0)
padding_mask = padding[..., jnp.newaxis]
text_len = jnp.sum(padding, 1)
if self.posemb_combine_type == 'add':
posemb_dim = None
elif self.posemb_combine_type == 'concat':
posemb_dim = self.word_char_emb_dim
else:
      raise ValueError('Wrong posemb_combine_type value.')
# Character embeddings
if text_char is not None:
x = self.text_char_emb(text_char)
elif text_char_onehot is not None:
x = self.text_char_emb.attend(text_char_onehot)
elif text_char_emb is not None:
x = text_char_emb
else:
raise ValueError('Wrong inputs.')
# Word embeddings
if text_word is not None:
text_word_emb_x = self.text_word_emb(text_word)
elif text_word_onehot is not None:
text_word_emb_x = self.text_word_emb.attend(text_word_onehot)
elif text_word_emb is not None:
text_word_emb_x = text_word_emb
else:
raise ValueError('Wrong inputs.')
if self.feature_combine_type == 'add':
x = x + text_word_emb_x
elif self.feature_combine_type == 'concat':
x = jax.lax.concatenate([x, text_word_emb_x], 2)
else:
raise ValueError('Wrong feature_combine_type value.')
# Positional embeddings
pe_init = common_layers.sinusoidal_init(
max_len=self.max_len) if self.learn_pos_emb else None
x = common_layers.AddPositionEmbs(
posemb_dim=posemb_dim,
posemb_init=pe_init,
max_len=self.max_len,
combine_type=self.posemb_combine_type,
name='posembed_input',
)(
x)
x = nn.Dropout(rate=self.dropout_rate)(x, deterministic=not is_training)
# Set floating point
if self.use_bfloat16:
x = x.astype(jnp.bfloat16)
dtype = jnp.bfloat16
else:
dtype = jnp.float32
if self.model_type == 'bigbird':
model_block = bigbird.BigBirdBlock
else:
raise ValueError('Wrong model type specified.')
for lyr in range(self.num_layers):
x = model_block(
qkv_dim=self.qkv_dim,
mlp_dim=self.mlp_dim,
num_heads=self.num_heads,
dtype=dtype,
causal_mask=self.causal_mask,
dropout_rate=self.dropout_rate,
attention_dropout_rate=self.attention_dropout_rate,
deterministic=not is_training,
activation_fn=self.activation_fn,
connectivity_seed=lyr,
name=f'encoderblock_{lyr}',
)(
x,
padding_mask=padding_mask,
)
x = common_layers.LayerNorm(dtype=dtype, name='encoder_norm')(x)
torso_output = x
# Bert logits
if self.use_output_mlp:
x_mask = common_layers.MlpBlock(
out_dim=self.word_char_emb_dim,
mlp_dim=self.emb_dim,
dtype=dtype,
out_dropout=False,
dropout_rate=self.dropout_rate,
deterministic=not is_training,
activation_fn=self.activation_fn)(
x)
else:
x_mask = nn.Dense(self.word_char_emb_dim)(x)
char_embeddings = self.text_char_emb.embedding
char_embeddings = nn.Dropout(rate=self.dropout_rate)(
char_embeddings, deterministic=not is_training)
logits_mask = jnp.matmul(x_mask, jnp.transpose(char_embeddings))
# Next sentence prediction
if self.use_output_mlp:
logits_nsp = common_layers.MlpBlock(
out_dim=2,
mlp_dim=self.emb_dim,
dtype=dtype,
out_dropout=False,
dropout_rate=self.dropout_rate,
deterministic=not is_training,
activation_fn=self.activation_fn)(
x)
else:
logits_nsp = nn.Dense(2)(x)
# Average over temporal dimension
if self.region_date_pooling == 'average':
x = jnp.multiply(padding_mask.astype(jnp.float32), x)
x = jnp.sum(x, 1) / text_len.astype(jnp.float32)[..., None]
elif self.region_date_pooling == 'sum':
x = jnp.multiply(padding_mask.astype(jnp.float32), x)
x = jnp.sum(x, 1)
elif self.region_date_pooling == 'first':
x = x[:, 0, :]
else:
raise ValueError('Wrong pooling type specified.')
# Date pred
if self.output_date_dist:
output_date_dim = self.output_date
else:
output_date_dim = 1
if self.use_output_mlp:
pred_date = common_layers.MlpBlock(
out_dim=output_date_dim,
mlp_dim=self.emb_dim,
dtype=dtype,
out_dropout=False,
dropout_rate=self.dropout_rate,
deterministic=not is_training,
activation_fn=self.activation_fn)(
x)
else:
pred_date = nn.Dense(output_date_dim)(x)
# Region logits
if self.use_output_mlp:
logits_subregion = common_layers.MlpBlock(
out_dim=self.output_subregions,
mlp_dim=self.emb_dim,
dtype=dtype,
out_dropout=False,
dropout_rate=self.dropout_rate,
deterministic=not is_training,
activation_fn=self.activation_fn)(
x)
else:
logits_subregion = nn.Dense(self.output_subregions)(x)
outputs = (pred_date, logits_subregion, logits_mask, logits_nsp)
if self.output_return_emb:
return outputs, torso_output
else:
return outputs
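# Illustrative usage sketch (hypothetical, not part of the original library):
# instantiates a tiny `Model` and runs one forward pass on dummy token ids.
# All sizes below are arbitrary assumptions; real values come from the
# training config.
def _example_model_forward():
  """Initialises a small model and returns its four output heads."""
  model = Model(
      vocab_char_size=34,
      vocab_word_size=100,
      output_subregions=85,
      output_date=160,
      num_layers=1,
      num_heads=2,
      word_char_emb_dim=32,
      emb_dim=64,
      qkv_dim=64,
      mlp_dim=128,
      max_len=768)
  batch, length = 1, 768
  text_char = jnp.ones((batch, length), dtype=jnp.int32)
  text_word = jnp.ones((batch, length), dtype=jnp.int32)
  rng = jax.random.PRNGKey(0)
  params = model.init({'params': rng, 'dropout': rng},
                      text_char=text_char,
                      text_word=text_word,
                      is_training=False)
  date_pred, subregion_logits, mask_logits, nsp_logits = model.apply(
      params,
      text_char=text_char,
      text_word=text_word,
      is_training=False,
      rngs={'dropout': rng})
  return date_pred, subregion_logits, mask_logits, nsp_logits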
| ithaca-main | ithaca/models/model.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ithaca-main | ithaca/eval/__init__.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for performing inference using Jax, including decoding.
The module is separated into two main entrypoints: attribute() and restore().
Both take a function called `forward`, a Jax function mapping from model inputs
(excluding parameters) to the model output tuple. Generated using
e.g. `functools.partial(exp.forward.apply, exp._params)`.
"""
import json
import math
import re
from typing import List, NamedTuple, Tuple
import ithaca.util.eval as eval_util
import ithaca.util.text as util_text
import jax
import numpy as np
class LocationPrediction(NamedTuple):
"""One location prediction and its associated probability."""
location_id: int
score: float
def build_json(self):
return {
'location_id': self.location_id,
'score': self.score,
}
class AttributionResults(NamedTuple):
"""Immediate model output attribution predictions and related information."""
input_text: str
# List of pairs of location ID and probability
locations: List[LocationPrediction]
# Probabilities over year range [-800, -790, -780, ..., 790, 800]
year_scores: List[float] # length 160
# Per-character saliency maps:
date_saliency: List[float]
location_saliency: List[float] # originally called subregion
def build_json(self):
return {
'input_text': self.input_text,
'locations': [l.build_json() for l in self.locations],
'year_scores': self.year_scores,
'date_saliency': self.date_saliency,
'location_saliency': self.location_saliency
}
def json(self, **kwargs):
return json.dumps(self.build_json(), **kwargs)
class Restoration(NamedTuple):
"""One restored candidate string from the beam search."""
text: str
score: float
def build_json(self):
return {'text': self.text, 'score': self.score}
class RestorationCharSaliency(NamedTuple):
"""Saliency entry for one predicted character of a prediction."""
text: str
restored_idx: int # which predicted character the saliency map corresponds to
saliency: List[float]
def build_json(self):
return {
'text': self.text,
'restored_idx': self.restored_idx,
'saliency': self.saliency
}
class RestorationResults(NamedTuple):
"""Contains all text-related restoration predictions."""
input_text: str
top_prediction: str
restored: List[int] # char indices that were missing (-)
# List of top N results from beam search:
predictions: List[Restoration]
# Saliency maps for each successive character of the best (greedy) prediction
prediction_saliency: List[RestorationCharSaliency]
def build_json(self):
return {
'input_text':
self.input_text,
'top_prediction':
self.top_prediction,
'restored':
self.restored,
'predictions': [r.build_json() for r in self.predictions],
'prediction_saliency': [
m.build_json() for m in self.prediction_saliency
],
}
def json(self, **kwargs):
return json.dumps(self.build_json(), **kwargs)
# These constants are fixed for all recent versions of the model.
MIN_TEXT_LEN = 50
TEXT_LEN = 768 # fixed model sequence length
DATE_MIN = -800
DATE_MAX = 800
DATE_INTERVAL = 10
RESTORATION_BEAM_WIDTH = 20
RESTORATION_TEMPERATURE = 1.
SEED = 1
ALPHABET_MISSING_RESTORE = '?' # missing characters to restore
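# For reference: these constants mirror the training config macros
# (date_min=-800, date_max=800, date_interval=10, date_bins=160), so the
# `year_scores` returned by `attribute()` has one entry per date bin.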
def _prepare_text(
text, alphabet
) -> Tuple[str, str, str, np.ndarray, np.ndarray, List[int], np.ndarray,
List[int]]:
"""Adds start of sequence symbol, and padding.
Also strips accents if present, trims whitespace, and generates arrays ready
for input into the model.
Args:
text: Raw text input string, no padding or start of sequence symbol.
alphabet: GreekAlphabet object containing index/character mappings.
  Returns:
    Tuple of: cleaned text (str), text with SOS prepended (str), padded text
    (str), char indices (array of batch size 1), word indices (array of batch
    size 1), text length (list of size 1), padding mask (array of batch size
    1), and indices of characters marked for restoration (list).
"""
text = re.sub(r'\s+', ' ', text.strip())
text = util_text.strip_accents(text)
if len(text) < MIN_TEXT_LEN:
raise ValueError('Input text too short.')
if len(text) >= TEXT_LEN - 1:
raise ValueError('Input text too long.')
text_sos = alphabet.sos + text
text_len = [len(text_sos)] # includes SOS, but not padding
text_padded = text_sos + alphabet.pad * max(0, TEXT_LEN - len(text_sos))
restore_mask_idx = [
i for i, c in enumerate(text_padded) if c == ALPHABET_MISSING_RESTORE
]
text_padded = text_padded.replace(ALPHABET_MISSING_RESTORE, alphabet.missing)
text_char = util_text.text_to_idx(text_padded, alphabet).reshape(1, -1)
text_word = util_text.text_to_word_idx(text_padded, alphabet).reshape(1, -1)
padding = np.where(text_char > 0, 1, 0)
return (text, text_sos, text_padded, text_char, text_word, text_len, padding,
restore_mask_idx)
def attribute(text, forward, params, alphabet, vocab_char_size, vocab_word_size,
region_map) -> AttributionResults:
"""Computes predicted date and geographical region."""
(text, _, _, text_char, text_word, text_len, padding,
_) = _prepare_text(text, alphabet)
rng = jax.random.PRNGKey(SEED)
date_logits, subregion_logits, _, _ = forward(
text_char=text_char,
text_word=text_word,
rngs={'dropout': rng},
is_training=False)
# Generate subregion predictions:
subregion_logits = np.array(subregion_logits)
subregion_pred_probs = eval_util.softmax(subregion_logits[0]).tolist()
location_predictions = [
LocationPrediction(location_id=id, score=prob)
for prob, id in zip(subregion_pred_probs, region_map['sub']['ids'])
]
location_predictions.sort(key=lambda loc: loc.score, reverse=True)
# Generate date predictions:
date_pred_probs = eval_util.softmax(date_logits[0])
# Gradients for saliency maps
date_saliency, subregion_saliency = eval_util.compute_attribution_saliency_maps(
text_char, text_word, text_len, padding, forward, params, rng, alphabet,
vocab_char_size, vocab_word_size)
# Skip start of sequence symbol (first char) for text and saliency maps:
return AttributionResults(
input_text=text,
locations=location_predictions,
year_scores=date_pred_probs.tolist(),
date_saliency=date_saliency.tolist()[1:],
location_saliency=subregion_saliency.tolist()[1:])
def restore(text, forward, params, alphabet, vocab_char_size,
vocab_word_size) -> RestorationResults:
"""Performs search to compute text restoration. Slower, runs synchronously."""
if ALPHABET_MISSING_RESTORE not in text:
raise ValueError('At least one character must be missing.')
text, _, text_padded, _, _, text_len, _, restore_mask_idx = _prepare_text(
text, alphabet)
beam_result = eval_util.beam_search_batch_2d(
forward,
alphabet,
text_padded,
restore_mask_idx,
beam_width=RESTORATION_BEAM_WIDTH,
temperature=RESTORATION_TEMPERATURE,
rng=jax.random.PRNGKey(SEED))
# For visualization purposes, we strip out the SOS and padding, and adjust
# restored_indices accordingly
predictions = [
Restoration(
text=beam_entry.text_pred[1:].rstrip(alphabet.pad),
score=math.exp(beam_entry.pred_logprob)) for beam_entry in beam_result
]
restored_indices = [i - 1 for i in restore_mask_idx]
# Sequence of saliency maps for a greedy prediction:
saliency_steps = eval_util.sequential_restoration_saliency(
text_padded, text_len, forward, params, alphabet, restore_mask_idx,
vocab_char_size, vocab_word_size)
return RestorationResults(
input_text=text,
top_prediction=predictions[0].text,
restored=restored_indices,
predictions=predictions,
prediction_saliency=[
RestorationCharSaliency(step.text, int(step.pred_char_pos),
step.saliency_map.tolist())
for step in saliency_steps
])
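# Illustrative end-to-end sketch (hypothetical, not part of the original
# library), in the spirit of the module docstring. `load_checkpoint` and
# `text` are user-supplied assumptions: the loader is expected to return the
# restored parameters, the model config dict, a GreekAlphabet instance and the
# region map used at training time.
def _example_inference(load_checkpoint, text):
  """Runs attribution and restoration for a single inscription string."""
  import functools
  from ithaca.models.model import Model
  params, model_config, alphabet, region_map = load_checkpoint()
  forward = functools.partial(Model(**model_config).apply, params)
  # `text` must be at least 50 characters long and use '?' to mark the
  # characters that should be restored.
  attribution = attribute(
      text,
      forward=forward,
      params=params,
      alphabet=alphabet,
      vocab_char_size=model_config['vocab_char_size'],
      vocab_word_size=model_config['vocab_word_size'],
      region_map=region_map)
  restoration = restore(
      text,
      forward=forward,
      params=params,
      alphabet=alphabet,
      vocab_char_size=model_config['vocab_char_size'],
      vocab_word_size=model_config['vocab_word_size'])
  return attribution.json(indent=2), restoration.json(indent=2)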
| ithaca-main | ithaca/eval/inference.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for a Ithaca experiment."""
from jaxline import base_config
from ml_collections import config_dict
def get_config():
"""Return config object for training."""
config = base_config.get_base_config()
# Experiment config.
# Modify this to adapt to your custom distributed learning setup
local_batch_size = 1
num_devices = 1
config.train_batch_size = local_batch_size * num_devices
# Experiment config.
config.macros = config_dict.ConfigDict(
dict(
wordlist_size=35884, # Keeping words with freq >10
context_char_max=768,
date_max=800,
date_min=-800,
date_interval=10,
date_bins=160,
))
cm = config.macros # Alias.
config.experiment_kwargs = config_dict.ConfigDict(
dict(
config=dict(
random_seed=4,
random_mode_train=config.get_ref('random_mode_train'),
random_mode_eval=config.get_ref('random_mode_eval'),
optimizer=dict(
name='lamb',
kwargs=dict(
learning_rate=3e-4,
weight_decay=0.,
b2=0.999,
),
# Set up the learning rate schedule.
# factors='constant * linear_warmup * rsqrt_decay',
warmup=4000,
clip_adaptive=False,
clip_level=0.,
),
training=dict(
batch_size=config.get_oneway_ref('train_batch_size')),
alphabet=dict(
wordlist_path='data/iphi-wordlist.txt',
wordlist_size=cm.get_ref('wordlist_size'),
),
dataset=dict(
dataset_path='data/iphi.json',
region_main_path='data/iphi-region-main.txt',
region_sub_path='data/iphi-region-sub.txt',
context_char_min=50,
context_char_max=cm.get_ref('context_char_max'),
context_char_random=True,
char_use_guess=True,
char_mask_rate_min=0.,
char_mask_rate_max=0.5,
span_mask_eval_len=10,
span_mask_ratio=0.15,
span_mask_geometric_p=0.1,
random_sentence_swap=0.25,
random_word_delete=0.2,
random_word_swap=0.,
date_min=cm.get_ref('date_min'),
date_max=cm.get_ref('date_max'),
date_interval=cm.get_ref('date_interval'),
date_bins=cm.get_ref('date_bins'),
prepend_sos=1,
repeat_train=-1,
repeat_eval=10,
black_list=[
# 2334, 10, 293931, 14, 293752, 15, 293753, 16, 11,
# 294468, 229647, 12, 291324, 291317, 17, 232697, 293754,
# 1682, 1675, 1676, 1677, 1678, 1679, 1680, 1681, 291118,
# 291320, 291319, 292366, 34, 291960, 35, 32, 346490, 27,
# 292187, 291318, 19, 18, 37, 291321, 292189, 293756, 42,
# 46, 232710, 39, 40, 41, 291322, 293757, 293327, 28,
# 292194, 293326, 21, 293755, 291319, 291117, 38, 291959,
# 31, 232705
],
white_list=[]),
model=dict(
word_char_emb_dim=256,
emb_dim=512,
mlp_dim=2048,
num_layers=8,
num_heads=4,
vocab_char_size=34,
vocab_word_size=cm.get_ref('wordlist_size') + 4,
output_subregions=85,
output_date=cm.get_ref('date_bins'),
output_date_dist=True,
region_date_pooling='first',
use_output_mlp=True,
max_len=cm.get_ref('context_char_max'),
dropout_rate=0.1,
attention_dropout_rate=0.1,
use_bfloat16=False,
model_type='bigbird',
feature_combine_type='concat',
posemb_combine_type='concat',
),
loss=dict(
date=dict(
enabled=True,
type='dist',
weight_dist=1.25,
weight_l1=0.,
label_smoothing=0.,
step_start=0,
step_end=0,
),
region=dict(
enabled=True,
weight=2.,
label_smoothing=0.1,
step_start=0,
step_end=0,
),
mask=dict(
enabled=True,
weight=3.,
label_smoothing=0.05,
step_start=0,
step_end=0,
),
nsp=dict(
enabled=True,
weight=0.01,
step_start=0,
step_end=0,
)),
evaluation=dict(
use_jit=True,
batch_size=1,
mode='valid',
store_model_log=False,
store_model_log_steps=100,
),
),))
# Training loop config.
config.training_steps = 1_000_000
config.log_train_data_interval = 10
config.save_checkpoint_interval = 300
config.best_model_eval_metric = 'score/eval'
config.checkpoint_dir = '/tmp/ithaca_checkpoints'
config.train_checkpoint_all_hosts = False
# Prevents accidentally setting keys that aren't recognized (e.g. in tests).
config.lock()
return config
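# Illustrative usage sketch (hypothetical): inspects the returned config and
# overrides an existing hyper-parameter before launching an experiment. The
# override value is an arbitrary example.
def _example_config_usage():
  """Reads a few fields and overrides an existing hyper-parameter."""
  config = get_config()
  model_config = config.experiment_kwargs.config.model
  print('layers:', model_config.num_layers, 'heads:', model_config.num_heads)
  # The config is locked against adding *new* keys, but existing values can
  # still be updated, e.g. to train a smaller model locally.
  model_config.num_layers = 4
  return config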
| ithaca-main | train/config.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ithaca: Restoring and attributing ancient texts with deep neural networks."""
import bz2
import distutils.dir_util
import functools
import glob
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import dataloader
from ithaca.models.model import Model
from ithaca.util.alphabet import GreekAlphabet
from ithaca.util.loss import categorical_kl_divergence
from ithaca.util.loss import cross_entropy_label_smoothing_loss
from ithaca.util.loss import cross_entropy_loss
from ithaca.util.loss import cross_entropy_mask_loss
from ithaca.util.loss import date_loss_l1
from ithaca.util.optim import adaptive_grad_clip
from ithaca.util.optim import linear_warmup_and_sqrt_decay
from ithaca.util.optim import linear_weight
from ithaca.util.region_names import load_region_maps
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import platform
from jaxline import utils as jl_utils
import numpy as np
import optax
import tensorflow_datasets.public_api as tfds
FLAGS = flags.FLAGS
class Experiment(experiment.AbstractExperiment):
"""Ithaca experiment."""
# Holds a map from object properties that will be checkpointed to their name
  # within a checkpoint. Currently it is assumed that these are all sharded
# device arrays.
CHECKPOINT_ATTRS = {
'_params': 'params',
'_opt_state': 'opt_state',
}
def __init__(self, mode, init_rng, config):
"""Initializes experiment."""
super(Experiment, self).__init__(mode=mode)
self.mode = mode
self.init_rng = init_rng
self.config = config
# Same random key on each device.
self._rng_key = jl_utils.bcast_local_devices(self.init_rng)
# Checkpointed experiment state.
self._params = None
self._opt_state = None
# Input pipelines.
self._train_input = None
self._eval_input = None
# Forward and update functions.
self.forward = Model(**self.config.model)
self._update_func = jax.pmap(
self._update_func, axis_name='i', donate_argnums=(0, 1))
self._learning_rate_fn = functools.partial(
linear_warmup_and_sqrt_decay,
max_lr=self.config.optimizer.kwargs.learning_rate,
warmup_steps=self.config.optimizer.warmup)
self._opt_init, self._opt_update = self.optimizer()
if 'use_jit' in self.config.evaluation and self.config.evaluation.use_jit:
self._eval_batch = jax.jit(self._eval_batch)
# Create alphabet
alphabet_kwargs = dict(self.config.alphabet)
wordlist_path = alphabet_kwargs.pop('wordlist_path')
with open(wordlist_path, 'r') as f:
self._alphabet = GreekAlphabet(wordlist_file=f, **alphabet_kwargs)
# Create region mapping
self._region_map = {'main': None, 'sub': None}
if self.config.dataset.region_main_path:
with open(self.config.dataset.region_main_path, 'r') as f:
self._region_map['main'] = load_region_maps(f)
if self.config.dataset.region_sub_path:
with open(self.config.dataset.region_sub_path, 'r') as f:
self._region_map['sub'] = load_region_maps(f)
def optimizer(self):
config_opt = self.config.optimizer
kwargs = config_opt.kwargs.to_dict()
kwargs['learning_rate'] = self._learning_rate_fn
opt = getattr(optax, config_opt.name)(**kwargs)
if hasattr(config_opt, 'clip_adaptive') and config_opt.clip_adaptive:
if config_opt.clip_level > 0.:
opt = optax.chain(adaptive_grad_clip(config_opt.clip_level), opt)
elif config_opt.clip_level > 0.:
opt = optax.chain(optax.clip_by_global_norm(config_opt.clip_level), opt)
return opt
# _ _
# | |_ _ __ __ _(_)_ __
# | __| '__/ _` | | '_ \
# | |_| | | (_| | | | | |
# \__|_| \__,_|_|_| |_|
#
def step(self, global_step, rng, **unused_args):
"""See base class."""
if self._train_input is None:
self._initialize_train(rng)
batch = next(self._train_input)
(self._params, self._opt_state, scalars) = (
self._update_func(self._params, self._opt_state, global_step, batch,
rng))
scalars = jl_utils.get_first(scalars)
return scalars
def _initialize_train(self, rng):
# Check we haven't already restored params
if self._params is None:
logging.info(
'Initializing parameters rather than restoring from checkpoint.')
batch = next(self._build_train_input())
rng = jl_utils.get_first(rng)
params_rng, dropout_rng = jax.random.split(rng)
params_rng = jl_utils.bcast_local_devices(params_rng)
dropout_rng = jl_utils.bcast_local_devices(dropout_rng)
init_net = jax.pmap(
functools.partial(self.forward.init, is_training=True))
self._params = init_net({
'params': params_rng,
'dropout': dropout_rng
},
text_char=batch['text_char'],
text_word=batch['text_word'])
init_opt = jax.pmap(self._opt_init)
self._opt_state = init_opt(self._params)
self._train_input = jl_utils.py_prefetch(self._build_train_input)
self._train_input = jl_utils.double_buffer_on_gpu(self._train_input)
def _build_train_input(self):
"""See base class."""
num_devices = jax.device_count()
global_batch_size = self.config.training.batch_size
per_device_batch_size, ragged = divmod(global_batch_size, num_devices)
logging.info(
'num_devices: %d, per_device_batch_size: %d, global_batch_size: %d',
num_devices, per_device_batch_size, global_batch_size)
if ragged:
raise ValueError(
f'Global batch size {global_batch_size} must be divisible by '
f'num devices {num_devices}')
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
per_device_batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode='train')
ds = ds.batch(jax.local_device_count())
return iter(tfds.as_numpy(ds))
def _loss_fn(self, params, batch, global_step, rng):
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
eps = 1e-6
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=True,
rngs={'dropout': rng})
date_loss = 0.
subregion_loss = 0.
subregion_accuracy = 0.
mask_loss = 0.
mask_accuracy = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# Date loss
if self.config.loss.date.enabled:
if self.config.loss.date.label_smoothing > 0:
date_dist_prob = jnp.exp(date_dist) # logprob to prob
date_dist_prob_smooth = date_dist_prob * jax.random.uniform(
rng,
shape=date_dist_prob.shape,
dtype=date_dist_prob.dtype,
minval=1 - self.config.loss.date.label_smoothing,
maxval=1 + self.config.loss.date.label_smoothing)
date_dist_prob_smooth /= date_dist_prob_smooth.sum(axis=-1)[:,
jnp.newaxis]
date_dist_prob_smooth = jnp.clip(date_dist_prob_smooth, 1e-6, 1)
date_dist = jnp.log(date_dist_prob_smooth)
date_loss = 0.
if 'l1' in self.config.loss.date.type.split('+'):
date_pred_x = jnp.arange(
self.config.dataset.date_min +
self.config.dataset.date_interval / 2,
self.config.dataset.date_max +
self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_loss_l1_ = jax.vmap(date_loss_l1)(date_pred_val, date_min,
date_max, date_available)
        # JAX arrays are immutable, so keep nan_to_num's result explicitly.
        date_loss_l1_ = jnp.nan_to_num(date_loss_l1_)
date_loss += (
jnp.mean(date_loss_l1_, axis=0) * self.config.loss.date.weight_l1)
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
        date_loss_dist_ = jnp.nan_to_num(date_loss_dist_)
date_loss += (
jnp.mean(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_loss *= linear_weight(global_step, self.config.loss.date.step_start,
self.config.loss.date.step_end)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.mean(
cross_entropy_label_smoothing_loss(
subregion_logits,
subregion,
label_smoothing=self.config.loss.region.label_smoothing), 0)
      subregion_loss = jnp.nan_to_num(subregion_loss)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
w = linear_weight(global_step, self.config.loss.region.step_start,
self.config.loss.region.step_end)
subregion_loss *= w
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits,
text_unmasked,
text_mask,
label_smoothing=self.config.loss.mask.label_smoothing), 1) # [B]
assert mask_loss.ndim == 1
      mask_loss = jnp.nan_to_num(mask_loss)
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(mask_all_accuracy,
text_mask.astype(mask_logits.dtype))),
jnp.sum(text_mask) + eps)
mask_loss *= linear_weight(global_step, self.config.loss.mask.step_start,
self.config.loss.mask.step_end)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
      nsp_loss = jnp.nan_to_num(nsp_loss)
nsp_loss = jnp.mean(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.divide(
jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype))),
jnp.sum(next_sentence_mask) + eps)
nsp_loss *= linear_weight(global_step, self.config.loss.nsp.step_start,
self.config.loss.nsp.step_end)
loss = date_loss + subregion_loss + mask_loss + nsp_loss
scaled_loss = loss / jax.device_count()
# NOTE: We use scaled_loss for grads and unscaled for logging.
return scaled_loss, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss, nsp_accuracy)
def _update_func(self, params, opt_state, global_step, batch, rng):
"""Applies an update to parameters and returns new state."""
# This function computes the gradient of the first output of loss_fn and
# passes through the other arguments unchanged.
grad_loss_fn = jax.grad(self._loss_fn, has_aux=True)
scaled_grads, (loss, date_loss, subregion_loss, subregion_accuracy,
mask_loss, mask_accuracy, nsp_loss,
nsp_accuracy) = grad_loss_fn(params, batch, global_step, rng)
scaled_grads = jax.tree_map(jnp.nan_to_num, scaled_grads)
grads = jl_utils.tree_psum(scaled_grads, axis_name='i')
# Compute and apply updates via our optimizer.
learning_rate = self._learning_rate_fn(global_step)
updates, opt_state = self._opt_update(grads, opt_state, params=params)
params = optax.apply_updates(params, updates)
# Scalars to log (note: we log the mean across all hosts/devices).
scalars = {
'loss/train': loss,
'loss/date': date_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
'opt/learning_rate': learning_rate,
'opt/grad_norm': optax.global_norm(grads),
'opt/param_norm': optax.global_norm(params),
}
scalars = jax.lax.pmean(scalars, axis_name='i')
return params, opt_state, scalars
# _
# _____ ____ _| |
# / _ \ \ / / _` | |
# | __/\ V / (_| | |
# \___| \_/ \__,_|_|
#
def evaluate(self, global_step, rng, **unused_kwargs):
"""See base class."""
if self._eval_input is None:
self._initialize_eval()
global_step = np.array(jl_utils.get_first(global_step))
summary, outputs = self._eval_epoch(jl_utils.get_first(rng))
for k, v in summary.items():
summary[k] = np.array(v)
score = summary['score/eval']
logging.info('[Step %d] eval_score=%.2f', global_step, score)
# Log outputs
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
outputs_path = os.path.join(checkpoint_dir, 'best_outputs.pkl.bz2')
score_path = os.path.join(checkpoint_dir, 'best_score.txt')
model_log_path = os.path.join(checkpoint_dir, 'model_log')
best_model_log_path = os.path.join(checkpoint_dir, 'best_model_log')
# Check for preexisting outputs
best_score = None
best_step = None
if os.path.exists(score_path):
with open(score_path, 'r') as f:
tok = f.read().strip().split(' ')
best_step = int(tok[0])
best_score = float(tok[1])
# Store outputs if score is better
if best_score is None or (score > best_score and global_step > best_step):
best_score = score
with open(score_path, 'w') as f:
f.write(f'{global_step} {best_score}')
with open(outputs_path, 'wb') as f:
outputs_pkl = pickle.dumps(outputs, protocol=2)
outputs_pkl_bz2 = bz2.compress(outputs_pkl)
f.write(outputs_pkl_bz2)
if self.config.evaluation.store_model_log:
if os.path.isdir(best_model_log_path):
          # `map` is lazy in Python 3; iterate to actually remove the files.
          for stale_path in glob.glob(best_model_log_path + '/*'):
            os.remove(stale_path)
else:
os.makedirs(best_model_log_path)
distutils.dir_util.copy_tree(model_log_path, best_model_log_path)
logging.info('[Step %d] Writing eval outputs: %s.', global_step,
outputs_path)
# Log best score
summary['score/eval_best'] = best_score
return summary
def _initialize_eval(self):
self._eval_input = self._build_eval_input()
def _build_eval_input(self):
"""Builds the evaluation input pipeline."""
config_dataset = self.config.dataset
with open(config_dataset.dataset_path) as dataset_file:
ds = dataloader.loader_tf(
self.config.evaluation.batch_size,
config_dataset,
self._region_map,
alphabet=self._alphabet,
dataset_file=dataset_file,
mode=self.config.evaluation.mode)
return tfds.as_numpy(ds)
def _eval_batch(self, params, batch, rng):
"""Evaluates a batch."""
phi_id = batch['id']
text_char = batch['text_char']
text_word = batch['text_word']
text_unmasked = batch['text_unmasked']
text_mask = batch['text_mask']
next_sentence_mask = batch['next_sentence_mask']
next_sentence_label = batch['next_sentence_label']
subregion = batch['region_sub_id']
date_min = batch['date_min']
date_max = batch['date_max']
date_dist = batch['date_dist']
date_available = batch['date_available']
# with hlogging.context() as log:
(date_pred, subregion_logits, mask_logits, nsp_logits) = self.forward.apply(
params,
text_char=text_char,
text_word=text_word,
text_char_onehot=None,
text_word_onehot=None,
is_training=False,
rngs={'dropout': rng})
# Log model weights
model_log = {}
subregion_loss = 0.
subregion_accuracy = 0.
date_loss = 0.
date_l1_loss = 0.
nsp_loss = 0.
nsp_accuracy = 0.
# eps = 1e-6
date_count = 0
mask_count = 0
nsp_count = 0
# Date loss
if self.config.loss.date.enabled:
date_pred_x = jnp.arange(
self.config.dataset.date_min + self.config.dataset.date_interval / 2,
self.config.dataset.date_max + self.config.dataset.date_interval / 2,
self.config.dataset.date_interval).reshape(-1, 1)
date_pred_val = jnp.dot(jax.nn.softmax(date_pred, axis=-1), date_pred_x)
date_l1_loss = jnp.sum(
jax.vmap(date_loss_l1)(date_pred_val, date_min, date_max,
date_available),
axis=0)
if 'l1' in self.config.loss.date.type.split('+'):
date_loss += date_l1_loss * self.config.loss.date.weight_l1
if 'dist' in self.config.loss.date.type.split('+'):
date_loss_dist_ = categorical_kl_divergence(date_dist, date_pred)
date_loss_dist_ *= date_available
date_loss += (
jnp.sum(date_loss_dist_, axis=0) *
self.config.loss.date.weight_dist)
date_count = jnp.sum(date_available)
# Region and subregion loss
if self.config.loss.region.enabled:
subregion_loss = jnp.sum(
cross_entropy_loss(subregion_logits, subregion), 0)
subregion_loss *= self.config.loss.region.weight
subregion_accuracy = jnp.mean(
jnp.argmax(subregion_logits, -1) == subregion)
# Mask loss
if self.config.loss.mask.enabled:
mask_loss = jnp.sum(
cross_entropy_label_smoothing_loss(
mask_logits, text_unmasked, text_mask, label_smoothing=0),
1) # [B]
# mask_loss /= jnp.sum(text_mask, axis=1) + eps # [B]
assert mask_loss.ndim == 1
mask_loss = jnp.mean(mask_loss, 0) * self.config.loss.mask.weight # []
mask_all_accuracy = (jnp.argmax(mask_logits, -1) == text_unmasked).astype(
mask_logits.dtype)
mask_accuracy = jnp.sum(
jnp.multiply(mask_all_accuracy, text_mask.astype(mask_logits.dtype)))
mask_count = jnp.sum(text_mask)
# NSP loss
if self.config.loss.nsp.enabled:
nsp_loss = jnp.sum(
jax.vmap(jax.vmap(cross_entropy_mask_loss))(nsp_logits,
next_sentence_label,
next_sentence_mask),
1) # [B]
assert nsp_loss.ndim == 1
nsp_loss = jnp.sum(nsp_loss, 0) * self.config.loss.nsp.weight
nsp_all_accuracy = (jnp.argmax(
nsp_logits, -1) == next_sentence_label).astype(nsp_logits.dtype)
nsp_accuracy = jnp.sum(
jnp.multiply(nsp_all_accuracy,
next_sentence_mask.astype(nsp_logits.dtype)))
nsp_count = jnp.sum(next_sentence_mask)
# Outputs
scalars = {
'score/eval':
(mask_accuracy + subregion_accuracy - date_l1_loss * 0.01),
'loss/eval': mask_loss + date_loss + subregion_loss,
'loss/date': date_loss,
'loss/date_l1': date_l1_loss,
'loss/subregion': subregion_loss,
'loss/mask': mask_loss,
'loss/nsp': nsp_loss,
'count/date': date_count,
'count/nsp': nsp_count,
'count/mask': mask_count,
'accuracy/subregion': subregion_accuracy,
'accuracy/mask': mask_accuracy,
'accuracy/nsp': nsp_accuracy,
}
outputs = {
'outputs/id': phi_id,
'outputs/date_pred': date_pred.astype('float16'),
'outputs/date_min': date_min,
'outputs/date_max': date_max,
'outputs/date_dist': date_dist.astype('float16'),
'outputs/date_available': date_available,
'outputs/subregion_logits': subregion_logits.astype('float16'),
'outputs/subregion': subregion,
}
return scalars, outputs, model_log
def _eval_epoch(self, rng):
"""Evaluates an epoch."""
summary = {}
outputs = {}
total_num_sequences = 0
# Prepare directories for storing model log
checkpoint_dir = jl_utils.get_checkpoint_dir(FLAGS.config,
jax.process_index())
model_log_path = os.path.join(checkpoint_dir, 'model_log')
if self.config.evaluation.store_model_log:
if os.path.isdir(model_log_path):
        # `map` is lazy in Python 3; iterate to actually remove the files.
        for stale_path in glob.glob(model_log_path + '/*'):
          os.remove(stale_path)
else:
os.makedirs(model_log_path)
# Checkpoints broadcast for each local device
params = jl_utils.get_first(self._params)
# Model log buffer initialisation
model_log_buffer = []
def _flush_model_log_buffer(model_log_buffer):
"""Writes model log to bz2 pickle files."""
while model_log_buffer:
model_log_batch_path, model_log_pkl_bz2 = model_log_buffer.pop(0)
with open(model_log_batch_path, 'wb') as f:
f.write(model_log_pkl_bz2)
# Converting to numpy here allows us to reset the generator
for batch in self._eval_input:
# Make sure that the input has batch_dim=1
assert batch['text_char'].shape[0] == 1
summary_batch, outputs_batch, model_log_batch = self._eval_batch(
params, batch, rng)
# Append batch values to dictionary
for k, v in summary_batch.items():
summary[k] = summary.get(k, 0) + v
for k, v in outputs_batch.items():
outputs.setdefault(k, []).append(v)
total_num_sequences += self.config.evaluation.batch_size
# Store model log per batch
if self.config.evaluation.store_model_log:
# Append to buffer
model_log_batch_path = os.path.join(
model_log_path,
str(outputs_batch['outputs/id'][0]) + '.pkl.bz2')
model_log_pkl = pickle.dumps(model_log_batch, protocol=2)
model_log_pkl_bz2 = bz2.compress(model_log_pkl)
model_log_buffer += [(model_log_batch_path, model_log_pkl_bz2)]
# Flush model log buffer
if (len(model_log_buffer) %
self.config.evaluation.store_model_log_steps == 0):
_flush_model_log_buffer(model_log_buffer)
# Flush remaining model log buffer
if self.config.evaluation.store_model_log:
_flush_model_log_buffer(model_log_buffer)
# Normalise and concatenate
summary['loss/date'] /= summary['count/date']
summary['loss/date_l1'] /= summary['count/date']
summary['loss/mask'] /= summary['count/mask']
summary['accuracy/mask'] /= summary['count/mask']
summary['loss/nsp'] /= summary['count/nsp']
summary['accuracy/nsp'] /= summary['count/nsp']
summary['loss/subregion'] /= total_num_sequences
summary['accuracy/subregion'] /= total_num_sequences
summary['score/eval'] = (
summary['accuracy/mask'] + summary['accuracy/subregion'] -
summary['loss/date_l1'] * 0.01)
summary['loss/eval'] = (
summary['loss/mask'] + summary['loss/date'] + summary['loss/subregion'])
for k, v in outputs.items():
outputs[k] = np.concatenate(v, axis=0)
return summary, outputs
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(platform.main, Experiment))
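# Example launch command (hypothetical; the exact flags and paths depend on
# your checkout, data layout and jaxline version):
#   python train/experiment.py --config=train/config.py --jaxline_mode=train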
| ithaca-main | train/experiment.py |
# Copyright 2021 the Ithaca Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader functions."""
import json
import random
import re
from absl import logging
from ithaca.util.dates import date_range_to_dist
from ithaca.util.text import random_mask_span
from ithaca.util.text import random_sentence_swap
from ithaca.util.text import random_word_delete
from ithaca.util.text import random_word_swap
from ithaca.util.text import text_to_idx
from ithaca.util.text import text_to_word_idx
import numpy as np
import tensorflow.compat.v1 as tf
def generate_sample(config, alphabet, region_map, sample, mode='train'):
"""Generates a new TF dataset sample."""
# Get text
text = sample['text']
# Next sentence prediction
sentences = text.split('.')
# Strip spaces
sentences = list(map(str.strip, sentences))
# Filter blank sentences
sentences = list(filter(None, sentences))
# Generate indexes
sentence_idx = np.arange(len(sentences), dtype=np.int32)
# Random sentence shuffling
if (mode == 'train' and config.random_sentence_swap > 0):
# Shuffle indexes
sentence_idx = random_sentence_swap(sentence_idx,
config.random_sentence_swap)
# Reshuffle sentences
sentences = np.array(sentences)[sentence_idx].tolist()
# Random word swap
if mode == 'train' and config.random_word_swap > 0:
sentences = [
random_word_swap(s, config.random_word_swap) for s in sentences
]
# Random word delete
if mode == 'train' and config.random_word_delete > 0:
sentences = [
random_word_delete(s, config.random_word_delete) for s in sentences
]
# Join text
text = '. '.join(sentences) + '.'
# Generate mask and labels
next_sentence_dots = np.array(
[pos for pos, char in enumerate(text[:-1]) if char == '.'],
dtype=np.int32)
next_sentence_mask = np.zeros(len(text), dtype=bool)
next_sentence_label = np.zeros(len(text), dtype=np.int32)
if sentence_idx.size > 1:
next_sentence_mask[next_sentence_dots] = True
next_sentence_label[next_sentence_dots] = (
sentence_idx[:-1] == (sentence_idx[1:] - 1))
  # Compute start index for prepending the start-of-sentence character
start_sample_idx = int(config.prepend_sos)
if (mode in ['train', 'valid'] and config.context_char_random and
len(text) >= config.context_char_min):
# During training pick random context length
context_char_len = np.random.randint(
config.context_char_min,
min(len(text), config.context_char_max - start_sample_idx) + 1)
start_idx = 0
if context_char_len < len(text):
start_idx = np.random.randint(0, len(text) - context_char_len + 1)
text = text[start_idx:start_idx + context_char_len - start_sample_idx]
next_sentence_mask = next_sentence_mask[start_idx:start_idx +
context_char_len - start_sample_idx]
next_sentence_label = next_sentence_label[start_idx:start_idx +
context_char_len -
start_sample_idx]
elif (config.context_char_max and len(text) >
(config.context_char_max - start_sample_idx)):
# Clip text by maximum length
start_idx = np.random.randint(
0,
len(text) - (config.context_char_max - start_sample_idx) + 1)
text = text[start_idx:start_idx + config.context_char_max -
start_sample_idx]
next_sentence_mask = next_sentence_mask[start_idx:start_idx +
config.context_char_max -
start_sample_idx]
next_sentence_label = next_sentence_label[start_idx:start_idx +
config.context_char_max -
start_sample_idx]
# Prepend start of sentence character
if config.prepend_sos:
text = alphabet.sos + text
    # Concatenate explicitly: `list + ndarray` would broadcast an element-wise
    # add instead of prepending an entry.
    next_sentence_mask = np.concatenate([[False], next_sentence_mask])
    next_sentence_label = np.concatenate(
        [[0], next_sentence_label]).astype(np.int32)
# Unmasked text
text_unmasked_idx = text_to_idx(text, alphabet)
text_unmasked_word_idx = text_to_word_idx(text, alphabet)
# Mask text
text_mask = np.zeros(len(text), dtype=bool)
if mode in ['train', 'valid']:
text_list = list(text)
# Non missing idx (avoid removing start of sentence character)
non_missing_idx = []
for i in range(start_sample_idx, len(text_list)):
if text_list[i] not in [alphabet.missing] + alphabet.punctuation:
non_missing_idx.append(i)
# Skip sample if there are no usable characters
if not non_missing_idx:
return
char_mask_idx = []
if config.char_mask_rate_max > 0.:
# Compute rate
char_mask_rate = np.random.uniform(config.char_mask_rate_min,
config.char_mask_rate_max)
# Fix masking in valid mode for comparing experiments
span_mask_geometric_p = config.span_mask_geometric_p
mask_num_total = int(char_mask_rate * len(non_missing_idx))
mask_num_span = int(mask_num_total * config.span_mask_ratio)
if mode == 'valid' and config.span_mask_eval_len > 0:
span_mask_geometric_p = None
mask_num_total = min(config.span_mask_eval_len, len(non_missing_idx))
mask_num_span = mask_num_total
mask_num_char = mask_num_total - mask_num_span
# Mask random indices
if mask_num_char > 0:
char_mask_idx = np.random.choice(
non_missing_idx, mask_num_char, replace=False).tolist()
# Mask random spans
if mask_num_span > 0:
count_span = 0
span_mask_idx = []
while (len(span_mask_idx) < mask_num_span and count_span < 10000):
span_mask_idx.extend(
random_mask_span(
text,
geometric_p=span_mask_geometric_p,
limit_chars=mask_num_span - len(span_mask_idx)))
count_span += 1
char_mask_idx.extend(span_mask_idx)
# Mask text
for idx in set(char_mask_idx):
text_mask[idx] = True
text_list[idx] = alphabet.missing
text = ''.join(text_list)
# Text missing mask
text_missing_mask = np.array(list(text)) == alphabet.missing
# Convert to indices
text_idx = text_to_idx(text, alphabet)
text_idx_len = len(text_idx)
text_word_idx = text_to_word_idx(text, alphabet)
text_word_idx_len = len(text_word_idx)
assert text_idx_len == text_word_idx_len
# PHI id
phi_id = int(sample['id'])
# Map region ids to local ids
region_main_id = region_map['main']['ids_inv'][int(sample['region_main_id'])]
region_sub_id = region_map['sub']['ids_inv'][int(sample['region_sub_id'])]
# Dates
if (sample['date_min'] and sample['date_max'] and
int(sample['date_min']) <= int(sample['date_max']) and
int(sample['date_min']) >= config.date_min and
int(sample['date_max']) < config.date_max):
date_available = True
date_min = float(sample['date_min'])
date_max = float(sample['date_max'])
date_dist = date_range_to_dist(date_min, date_max, config.date_min,
config.date_max, config.date_interval,
config.date_bins)
else:
date_available = False
date_min = 0.
date_max = 0.
date_dist = date_range_to_dist(None, None, config.date_min, config.date_max,
config.date_interval, config.date_bins)
return {
'id': phi_id, # 'text_str': text,
'text_char': text_idx,
'text_mask': text_mask,
'text_missing_mask': text_missing_mask,
'text_word': text_word_idx,
'text_len': text_idx_len,
'text_unmasked': text_unmasked_idx,
'text_unmasked_word': text_unmasked_word_idx,
'next_sentence_mask': next_sentence_mask,
'next_sentence_label': next_sentence_label,
'region_main_id': region_main_id,
'region_sub_id': region_sub_id,
'date_available': date_available,
'date_min': date_min,
'date_max': date_max,
'date_dist': date_dist,
}
def loader_tf(batch_size,
config,
region_map,
alphabet=None,
dataset_file=None,
mode='train'):
"""TF dataloader."""
# Load dataset
dataset_tmp = {int(d['id']): d for d in json.load(dataset_file)}
logging.info('Loaded dataset inscriptions: %d.', len(dataset_tmp))
# Check if white_list enabled
if hasattr(config, 'white_list') and config.white_list:
dataset = []
for d in dataset_tmp.values():
if int(d['id']) in config.white_list:
dataset.append(d)
del dataset_tmp
else:
# Find duplicate inscriptions
rev_dataset = {}
black_list = set()
if hasattr(config, 'black_list') and config.black_list:
logging.info('Ignore list inscriptions: %d.', len(config.black_list))
black_list.update(config.black_list)
for key in sorted(dataset_tmp.keys()):
value = dataset_tmp[key]
rev_dataset.setdefault(value['text'], set()).add(key)
if len(rev_dataset[value['text']]) > 1:
black_list.add(int(value['id']))
del rev_dataset
logging.info('Inscriptions filtered: %d.', len(black_list))
# Create deduplicated dataset
dataset = []
for d in dataset_tmp.values():
if int(d['id']) not in black_list:
dataset.append(d)
del dataset_tmp
del black_list
logging.info('Final dataset inscriptions: %d.', len(dataset))
  # Shuffle to break any correlated ordering in the dataset.
random.shuffle(dataset)
# Sample generator function
def generate_samples():
dataset_idxs = list(range(len(dataset)))
random.shuffle(dataset_idxs)
for dataset_i in dataset_idxs:
sample = dataset[dataset_i]
# Skip if region does not exist in map
if (int(sample['region_main_id']) not in region_map['main']['ids_inv'] or
int(sample['region_sub_id']) not in region_map['sub']['ids_inv']):
continue
# Replace guess signs with missing chars
if hasattr(config, 'char_use_guess') and not config.char_use_guess:
sample['text'] = re.sub(r'\[(.*?)\]', lambda m: '-' * len(m.group(1)),
sample['text'])
sample['text'] = sample['text'].replace(alphabet.sog,
'').replace(alphabet.eog, '')
# Filter by text length
if len(sample['text'].replace(alphabet.missing,
'')) < config.context_char_min:
continue
# Last digit 3 -> test, 4 -> valid, the rest are the training set
sample_id = int(sample['id'])
if ((sample_id % 10 == 3 and mode == 'test') or
(sample_id % 10 == 4 and mode == 'valid') or
(sample_id % 10 != 3 and sample_id % 10 != 4 and mode == 'train') or
(hasattr(config, 'white_list') and config.white_list)):
s = generate_sample(config, alphabet, region_map, sample, mode=mode)
if s:
yield s
# Create dataset from generator.
with tf.device('/cpu:0'):
ds = tf.data.Dataset.from_generator(
generate_samples,
output_signature={
'id':
tf.TensorSpec(shape=(), dtype=tf.int32),
'text_char':
tf.TensorSpec(shape=(None), dtype=tf.int32),
'text_mask':
tf.TensorSpec(shape=(None), dtype=tf.bool),
'text_missing_mask':
tf.TensorSpec(shape=(None), dtype=tf.bool),
'text_word':
tf.TensorSpec(shape=(None), dtype=tf.int32),
'text_unmasked':
tf.TensorSpec(shape=(None), dtype=tf.int32),
'text_unmasked_word':
tf.TensorSpec(shape=(None), dtype=tf.int32),
'next_sentence_mask':
tf.TensorSpec(shape=(None), dtype=tf.bool),
'next_sentence_label':
tf.TensorSpec(shape=(None), dtype=tf.int32),
'text_len':
tf.TensorSpec(shape=(), dtype=tf.int32),
'region_main_id':
tf.TensorSpec(shape=(), dtype=tf.int32),
'region_sub_id':
tf.TensorSpec(shape=(), dtype=tf.int32),
'date_available':
tf.TensorSpec(shape=(), dtype=tf.bool),
'date_min':
tf.TensorSpec(shape=(), dtype=tf.float32),
'date_max':
tf.TensorSpec(shape=(), dtype=tf.float32),
'date_dist':
tf.TensorSpec(shape=(config.date_bins), dtype=tf.float32),
})
# Shuffle and repeat.
if mode == 'train':
if config.repeat_train == -1:
ds = ds.repeat()
elif config.repeat_train >= 1:
ds = ds.repeat(config.repeat_train)
else:
if config.repeat_eval == -1:
ds = ds.repeat()
elif config.repeat_eval >= 1:
ds = ds.repeat(config.repeat_eval)
# Batch and pad.
max_len = config.context_char_max
ds = ds.padded_batch(
batch_size,
padded_shapes={
'id': [],
'text_char': [max_len],
'text_mask': [max_len],
'text_missing_mask': [max_len],
'text_word': [max_len],
'text_unmasked': [max_len],
'text_unmasked_word': [max_len],
'next_sentence_mask': [max_len],
'next_sentence_label': [max_len],
'text_len': [],
'region_main_id': [],
'region_sub_id': [],
'date_available': [],
'date_min': [],
'date_max': [],
'date_dist': [config.date_bins]
},
padding_values={
'id': 0,
'text_char': alphabet.pad_idx,
'text_mask': False,
'text_missing_mask': True,
'text_word': alphabet.pad_idx,
'text_unmasked': alphabet.pad_idx,
'text_unmasked_word': alphabet.pad_idx,
'next_sentence_mask': False,
'next_sentence_label': 0,
'text_len': 0,
'region_main_id': 0,
'region_sub_id': 0,
'date_available': False,
'date_min': 0.,
'date_max': 0.,
'date_dist': 0.
})
return ds
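# Illustrative usage sketch (not part of the original code): the loader expects
# an already-open JSON dataset file plus config, alphabet and region_map
# objects from the surrounding training setup, all of which are assumed here.
#   with open('dataset.json') as dataset_file:
#     ds = loader_tf(
#         batch_size=32,
#         config=config,
#         region_map=region_map,
#         alphabet=alphabet,
#         dataset_file=dataset_file,
#         mode='train')
#   for batch in ds.take(1):
#     print(batch['text_char'].shape)  # (32, config.context_char_max)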
| ithaca-main | train/dataloader.py |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""When building as a dynamic .so provide a hook to load it at runtime."""
import tensorflow.compat.v1 as tf
# TODO: Store the returned object so application code can use it.
tf.load_op_library(
'multidim_image_augmentation/python/ops/_augmentation_ops.so')
| multidim-image-augmentation-master | multidim_image_augmentation/__init__.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for deformation augmentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
def vectorized_random_uniform(minvals, maxvals, name=None):
"""creates a tensor with uniform random values.
Args:
minvals: 1-D Tensor with minimum values.
maxvals: 1-D Tensor with maximum values.
name: (optional) Name for the operation.
Returns:
1-D Tensor with uniform random values.
"""
with tf.variable_scope(name, "vectorized_random_uniform", [minvals, maxvals]):
ranges = tf.subtract(maxvals, minvals, name="ranges")
samples = tf.random.uniform(
ranges.shape, dtype=ranges.dtype, name="samples")
samples_scaled = tf.multiply(ranges, samples, name="samples_scaled")
samples_scaled_offset = tf.add(samples_scaled,
minvals,
name="samples_scaled_offset")
return samples_scaled_offset
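# Example sketch (arbitrary values): drawing one uniform sample per element,
# each within its own [min, max) range.
#   minvals = tf.constant([0.0, 10.0])
#   maxvals = tf.constant([1.0, 20.0])
#   samples = vectorized_random_uniform(minvals, maxvals)  # shape [2]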
def create_centered_identity_transformation_field(shape, spacings):
"""Create 2D or 3D centered identity transformation field.
Args:
shape: 2- or 3-element list. The shape of the transformation field.
spacings: 2- or 3-element list. The spacings of the transformation field.
Returns:
2D case: 3-D Tensor (x0, x1, comp) describing a 2D vector field
3D case: 4-D Tensor (x0, x1, x2, comp) describing a 3D vector field
"""
coords = []
for i, size in enumerate(shape):
spacing = spacings[i]
coords.append(tf.linspace(
-(size - 1) / 2 * spacing,
(size - 1) / 2 * spacing,
size))
permutation = np.roll(np.arange(len(coords) + 1), -1)
return tf.transpose(tf.meshgrid(*coords, indexing="ij"), permutation)
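# Worked example (sketch): for shape=[5, 5] and spacings=[1., 1.] the result is
# a (5, 5, 2) field whose centre element maps to (0., 0.) and whose opposite
# corners map to (-2., -2.) and (2., 2.).
#   field = create_centered_identity_transformation_field([5, 5], [1., 1.])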
def create_control_grid_for_cubic_interp(transformed_image_shape,
transformed_image_spacings_um,
control_grid_spacings_pix):
"""Create a control grid with optimal size for cubic interpolation.
The control grid will have two extra points in every direction to allow an
interpolation without border artefacts.
Args:
transformed_image_shape: 2- or 3-element list describing the shape of the
target image.
transformed_image_spacings_um: 2- or 3-element tensor describing the spacing
of the target image.
control_grid_spacings_pix: 2- or 3-element list describing the control grid
spacings.
Returns:
2D case: 3-D Tensor (x0, x1, comp) describing a 2D vector field.
3D case: 4-D Tensor (x0, x1, x2, comp) describing a 3D vector field.
"""
grid_shape = np.zeros(len(transformed_image_shape), dtype=int)
for comp in range(len(transformed_image_shape)):
spacing_pix = float(control_grid_spacings_pix[comp])
num_elem = float(transformed_image_shape[comp])
if num_elem % 2 == 0:
grid_shape[comp] = np.ceil((num_elem - 1) / (2 * spacing_pix) +
0.5) * 2 + 2
else:
grid_shape[comp] = np.ceil((num_elem - 1) / (2 * spacing_pix)) * 2 + 3
control_grid_spacings_um = tf.multiply(
tf.constant(control_grid_spacings_pix, dtype=tf.float32),
transformed_image_spacings_um)
control_grid = create_centered_identity_transformation_field(
grid_shape, control_grid_spacings_um)
control_grid.set_shape(np.append(grid_shape, len(control_grid_spacings_pix)))
return control_grid
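# Worked example (sketch), consistent with the unit tests further below in this
# package: for a 20x30 target image and control grid spacings of 9 pixels the
# computed grid shape is [6, 8], i.e. the returned field has shape (6, 8, 2).
#   grid = create_control_grid_for_cubic_interp(
#       transformed_image_shape=[20, 30],
#       transformed_image_spacings_um=tf.constant([0.1, 0.1]),
#       control_grid_spacings_pix=[9, 9])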
def create_2x2_rotation_matrix(radians):
"""Creates a 2D rotation matrix.
For an angle a this is
[[cos(a), -sin(a)],
[sin(a), cos(a)]]
Args:
radians: rotation angle in radians.
Returns:
2-D Tensor with 2x2 elements, the rotation matrix.
"""
rotation = [[tf.cos(radians), -tf.sin(radians)],
[tf.sin(radians), tf.cos(radians)]]
rotation = tf.convert_to_tensor(rotation, name="rotation_matrix")
return rotation
def create_2x2_shearing_matrix(shearing_coefs):
"""Creates a 2D shearing matrix.
Args:
shearing_coefs: 2-element list with the shearing coefficients
(off-diagonal elements of the matrix: s01, s10) to create the matrix
[[ 1 , s01],
[s10, 1 ]]
Returns:
2-D Tensor with 2x2 elements, the shearing matrix
"""
shearing = [[1, shearing_coefs[0]], [shearing_coefs[1], 1]]
shearing = tf.convert_to_tensor(shearing, name="shearing_matrix")
return shearing
def create_2d_deformation_field(
raw_image_center_pos_pix, raw_image_element_size_um,
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix, deformations_magnitudes_um, rotation_angle,
scale_factors, mirror_factors, shearing_coefs, cropping_offset_pix):
"""Creates a 2D deformation field.
Creates a dense 2D deformation field for affine and elastic deformations. The
created 2D vector field (represented as a 3-D Tensor with (x0, x1, comp))
has the same spatial shape as the output (net_input) image and contains the
absolute positions of the corresponding pixels in the input (raw) image. The
process of creating the deformation field has four steps:
1. Setup a grid of control points.
2. Add a random offset to each control point drawn from a normal
distribution to model the random elastic deformation.
3. Apply the affine transformation to the control points.
4. Compute a dense transformation field using cubic bspline interpolation.
A more detailed description of the process can be found in the doc directory.
Args:
raw_image_center_pos_pix: 1-D Tensor with 2 elements of type tf.float32. The
position of the center of the raw image in pixels from the upper, left
corner.
raw_image_element_size_um: 1-D Tensor with 2 elements of type tf.float32.
The pixel spacing (in micrometers) of the raw image.
net_input_spatial_shape: List with 2 elements. The shape of the image that
will be fed into the network (excluding channel dimension).
net_input_element_size_um: Tensor with 2 elements. The pixel spacing (in
micrometers) of the image that will be fed into the network.
control_grid_spacings_pix: List with 2 elements. The control grid spacing in
pixels.
deformations_magnitudes_um: 1-D Tensor with 2 elements. The magnitudes for
the random deformations. Will set the standard deviation (in micrometers)
of a random normal distribution from which deformations will be generated.
rotation_angle: Rotation angle in radians as a float (or single element
Tensor of floating point type). In the absence of mirroring, a positive
angle produces a counter-clockwise rotation of image contents.
scale_factors: 1-D Tensor with 2 elements of type tf.float32. Scale factors
in x0, x1 directions.
mirror_factors: 1-D Tensor with 2 elements. Mirror factors in x0, x1
directions. Each factor should be 1 or -1.
shearing_coefs: 1-D Tensor with 2 elements of type tf.float32. The shearing
coefficients (s01, s10) to create the shearing matrix:
[[ 1 , s01], [s10, 1]].
cropping_offset_pix: 1-D Tensor with 2 elements of type tf.float32. Cropping
position (center of the cropped patch in the raw image) in pixels relative
to the image origin (the origin is specified above as
raw_image_center_pos_pix).
Returns:
3-D Tensor (x0, x1, comp) containing a 2D vector field.
"""
# Set up the centered control grid for identity transform in real world
# coordinates.
control_grid = create_control_grid_for_cubic_interp(
transformed_image_shape=net_input_spatial_shape,
transformed_image_spacings_um=net_input_element_size_um,
control_grid_spacings_pix=control_grid_spacings_pix)
# Add random deformation.
control_grid += deformations_magnitudes_um * tf.random.normal(
shape=control_grid.shape)
# Apply affine transformation and transform units to raw image pixels.
scale_to_pix = 1. / raw_image_element_size_um
affine = tf.matmul(
create_2x2_rotation_matrix(rotation_angle),
tf.diag(scale_factors * tf.to_float(mirror_factors) * scale_to_pix))
affine_shearing = tf.matmul(affine,
create_2x2_shearing_matrix(shearing_coefs))
control_grid = tf.reshape(
tf.matmul(tf.reshape(control_grid, [-1, 2]), affine_shearing),
control_grid.get_shape().as_list())
# Translate to cropping position.
control_grid += raw_image_center_pos_pix + cropping_offset_pix
# Create the dense deformation field for the image.
dense_deformation_field = augmentation_ops.cubic_interpolation2d(
control_grid, control_grid_spacings_pix, net_input_spatial_shape)
return dense_deformation_field
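# Illustrative sketch (values assumed for the example; raw_image is an assumed
# float32 (height, width, channels) tensor): the field produced above is
# typically passed to augmentation_ops.apply_deformation2d, which resamples the
# raw image at the positions stored in the field. Odd spatial sizes are used
# here so the cubic interpolation size constraints are met.
#   deformation_field = create_2d_deformation_field(
#       raw_image_center_pos_pix=tf.constant([50., 50.]),
#       raw_image_element_size_um=tf.constant([1., 1.]),
#       net_input_spatial_shape=[65, 65],
#       net_input_element_size_um=tf.constant([1., 1.]),
#       control_grid_spacings_pix=[16, 16],
#       deformations_magnitudes_um=tf.constant([2., 2.]),
#       rotation_angle=tf.constant(0.1),
#       scale_factors=tf.constant([1., 1.]),
#       mirror_factors=tf.constant([1, 1]),
#       shearing_coefs=tf.constant([0., 0.]),
#       cropping_offset_pix=tf.constant([0., 0.]))
#   augmented = augmentation_ops.apply_deformation2d(
#       raw_image, deformation_field, [])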
def create_3x3_rotation_matrix(radians):
"""Creates a 3D rotation matrix.
Args:
radians: 1-D Tensor with 3 elements, (a0, a1, a2) with the 3 rotation
angles in radians, where a0 is the rotation around the x0 axis, etc.
Returns:
2-D Tensor with 3x3 elements, the rotation matrix.
"""
with tf.variable_scope("rotation_dim_0"):
rotation_dim_0 = [[1.0, 0.0, 0.0],
[0.0, tf.cos(radians[0]), -tf.sin(radians[0])],
[0.0, tf.sin(radians[0]), tf.cos(radians[0])]]
rotation_dim_0 = tf.convert_to_tensor(
rotation_dim_0, name="rotation_matrix")
with tf.variable_scope("rotation_dim_1"):
rotation_dim_1 = [[tf.cos(radians[1]), 0.0, tf.sin(radians[1])],
[0.0, 1.0, 0.0],
[-tf.sin(radians[1]), 0.0, tf.cos(radians[1])]]
rotation_dim_1 = tf.convert_to_tensor(
rotation_dim_1, name="rotation_matrix")
with tf.variable_scope("rotation_dim_2"):
rotation_dim_2 = [[tf.cos(radians[2]), -tf.sin(radians[2]), 0.0],
[tf.sin(radians[2]), tf.cos(radians[2]), 0.0],
[0.0, 0.0, 1.0]]
rotation_dim_2 = tf.convert_to_tensor(
rotation_dim_2, name="rotation_matrix")
with tf.variable_scope("rotation"):
rotation = tf.matmul(rotation_dim_0, rotation_dim_1)
rotation = tf.matmul(rotation, rotation_dim_2)
return rotation
def create_3x3_shearing_matrix(shearing_coefs):
"""Creates a 3D shearing matrix.
Args:
shearing_coefs: 6-element list with the shearing coefficients
(off-diagonal elements of the matrix: s01, s02, s10, s12, s20, s21) to
create the matrix
[[ 1 , s01, s02],
[s10, 1 , s12],
[s20, s21, 1 ]]
Returns:
2-D Tensor with 3x3 elements, the shearing matrix.
"""
shearing = [[1., shearing_coefs[0], shearing_coefs[1]],
[shearing_coefs[2], 1., shearing_coefs[3]],
[shearing_coefs[4], shearing_coefs[5], 1.]]
shearing = tf.convert_to_tensor(shearing, name="shearing_matrix")
return shearing
def create_3d_deformation_field(
raw_image_center_pos_pix, raw_image_element_size_um,
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix, deformations_magnitudes_um, rotation_angles,
scale_factors, mirror_factors, shearing_coefs, cropping_offset_pix):
"""Create a 3D deformation field.
Creates a dense 3D deformation field for affine and elastic deformations. The
created 3D vector field (represented as a 4-D Tensor with (x0, x1, x2, comp))
has the same spatial shape as the output image and contains the absolute
position of the corresponding voxel in the input (raw) image. The process of
creating the deformation field has four steps:
1. Setup a grid of control points
2. Add a random offset to each control point drawn from a normal
distribution to model the random elastic deformation
3. Apply the affine transformation to the control points
4. Compute a dense transformation field using cubic bspline interpolation
  A more detailed description of the process can be found in the doc
directory.
Args:
raw_image_center_pos_pix: 1-D Tensor with 3 elements. The position of the
origin in the raw image in pixels from the upper, left, front corner.
raw_image_element_size_um: 1-D Tensor with 3 elements. The pixel spacing
(in micrometers) of the raw image.
net_input_spatial_shape: 1-D Tensor with 3 elements. The shape of the
image that will be fed into the network.
net_input_element_size_um: 1-D Tensor with 3 elements. The pixel spacing
(in micrometers) of the image that will be fed into the network.
control_grid_spacings_pix: 1-D Tensor with 3 elements. The control grid
spacing in pixels.
deformations_magnitudes_um: 1-D Tensor with 3 elements. The magnitudes
for the random deformations, the standard deviation (in micrometers) of a
random normal distribution.
rotation_angles: 1-D Tensor with 3 elements, (a0, a1, a2) with the 3
rotation angles in radians, where a0 is the rotation around the x0 axis,
etc.
scale_factors: 1-D Tensor with 3 elements. Scale factors in x0, x1, and x2
directions.
mirror_factors: 1-D Tensor with 3 elements. Mirror factors in x0, x1, and
x2 direction. Each factor should be 1 or -1.
shearing_coefs: 1-D Tensor with 6 elements. The shearing coefficients
(off-diagonal elements of the matrix: s01, s02, s10, s12, s20, s21) to
create the shearing matrix
[[ 1 , s01, s02],
[s10, 1 , s12],
[s20, s21, 1 ]]
cropping_offset_pix: 1-D Tensor with 3 elements. Cropping position (center
of the cropped patch in the raw image) in pixels relative to the image
origin (the origin is specified above as raw_image_center_pos_pix).
Returns:
4-D Tensor (x0, x1, x2, comp) describing a 3D vector field.
"""
# Set up the centered control grid for identity transform in real world
# coordinates.
control_grid = create_control_grid_for_cubic_interp(
net_input_spatial_shape, net_input_element_size_um,
control_grid_spacings_pix)
# Add random deformation.
control_grid += deformations_magnitudes_um * tf.random.normal(
shape=control_grid.shape)
# Apply affine transformation and transform units to raw image pixels.
scale_to_pix = 1. / raw_image_element_size_um
affine = tf.matmul(
create_3x3_rotation_matrix(rotation_angles),
tf.diag(scale_factors * mirror_factors * scale_to_pix))
affine_shearing = tf.matmul(
affine, create_3x3_shearing_matrix(shearing_coefs))
control_grid = tf.reshape(
tf.matmul(tf.reshape(control_grid, [-1, 3]), affine_shearing),
control_grid.shape)
# Translate to cropping position.
control_grid += raw_image_center_pos_pix + cropping_offset_pix
# Create the dense deformation field for the image.
dense_deformation_field = augmentation_ops.cubic_interpolation3d(
control_grid, control_grid_spacings_pix, net_input_spatial_shape)
return dense_deformation_field
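# The 3D case mirrors the 2D sketch above; the resulting (x0, x1, x2, comp)
# field is typically consumed by augmentation_ops.apply_deformation3d together
# with an assumed raw (depth, height, width, channels) volume:
#   augmented = augmentation_ops.apply_deformation3d(
#       raw_volume, dense_deformation_field, [])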
| multidim-image-augmentation-master | multidim_image_augmentation/deformation_utils.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import deformation_utils
_ARRAY_COMPARE_TOLERANCE = 1e-5
class ControlGridTest(tf.test.TestCase):
def test_create_control_grid_for_cubic_interp_2d(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1]),
control_grid_spacings_pix=[9, 9])
self.assertAllEqual([6, 8, 2], grid.eval().shape)
def test_create_control_grid_for_cubic_interp_3d(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[10, 20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
control_grid_spacings_pix=[9, 9, 9])
self.assertAllEqual([4, 6, 8, 3], grid.eval().shape)
def test_create_control_grid_for_cubic_interp_3d_single_slice(self):
with self.session():
grid = deformation_utils.create_control_grid_for_cubic_interp(
transformed_image_shape=[1, 20, 30],
transformed_image_spacings_um=tf.constant([0.1, 0.1, 0.1]),
control_grid_spacings_pix=[1, 9, 9])
self.assertAllEqual([3, 6, 8, 3], grid.eval().shape)
class Create2DDeformationFieldTest(tf.test.TestCase):
def test_applies_cropping_offset(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([2.0, 3.0]))
expected_output = np.array([[[2, 3], [2, 4], [2, 5]],
[[3, 3], [3, 4], [3, 5]],
[[4, 3], [4, 4], [4, 5]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_rotation(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(np.pi / 4.),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-0.4142135624, 1.],
[0.2928932188, 1.7071067812],
[1., 2.4142135624]],
[[0.2928932188, 0.2928932188],
[1., 1.],
[1.7071067812, 1.7071067812]],
[[1., -0.4142135624],
[1.7071067812, 0.2928932188],
[2.4142135624, 1]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_shear(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.1]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-0.1, 0], [0, 1], [0.1, 2]],
[[0.9, 0], [1, 1], [1.1, 2]],
[[1.9, 0], [2, 1], [2.1, 2]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_mirror(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([1.0, 1.0]),
mirror_factors=tf.constant([-1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[2., 0.], [2., 1.], [2., 2.]],
[[1., 0.], [1., 1.], [1., 2.]],
[[0., 0.], [0., 1.], [0., 2.]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_scale(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(0.0),
scale_factors=tf.constant([2.0, 1.0]),
mirror_factors=tf.constant([1, 1]),
shearing_coefs=tf.constant([0.0, 0.0]),
cropping_offset_pix=tf.constant([0.0, 0.0]))
expected_output = np.array([[[-1., 0.], [-1., 1.], [-1., 2.]],
[[1., 0.], [1., 1.], [1., 2.]],
[[3., 0.], [3., 1.], [3., 2.]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_applies_multiple_transforms_together(self):
deformation_field = deformation_utils.create_2d_deformation_field(
raw_image_center_pos_pix=tf.constant([1.0, 1.0]),
raw_image_element_size_um=tf.constant([1.0, 1.0]),
net_input_spatial_shape=[3, 3],
net_input_element_size_um=tf.constant([1.0, 1.0]),
control_grid_spacings_pix=[2.0, 2.0],
deformations_magnitudes_um=tf.constant([0.0, 0.0]),
rotation_angle=tf.constant(np.pi / 2.),
scale_factors=tf.constant([1.0, 2.0]),
mirror_factors=tf.constant([1, -1]),
shearing_coefs=tf.constant([0.1, 0.0]),
cropping_offset_pix=tf.constant([3.0, 5.0]))
expected_output = np.array([[[3., 3.9], [4., 4.], [5., 4.1]],
[[3., 5.9], [4., 6.], [5., 6.1]],
[[3., 7.9], [4., 8.], [5., 8.1]]])
with self.session() as sess:
np.testing.assert_allclose(
expected_output,
sess.run(deformation_field),
atol=_ARRAY_COMPARE_TOLERANCE)
def test_oddEvenErrorHandling(self):
with tf.Session():
deform = deformation_utils.create_2d_deformation_field(
np.array([101, 101]) / 2,
raw_image_element_size_um=tf.constant([1., 1.]),
net_input_spatial_shape=[50, 101],
net_input_element_size_um=tf.constant([2., 1.]),
control_grid_spacings_pix=[10, 10],
deformations_magnitudes_um=tf.constant((0., 0.)),
rotation_angle=tf.constant(0.),
scale_factors=tf.constant((1., 1.)),
mirror_factors=tf.constant((1., 1.)),
shearing_coefs=tf.constant((0., 0., 0., 0.)),
cropping_offset_pix=tf.constant((0., 0.)))
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
"factor must be odd as input and output size is even"):
deform.eval()
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/deformation_utils_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class ApplyDeformation2DTest(tf.test.TestCase):
def test_IdentityTransform(self):
with self.session():
src = np.random.random([10, 7, 3]).astype(np.float32)
deformation = np.ndarray([10, 7, 2], dtype=np.float32)
for x0 in range(deformation.shape[0]):
for x1 in range(deformation.shape[1]):
deformation[x0, x1, 0] = x0
deformation[x0, x1, 1] = x1
result = augmentation_ops.apply_deformation2d(
src, deformation, [])
self.assertEqual(result.get_shape(), src.shape)
trg = result.eval()
self.assertAllEqual(trg, src)
def test_ExtrapolationMirror(self):
with self.session():
src = np.array([[[0], [1], [2], [3], [4]]]).astype(np.float32)
deform = np.array([[[0, -10], [0, -9], [0, -8], [0, -7], [0, -6],
[0, -5], [0, -4], [0, -3], [0, -2], [0, -1], [0, 0],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[0, 7], [0, 8], [0, 9], [0, 10]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation2d(
src, deform, []).eval()
self.assertAllEqual(
np.array([[[2], [1], [0], [1], [2], [3], [4], [3], [2], [1], [0],
[1], [2], [3], [4], [3], [2], [1], [0], [1], [2]]]), trg)
def test_ExtrapolationZero(self):
with self.session():
src = np.array([[[10], [11], [12], [13], [14]]]).astype(np.float32)
deform = np.array([[[0, -10], [0, -9], [0, -8], [0, -7], [0, -6],
[0, -5], [0, -4], [0, -3], [0, -2], [0, -1], [0, 0],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[0, 7], [0, 8], [0, 9], [0, 10]]]).astype(np.float32)
# padding is not used but should not fail the test.
padding = np.array([42])
trg = augmentation_ops.apply_deformation2d(
src, deform, padding, extrapolation="zero_padding").eval()
self.assertAllEqual(
np.array([[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [10],
[11], [12], [13], [14], [0], [0], [0], [0], [0], [0]]]),
trg)
def test_ExtrapolationZeroMultichannel(self):
with self.session():
src = np.array([[[10, 9, 8, 7], [11, 10, 9, 8], [12, 11, 10, 9],
[13, 12, 11, 10], [14, 13, 12, 11]]]).astype(np.float32)
deform = np.array([[[0, -10], [0, -9], [0, -8], [0, -7], [0, -6],
[0, -5], [0, -4], [0, -3], [0, -2], [0, -1], [0, 0],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[0, 7], [0, 8], [0, 9], [0, 10]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation2d(
src, deform, [], extrapolation="zero_padding").eval()
self.assertAllEqual(
np.array([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[0, 0, 0, 0], [0, 0, 0, 0], [10, 9, 8, 7], [11, 10, 9, 8],
[12, 11, 10, 9], [13, 12, 11, 10], [14, 13, 12, 11],
[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[0, 0, 0, 0], [0, 0, 0, 0]]]), trg)
def test_ExtrapolationConst(self):
with self.session():
src = np.array([[[10], [11], [12], [13], [14]]]).astype(np.float32)
deform = np.array([[[0, -10], [0, -9], [0, -8], [0, -7], [0, -6],
[0, -5], [0, -4], [0, -3], [0, -2], [0, -1], [0, 0],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[0, 7], [0, 8], [0, 9], [0, 10]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation2d(
src,
deform,
padding_constant=np.array([42]),
extrapolation="const_padding").eval()
self.assertAllEqual(
np.array([[[42], [42], [42], [42], [42], [42], [42], [42], [42],
[42], [10], [11], [12], [13], [14], [42], [42], [42],
[42], [42], [42]]]), trg)
def test_ExtrapolationConstMultichannel(self):
with self.session():
src = np.array([[[10, 9, 8, 7], [11, 10, 9, 8], [12, 11, 10, 9],
[13, 12, 11, 10], [14, 13, 12, 11]]]).astype(np.float32)
deform = np.array([[[0, -10], [0, -9], [0, -8], [0, -7], [0, -6],
[0, -5], [0, -4], [0, -3], [0, -2], [0, -1], [0, 0],
[0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
[0, 7], [0, 8], [0, 9], [0, 10]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation2d(
src,
deform,
extrapolation="const_padding",
padding_constant=np.array([1, 2, 3, 4])).eval()
self.assertAllEqual(
np.array([[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4], [10, 9, 8, 7], [11, 10, 9, 8],
[12, 11, 10, 9], [13, 12, 11, 10], [14, 13, 12, 11],
[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4],
[1, 2, 3, 4], [1, 2, 3, 4]]]), trg)
class ApplyDeformation3DTest(tf.test.TestCase):
def test_IdentityTransform(self):
with self.session():
src = np.random.random([4, 10, 7, 3]).astype(np.float32)
deformation = np.ndarray([4, 10, 7, 3], dtype=np.float32)
for x0 in range(deformation.shape[0]):
for x1 in range(deformation.shape[1]):
for x2 in range(deformation.shape[2]):
deformation[x0, x1, x2, 0] = x0
deformation[x0, x1, x2, 1] = x1
deformation[x0, x1, x2, 2] = x2
result = augmentation_ops.apply_deformation3d(
src, deformation, [])
self.assertEqual(result.get_shape(), src.shape)
trg = result.eval()
self.assertAllEqual(trg, src)
def test_InterpolationNearest(self):
with self.session():
src = np.array([[[[0], [10], [20], [30]]]]).astype(np.float32)
deform = np.array([[[[0, 0, 0.5], [0, 0, 2.7]]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation3d(
src, deform, [], interpolation="nearest").eval()
self.assertAllEqual(
np.array([[[[10], [30]]]]), trg)
def test_InterpolationMixedNearestLinear(self):
with self.session():
src = np.array([[[[0], [10], [20], [30]]],
[[[5], [15], [25], [35]]]]).astype(np.float32)
deform = np.array([[[[0, 0, 0.5], [0, 0, 2.7]],
[[0, 1, 1.5], [1, 0, 2.1]]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation3d(
src,
deform, [],
interpolation="mixed_nearest_linear",
extrapolation="zero_padding").eval()
self.assertAllClose(np.array([[[[5], [27]], [[0], [26]]]]), trg)
def test_ExtrapolationMirror(self):
with self.session():
src = np.array([[[[0], [1], [2], [3], [4]]]]).astype(np.float32)
deform = np.array([[[[0, 0, -10], [0, 0, -9], [0, 0, -8], [0, 0, -7],
[0, 0, -6], [0, 0, -5], [0, 0, -4], [0, 0, -3],
[0, 0, -2], [0, 0, -1], [0, 0, 0], [0, 0, 1],
[0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 0, 5],
[0, 0, 6], [0, 0, 7], [0, 0, 8], [0, 0, 9],
[0, 0, 10]]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation3d(
src, deform, []).eval()
self.assertAllEqual(
np.array([[[[2], [1], [0], [1], [2], [3], [4], [3], [2], [1], [0],
[1], [2], [3], [4], [3], [2], [1], [0], [1], [2]]]]), trg)
def test_ExtrapolationZero(self):
with self.session():
src = np.array([[[[10], [11], [12], [13], [14]]]]).astype(np.float32)
deform = np.array([[[[0, 0, -10], [0, 0, -9], [0, 0, -8], [0, 0, -7],
[0, 0, -6], [0, 0, -5], [0, 0, -4], [0, 0, -3],
[0, 0, -2], [0, 0, -1], [0, 0, 0], [0, 0, 1],
[0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 0, 5],
[0, 0, 6], [0, 0, 7], [0, 0, 8], [0, 0, 9],
[0, 0, 10]]]]).astype(np.float32)
# padding is not used but should not fail the test.
padding = np.array([42])
trg = augmentation_ops.apply_deformation3d(
src, deform, padding, extrapolation="zero_padding").eval()
self.assertAllEqual(
np.array([[[[0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [10],
[11], [12], [13], [14], [0], [0], [0], [0], [0], [0]]]]),
trg)
def test_ExtrapolationConst(self):
with self.session():
src = np.array([[[[10], [11], [12], [13], [14]]]]).astype(np.float32)
deform = np.array([[[[0, 0, -10], [0, 0, -9], [0, 0, -8], [0, 0, -7],
[0, 0, -6], [0, 0, -5], [0, 0, -4], [0, 0, -3],
[0, 0, -2], [0, 0, -1], [0, 0, 0], [0, 0, 1],
[0, 0, 2], [0, 0, 3], [0, 0, 4], [0, 0, 5],
[0, 0, 6], [0, 0, 7], [0, 0, 8], [0, 0, 9],
[0, 0, 10]]]]).astype(np.float32)
trg = augmentation_ops.apply_deformation3d(
src,
deform,
padding_constant=np.array([42]),
extrapolation="const_padding").eval()
self.assertAllEqual(
np.array([[[[42], [42], [42], [42], [42], [42], [42], [42], [42],
[42], [10], [11], [12], [13], [14], [42], [42], [42],
[42], [42], [42]]]]), trg)
def test_One_Hot_Encoding(self):
with self.session():
src = np.array([[[[4], [3], [1], [0], [2]]]]).astype(np.float32)
deform = np.array([[[[0, 0, -.5], [0, 0, 0], [0, 0, 0.3], [0, 0, 1],
[0, 0, 1.5], [0, 0, 2.5], [0, 0, 4],
[0, 0, 5]]]]).astype(np.float32)
trg_graph = augmentation_ops.apply_deformation3d(
src,
deform, [],
extrapolation="zero_padding",
conversion="indexed_to_one_hot",
output_num_channels=5)
trg = trg_graph.eval()
self.assertAllEqual([1, 1, 8, 5], trg_graph.shape)
self.assertAllEqual([1, 1, 8, 5], trg.shape)
expected = np.array([[[[0.5, 0, 0, 0, 0.5], [0, 0, 0, 0, 1],
[0, 0, 0, 0.3, 0.7], [0, 0, 0, 1, 0],
[0, 0.5, 0, 0.5, 0], [0.5, 0.5, 0, 0, 0],
[0, 0, 1, 0, 0], [1, 0, 0, 0, 0]]]]).astype(float)
for x2 in range(8):
for ch in range(5):
self.assertAlmostEqual(
expected[0, 0, x2, ch],
trg[0, 0, x2, ch],
msg="expected {}, but got {} at x2={}, ch={}".format(
expected[0, 0, x2, ch], trg[0, 0, x2, ch], x2, ch))
def test_outputSpatialShape(self):
with self.session():
src = np.random.random([4, 10, 7, 3]).astype(np.float32)
deformation = np.ndarray([4, 10, 7, 3], dtype=np.float32)
for x0 in range(deformation.shape[0]):
for x1 in range(deformation.shape[1]):
for x2 in range(deformation.shape[2]):
deformation[x0, x1, x2, 0] = x0
deformation[x0, x1, x2, 1] = x1
deformation[x0, x1, x2, 2] = x2
result = augmentation_ops.apply_deformation3d(
src, deformation, [],
output_spatial_shape=[-1, 6, 5])
self.assertEqual(result.get_shape(), [4, 6, 5, 3])
trg = result.eval()
self.assertAllEqual(trg, src[:, 2:-2, 1:-1, :])
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/apply_deformation_op_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class CubicInterpolationTest(tf.test.TestCase):
def test_1DInterpolation(self):
with self.session():
grid = np.ndarray([5, 2], dtype=np.float32)
for x0 in range(grid.shape[0]):
for channel in range(grid.shape[1]):
grid[x0, channel] = x0 * grid.shape[1] + channel
dense = augmentation_ops.cubic_interpolation1d(
input=grid, factor=10, output_length=21).eval()
precision = 5
self.assertAlmostEqual(grid[1, 0], dense[0, 0], precision)
self.assertAlmostEqual(grid[2, 0], dense[10, 0], precision)
self.assertAlmostEqual(grid[3, 0], dense[20, 0], precision)
self.assertAlmostEqual(grid[1, 1], dense[0, 1], precision)
self.assertAlmostEqual(grid[2, 1], dense[10, 1], precision)
self.assertAlmostEqual(grid[3, 1], dense[20, 1], precision)
def test_1DInterpolationFull(self):
with self.session():
grid = np.ndarray([5, 2], dtype=np.float32)
for x0 in range(grid.shape[0]):
for channel in range(grid.shape[1]):
grid[x0, channel] = x0 * grid.shape[1] + channel
dense_op = augmentation_ops.cubic_interpolation1d(grid, 10)
self.assertAllEqual([41, 2], dense_op.get_shape().as_list())
dense = dense_op.eval()
precision = 5
self.assertAlmostEqual(grid[0, 0], dense[0, 0], precision)
self.assertAlmostEqual(grid[1, 0], dense[10, 0], precision)
self.assertAlmostEqual(grid[2, 0], dense[20, 0], precision)
self.assertAlmostEqual(grid[3, 0], dense[30, 0], precision)
self.assertAlmostEqual(grid[4, 0], dense[40, 0], precision)
self.assertAlmostEqual(grid[3, 1], dense[30, 1], precision)
self.assertAlmostEqual(grid[2, 1], dense[20, 1], precision)
self.assertAlmostEqual(grid[0, 1], dense[0, 1], precision)
def test_OddEvenError(self):
with self.session():
odd_even = augmentation_ops.cubic_interpolation1d(
np.ndarray([1, 1]), 2, output_length=2)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
"output size and input size must both be odd or both be even"):
odd_even.eval()
def test_EvenOddError(self):
with self.session():
even_odd = augmentation_ops.cubic_interpolation1d(
np.ndarray([2, 1]), 2, output_length=1)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
"output size and input size must both be odd or both be even"):
even_odd.eval()
def test_AllEvenError(self):
with self.session():
all_even = augmentation_ops.cubic_interpolation1d(
np.ndarray([2, 1]), 2, output_length=2)
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
"factor must be odd as input and output size is even"):
all_even.eval()
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/cubic_interpolation1d_op_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class CubicInterpolationTest(tf.test.TestCase):
def test_2DInterpolation(self):
with self.session():
grid = np.ndarray([5, 5, 2], dtype=np.float32)
c = 0
for x0 in range(grid.shape[0]):
for x1 in range(grid.shape[1]):
for channel in range(grid.shape[2]):
grid[x0, x1, channel] = c
c += 1
dense = augmentation_ops.cubic_interpolation2d(
input=grid, factors=[10, 10], output_spatial_shape=[21, 21]).eval()
precision = 5
self.assertAlmostEqual(grid[1, 1, 0], dense[0, 0, 0], precision)
self.assertAlmostEqual(grid[2, 2, 0], dense[10, 10, 0], precision)
self.assertAlmostEqual(grid[3, 3, 0], dense[20, 20, 0], precision)
self.assertAlmostEqual(grid[1, 1, 1], dense[0, 0, 1], precision)
self.assertAlmostEqual(grid[2, 2, 1], dense[10, 10, 1], precision)
self.assertAlmostEqual(grid[3, 3, 1], dense[20, 20, 1], precision)
def testFactorAttrLengthErrors(self):
with self.session():
with self.assertRaisesWithPredicateMatch(ValueError,
"factors must be rank 2, got 3"):
augmentation_ops.cubic_interpolation2d(
np.ndarray([1, 1, 1]),
factors=[3, 4, 5],
output_spatial_shape=[8, 9]).eval()
def testOutputSpatialLengthAttrLengthErrors(self):
with self.session():
with self.assertRaisesWithPredicateMatch(
ValueError, "output_spatial_shape must be rank 2, got 3"):
augmentation_ops.cubic_interpolation2d(
np.ndarray([1, 1, 1]),
factors=[3, 4],
output_spatial_shape=[7, 8, 9]).eval()
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/cubic_interpolation2d_op_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class RandomLUTControlPointsTest(tf.test.TestCase):
def testBasic1(self):
with self.session():
graph = augmentation_ops.random_lut_control_points(
new_black_range=[-0.1, 0.1],
new_white_range=[0.9, 1.1],
slope_min=0.7,
slope_max=1.4,
num_control_point_insertions=2)
for _ in range(10):
lut_control_points = graph.eval()
self.assertEqual(lut_control_points.shape[0], 5)
tf.logging.info(lut_control_points)
slopes = (lut_control_points[1:] - lut_control_points[0:-1]) / 0.25
for i in range(4):
self.assertGreaterEqual(slopes[i], 0.7)
self.assertLessEqual(slopes[i], 1.4)
def testBasic2(self):
with self.session():
graph = augmentation_ops.random_lut_control_points(
new_black_range=[-0.1, 0.1],
new_white_range=[0.9, 1.1],
slope_min=0.7,
slope_max=1.4,
num_control_point_insertions=3)
for _ in range(10):
lut_control_points = graph.eval()
self.assertEqual(lut_control_points.shape[0], 9)
slopes = (lut_control_points[1:] - lut_control_points[0:-1]) / 0.125
for i in range(4):
self.assertGreaterEqual(slopes[i], 0.7)
self.assertLessEqual(slopes[i], 1.4)
def testNotOptimizedAway(self):
with self.session() as sess:
lut = augmentation_ops.random_lut_control_points(
new_black_range=[-0.1, 0.1],
new_white_range=[0.9, 1.1],
slope_min=0.7,
slope_max=1.4,
num_control_point_insertions=2)
graph = lut + 1
for _ in range(10):
lut_control_points1 = sess.run(graph)
lut_control_points2 = sess.run(graph)
for i in range(len(lut_control_points2)):
self.assertNotEqual(lut_control_points1[i], lut_control_points2[i])
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/random_lut_controlpoints_op_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class ApplyTabulatedFunctionTest(tf.test.TestCase):
def testShape(self):
image = np.random.random([10, 7, 3]).astype(np.float32)
tabulated_functions = np.random.random([3, 256]).astype(np.float32)
out_image = augmentation_ops.apply_tabulated_functions(
image, tabulated_functions)
self.assertAllEqual(out_image.get_shape(), image.shape)
def testBasicUsage(self):
with self.session():
image = np.array([[[5], [2], [4], [0], [3], [5], [2], [4], [0],
[3]]]).astype(np.float32)
tabulated_functions = np.array([[10, 11, 12, 13, 14, 15, 16,
17]]).astype(np.float32)
out_image = augmentation_ops.apply_tabulated_functions(
image, tabulated_functions)
self.assertAllEqual(
np.array([[[15], [12], [14], [10], [13], [15], [12], [14], [10],
[13]]]), out_image.eval())
def testInterpolationExtrapolation(self):
with self.session():
image = np.array([[[-1], [2.7], [4.2], [0.3], [8.5]]]).astype(np.float32)
tabulated_functions = np.array([[10, 11, 12, 13, 14, 15, 16,
17]]).astype(np.float32)
out_image = augmentation_ops.apply_tabulated_functions(
image, tabulated_functions)
self.assertAllClose(
np.array([[[9], [12.7], [14.2], [10.3], [18.5]]]), out_image.eval())
def testMult(self):
with self.session():
image = np.array([[[5, 2], [2, 4], [4, 1], [0, 1],
[3, 0]]]).astype(np.float32)
tabulated_functions = np.array([[10, 11, 12, 13, 14, 15, 16, 17],
[0, 10, 20, 30, 40, 50, 60,
70]]).astype(np.float32)
out_image = augmentation_ops.apply_tabulated_functions(
image, tabulated_functions)
self.assertAllEqual(
np.array([[[15, 20], [12, 40], [14, 10], [10, 10], [13, 0]]]),
out_image.eval())
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/apply_tabulated_functions_op_test.py |
# Lint as: python2, python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
from multidim_image_augmentation import augmentation_ops
class CubicInterpolationTest(tf.test.TestCase):
def test_3DInterpolation(self):
with self.session():
grid = np.ndarray([5, 5, 5, 2], dtype=np.float32)
c = 0
for x0 in range(grid.shape[0]):
for x1 in range(grid.shape[1]):
for x2 in range(grid.shape[2]):
for channel in range(grid.shape[3]):
grid[x0, x1, x2, channel] = c
c += 1
dense = augmentation_ops.cubic_interpolation3d(
input=grid,
factors=[10, 10, 10],
output_spatial_shape=[21, 21, 21]).eval()
precision = 4
self.assertAlmostEqual(grid[1, 1, 1, 0], dense[0, 0, 0, 0], precision)
self.assertAlmostEqual(grid[1, 1, 3, 0], dense[0, 0, 20, 0], precision)
self.assertAlmostEqual(grid[1, 3, 1, 0], dense[0, 20, 0, 0], precision)
self.assertAlmostEqual(grid[3, 1, 1, 0], dense[20, 0, 0, 0], precision)
self.assertAlmostEqual(grid[2, 2, 2, 0], dense[10, 10, 10, 0], precision)
self.assertAlmostEqual(grid[3, 3, 3, 0], dense[20, 20, 20, 0], precision)
self.assertAlmostEqual(grid[1, 1, 1, 1], dense[0, 0, 0, 1], precision)
self.assertAlmostEqual(grid[1, 1, 3, 1], dense[0, 0, 20, 1], precision)
self.assertAlmostEqual(grid[1, 3, 1, 1], dense[0, 20, 0, 1], precision)
self.assertAlmostEqual(grid[3, 1, 1, 1], dense[20, 0, 0, 1], precision)
self.assertAlmostEqual(grid[2, 2, 2, 1], dense[10, 10, 10, 1], precision)
self.assertAlmostEqual(grid[3, 3, 3, 1], dense[20, 20, 20, 1], precision)
def test_3DInterpolationSingleSlice(self):
with self.session():
grid = np.ndarray([3, 5, 5, 2], dtype=np.float32)
c = 0
for x0 in range(grid.shape[0]):
for x1 in range(grid.shape[1]):
for x2 in range(grid.shape[2]):
for channel in range(grid.shape[3]):
grid[x0, x1, x2, channel] = c
c += 1
dense = augmentation_ops.cubic_interpolation3d(
input=grid,
factors=[1, 10, 10],
output_spatial_shape=[1, 21, 21],
).eval()
precision = 4
self.assertAlmostEqual(grid[1, 1, 1, 0], dense[0, 0, 0, 0], precision)
self.assertAlmostEqual(grid[1, 1, 3, 0], dense[0, 0, 20, 0], precision)
self.assertAlmostEqual(grid[1, 3, 1, 0], dense[0, 20, 0, 0], precision)
if __name__ == "__main__":
tf.test.main()
| multidim-image-augmentation-master | multidim_image_augmentation/python/kernel_tests/cubic_interpolation3d_op_test.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base config for experiments, and base sweep object.
Please don't touch this file! Change the parameters in your own sweep.
"""
import dataclasses
from typing import Any, Iterator, Mapping, Optional
@dataclasses.dataclass
class TrainConfig:
model_init_seed: int = 1
learning_rate: float = 1e-4
seq_length: int = 20
seq_length_fixed: bool = True
batch_size: int = 128
clip_grad_norm: float = 1.
reset_predictor_init_state: bool = True
gradient_chunk_length: Optional[int] = None
@dataclasses.dataclass
class ModelConfig:
model_type: str = 'rnn'
architecture_kwargs: dict[str, Any] = dataclasses.field(default_factory=dict)
@dataclasses.dataclass
class ScheduleConfig:
training_steps: int = 100000 # Number of gradient steps.
num_saves: int = 30 # Total number of parameters to save.
first_save: int = 0 # Num steps before first saving.
saving_schedule_type: str = 'log' # Either "linear" or "log".
ckpt_frequency: int = 2000 # Frequency of checkpointing, in gradient steps.
@dataclasses.dataclass
class LoggerConfig:
log_frequency: int = 250 # Frequency of logging, in gradient steps.
log_remotely: bool = True # Whether to add data to Bigtable.
@dataclasses.dataclass
class DataConfig:
"""Config for the data distribution.
This class may be inherited and enhanced by experiments in the 'experiments'
folder.
"""
@dataclasses.dataclass
class EvalConfig:
"""Config for the evaluator.
This class may be inherited and enhanced by experiments in the 'experiments'
folder.
"""
batch_size: int = 128 # Batch size used for evaluation.
@dataclasses.dataclass
class ExperimentConfig:
"""Config for supervised learning experiments."""
name: str = 'Supervised online learning'
seed: int = 1
data: DataConfig = dataclasses.field(default_factory=DataConfig)
eval: EvalConfig = dataclasses.field(default_factory=EvalConfig)
model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
train: TrainConfig = dataclasses.field(default_factory=TrainConfig)
schedule: ScheduleConfig = dataclasses.field(default_factory=ScheduleConfig)
logger: LoggerConfig = dataclasses.field(default_factory=LoggerConfig)
@dataclasses.dataclass
class ExperimentSweep:
"""A sweep to be passed to the experiment launcher, with parameter sweeps."""
general_sweep: Iterator[dict[str, Any]]
specific_sweeps: Mapping[str, Mapping[str, Iterator[Mapping[str, Any]]]]
base_config: ExperimentConfig = dataclasses.field(
default_factory=ExperimentConfig
)
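# Illustrative sketch (field values are arbitrary): sweeps typically override
# the defaults above by composing the dataclasses.
#   config = ExperimentConfig(
#       seed=0,
#       model=ModelConfig(model_type='transformer',
#                         architecture_kwargs={'num_layers': 4}),
#       train=TrainConfig(learning_rate=3e-4, batch_size=256),
#   )
#   assert config.schedule.training_steps == 100000  # untouched default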
| nonstationary_mbml-main | base_config.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Predictors used in the project.
The interface is general and accepts any 'unrolling' predictor, basically
implementing a jax.lax.scan function under the hood.
"""
import abc
from typing import Any, Callable, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
from nonstationary_mbml.experiments import distributions
class Predictor(abc.ABC):
"""Predictors used for predictions."""
@abc.abstractmethod
def initial_state(
self,
params: Any,
rng: chex.PRNGKey,
batch_size: int,
) -> Any:
"""Sample an initial state for the predictor.
Can be independent of params as well as rng.
Args:
params: the parameters of the predictor, can be anything
rng: a random key
batch_size: the number of initial states to return
Returns:
init_state: a list or array of size batch_size, containing the states of
the predictor
"""
@abc.abstractmethod
def init_params(self, rng: chex.PRNGKey, batch_init: chex.Array,
state_init: chex.Array) -> hk.Params:
"""Initialise parameters.
Args:
rng: Seed operation.
batch_init: Dummy data to create network's parameters.
state_init: Dummy state to create network's parameters.
Returns:
Parameters of the network.
"""
@abc.abstractmethod
def unroll(
self,
params: Any,
rng: chex.PRNGKey,
batch: chex.Array,
init_state: Any,
) -> chex.Array:
"""Unroll our predictor on a batch of trajectories.
Args:
params: the parameters of the predictor, can be anything
rng: a random key
batch: a (batch_size, seq_length,)+obs_shape tensor, containing the
observations for the predictor
init_state: the initial state of the predictor
Returns:
predictions: a (batch_size, seq_length, parameter_size) tensor, containing
the output of the predictor, i.e., predictions of the next value in the
sequence
"""
class InContextPredictor(Predictor):
"""A predictor without state that only looks at the current context."""
def __init__(self, predictor: Callable[[chex.Array], chex.Array]):
self._predictor_init, self._predictor_apply = hk.transform(predictor)
def initial_state(self, params: hk.Params, rng: chex.PRNGKey,
batch_size: int) -> Optional[Any]:
# No state for this predictor
return None
def init_params(self, rng: chex.PRNGKey, batch_init: chex.Array,
state_init: Optional[chex.Array]) -> hk.Params:
del state_init
return self._predictor_init(rng, batch_init)
def unroll(self, params: hk.Params, rng: chex.PRNGKey, batch: chex.Array,
init_state: Optional[chex.Array]) -> chex.Array:
del init_state
output = self._predictor_apply(params, rng, batch)
return output, None # pytype: disable=bad-return-type # numpy-scalars
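# Illustrative sketch (assumed rng key, dummy_batch array and MLP sizes):
# wrapping a stateless Haiku forward function so it fits the Predictor
# interface.
#   def forward(batch):
#     return hk.nets.MLP([64, 8])(batch)
#   predictor = InContextPredictor(forward)
#   params = predictor.init_params(rng, dummy_batch, state_init=None)
#   outputs, _ = predictor.unroll(params, rng, dummy_batch, init_state=None)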
class RNNPredictor(Predictor):
"""A predictor implementing an RNN.
  This class doesn't inherit from ScanPredictor because it uses its own Haiku
  behaviour.
"""
def __init__(self, unroll_factory, initial_state_factory):
self._init_params, self._unroll = hk.transform(unroll_factory)
_, self._initial_state = hk.transform(initial_state_factory)
def initial_state(
self,
params: Any,
rng: chex.PRNGKey,
batch_size: int,
) -> Any:
return self._initial_state(params, rng, batch_size)
def init_params(self, rng: chex.PRNGKey, batch_init: chex.Array,
state_init: Optional[chex.Array]) -> hk.Params:
return self._init_params(rng, batch_init, state_init)
def unroll(
self,
params: Any,
rng: chex.PRNGKey,
batch: chex.Array,
init_state: Any,
) -> chex.Array:
return self._unroll(params, rng, x=batch, initial_state=init_state)
class ScanPredictor(Predictor, abc.ABC):
"""Implementation of predictors using jax.lax.scan and an update function.
  The prior is the output corresponding to the initial state.
  The only things the predictor has to provide are how to update its
  state and what to output from that state.
"""
@abc.abstractmethod
def output_from_state(
self,
rng: chex.PRNGKey,
state: chex.Array,
) -> chex.Array:
"""Returns what the predictor will output at a given state."""
@abc.abstractmethod
def update_state(
self,
rng: chex.PRNGKey,
state: chex.Array,
x: chex.Array,
) -> chex.Array:
"""Returns state at time t+1 based on state at time t."""
def unroll(
self,
params: Any,
rng: chex.PRNGKey,
batch: chex.Array,
init_state: chex.Array,
jittable: bool = True,
) -> chex.Array:
del params
def scan_update_output(
state: chex.Array,
x: chex.Array,
) -> tuple[chex.Array, chex.Array]:
new_state = self.update_state(rng, state, x)
return new_state, self.output_from_state(rng, new_state)
# Change to time-major layout since lax.scan unrolls over leading dimension.
batch = batch.swapaxes(0, 1)
if jittable:
_, predictions = jax.lax.scan(scan_update_output, init_state, batch)
else:
state = init_state
predictions = []
for x in batch:
state, pred = scan_update_output(state, x)
predictions.append(pred)
predictions = jnp.stack(predictions, axis=0)
predictions = predictions.swapaxes(0, 1)
return predictions
class OptimalPredictor(ScanPredictor, abc.ABC):
"""Abstract class for optimal predictors.
The predictor must also define what prior and posterior distribution it uses.
They are chosen carefully to mathematically match.
Params are the parameters of the prior distribution.
"""
def init_params(self, rng: chex.PRNGKey, batch_init: chex.Array,
state_init: chex.Array) -> hk.Params:
raise NotImplementedError(
'Optimal predictors do not provide parameter initialization.'
)
def initial_state(
self,
params: list[tuple[float, ...]],
rng: chex.PRNGKey,
batch_size: int,
) -> chex.Array:
"""Computes the initial state based on parameters.
Args:
params: the parameter tuples of the parameter distributions used to sample
the true parameters of the observations.
rng: an unused random key
batch_size: the number of states returned
Returns:
state: a (batch_size, parameter_size) array
"""
state = jnp.concatenate([jnp.array(p) for p in params], axis=0)
state = jnp.stack([state] * batch_size, axis=0)
return state.astype(jnp.float32)
def unpack_state(self, state: chex.Array) -> tuple[chex.Array, ...]:
"""Splits a (batch_size, parameter_size) array to (batch_size, 1) elements.
"""
state = jnp.expand_dims(state, axis=-1)
return distributions.split_params(state)
def pack_state(self, *state_elements: tuple[chex.Array, ...]) -> chex.Array:
"""Converts individual state elements into a single array.
Args:
*state_elements: parameter_size arguments, each of shape (batch_size, 1)
Returns:
state: array of shape (batch_size, parameter_size)
"""
return jnp.concatenate(state_elements, axis=-1)
class OptimalCategoricalPredictor(OptimalPredictor):
"""Optimal bayesian predictor for Categorical distributions.
State is (alpha_1, ..., alpha_n), parameters of a Dirichlet(n) distribution
(conjugate prior).
The outputs are the parameters for a Dirichlet(n) distribution.
"""
def update_state(
self,
rng: chex.PRNGKey,
state: chex.Array,
x: chex.Array,
) -> chex.Array:
return state + x
def output_from_state(
self,
rng: chex.PRNGKey,
state: chex.Array,
) -> chex.Array:
parameters = state / jnp.sum(state, axis=-1, keepdims=True)
return jnp.log(parameters)
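# Illustrative sketch (not part of the original API): a tiny, hand-rolled
# example of the conjugate Dirichlet update implemented above. The helper name
# and the numbers below are made up for illustration only.
def _example_optimal_categorical_update():
  predictor = OptimalCategoricalPredictor()
  rng = jax.random.PRNGKey(0)
  # Dirichlet(1, 1) prior over a 2-class categorical, batch size 1.
  state = jnp.array([[1.0, 1.0]])
  # Observing a one-hot sample of class 0 increments its pseudo-count.
  state = predictor.update_state(rng, state, jnp.array([[1.0, 0.0]]))
  # Predictive log-probabilities are now log(2/3) and log(1/3).
  return predictor.output_from_state(rng, state)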
| nonstationary_mbml-main | predictors.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Factories to initialise predictors based on different neural architectures.
To create a new predictor you need to register a function that returns a
predictor based on a configuration and output size. The predictor factory should
be uniquely identified by its name.
"""
import functools
from typing import Any, Callable, Optional, Sequence
import einops
import haiku as hk
import jax
import jax.numpy as jnp
from nonstationary_mbml import predictors
from nonstationary_mbml.models import basic
from nonstationary_mbml.models import positional_encodings as pos_encs_lib
from nonstationary_mbml.models import transformer
_Config = dict[str, Any]
# A function that can be used to create a Predictor.
_PredictorFactory = Callable[[int, _Config], predictors.Predictor]
# Maps names to the correct agent factory.
PREDICTOR_FACTORIES: dict[str, _PredictorFactory] = {}
def _register_predictor_factory(
name: str,
) -> Callable[[_PredictorFactory], _PredictorFactory]:
"""Decorator for registering a function as a factory using the `name` id."""
if name.lower() != name:
raise ValueError(
'Please use lower-case names to register the predictor factories.'
)
def wrap(fn: _PredictorFactory) -> _PredictorFactory:
PREDICTOR_FACTORIES[name] = fn
return fn
return wrap
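# Illustrative sketch (not part of the original module): the registration
# pattern used by the concrete factories below. The name 'my_predictor' and
# the body are hypothetical.
#
# @_register_predictor_factory('my_predictor')
# def _make_my_predictor(output_size: int,
#                        architecture_config: _Config) -> predictors.Predictor:
#   predictor_fn = ...  # a haiku-transformable callable built from the config
#   return predictors.InContextPredictor(predictor_fn)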
class MLPWrappedRNN(hk.RNNCore):
"""A wrapper for RNNs to add MLP layers."""
def __init__(
self,
core: type[hk.RNNCore],
before_mlp_layers: Sequence[int] = (),
after_mlp_layers: Sequence[int] = (),
**core_kwargs
):
super().__init__()
self._core = core(**core_kwargs)
self._before_mlp = hk.nets.MLP(before_mlp_layers)
self._after_mlp = hk.nets.MLP(after_mlp_layers)
def __call__(self, inputs: Any, prev_state: Any) -> tuple[Any, Any]:
before_mlp_output = self._before_mlp(inputs)
core_output, next_state = self._core(before_mlp_output, prev_state)
after_mlp_output = self._after_mlp(core_output)
return after_mlp_output, next_state
def initial_state(self, batch_size: Optional[int]) -> Any:
return self._core.initial_state(batch_size)
class SlidingWindowTransformer:
"""A Transformer model that can handle large histories using a sliding window.
"""
def __init__(self, output_size: int, context_length: int,
architecture_config: _Config):
self._transformer = transformer.make_transformer_encoder(
output_size=output_size,
return_all_outputs=True,
causal_masking=True,
**architecture_config,
)
self._context_length = context_length
self._output_size = output_size
def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
batch_size, history_len, num_features = x.shape[:3]
history_batch_size = history_len // self._context_length
x_batched_history = einops.rearrange(
x,
'b (h c) f -> b h c f',
b=batch_size,
h=history_batch_size,
c=self._context_length,
f=num_features,
)
out = jax.vmap(self._transformer, in_axes=1, out_axes=1)(x_batched_history)
return einops.rearrange( # pytype: disable=bad-return-type # numpy-scalars
out,
'b h c o -> b (h c) o',
b=batch_size,
h=history_batch_size,
c=self._context_length,
o=self._output_size,
)
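# Shape walk-through for SlidingWindowTransformer (illustrative, with made-up
# numbers): with batch_size=4, history_len=100, num_features=2 and
# context_length=10, the (4, 100, 2) input is rearranged into (4, 10, 10, 2),
# each of the ten windows is processed independently by the causally masked
# transformer via jax.vmap, and the per-window outputs are flattened back to
# (4, 100, output_size). This assumes history_len is divisible by
# context_length.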
def _make_rnn_predictor(
output_size: int,
architecture_config: _Config,
rnn_core: type[hk.RNNCore],
) -> predictors.Predictor:
"""Returns an RNN predictor based on config."""
unroll_factory = basic.make_rnn(
output_size=output_size,
rnn_core=rnn_core,
return_all_outputs=True,
return_all_states=True,
input_window=1,
**architecture_config,
)
def initial_state_factory(batch_size: int):
return rnn_core(**architecture_config).initial_state(batch_size)
return predictors.RNNPredictor(unroll_factory, initial_state_factory)
_register_predictor_factory('rnn')(
functools.partial(_make_rnn_predictor, rnn_core=MLPWrappedRNN)
)
@_register_predictor_factory('transformer')
def _make_transformer_predictor(
output_size: int, architecture_config: _Config
) -> predictors.Predictor:
"""Returns Transformer predictor based on config."""
positional_encodings_params = {}
if 'positional_encodings_params' in architecture_config:
positional_encodings_params = architecture_config[
'positional_encodings_params'
]
architecture_config['positional_encodings_params'] = (
pos_encs_lib.POS_ENC_PARAMS_TABLE[
architecture_config['positional_encodings']
](**positional_encodings_params)
)
architecture_config['positional_encodings'] = pos_encs_lib.POS_ENC_TABLE[
architecture_config['positional_encodings']
]
predictor = transformer.make_transformer_encoder(
output_size=output_size,
return_all_outputs=True,
causal_masking=True,
**architecture_config,
)
return predictors.InContextPredictor(predictor)
@_register_predictor_factory('sliding_window_transformer')
def _make_sliding_window_transformer_predictor(
output_size: int, architecture_config: _Config
) -> predictors.Predictor:
"""Returns Transformer predictor based on config."""
positional_encodings_params = {}
if 'positional_encodings_params' in architecture_config:
positional_encodings_params = architecture_config[
'positional_encodings_params'
]
architecture_config['positional_encodings_params'] = (
pos_encs_lib.POS_ENC_PARAMS_TABLE[
architecture_config['positional_encodings']
](**positional_encodings_params)
)
architecture_config['positional_encodings'] = pos_encs_lib.POS_ENC_TABLE[
architecture_config['positional_encodings']
]
context_len = architecture_config['context_length']
model_kwargs = {
k: v for k, v in architecture_config.items() if k != 'context_length'
}
predictor = SlidingWindowTransformer(output_size, context_len, model_kwargs)
return predictors.InContextPredictor(predictor)
| nonstationary_mbml-main | predictor_factories.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the training loop that performs parameter updates."""
import copy
import functools
import math
import random
from typing import Any, Callable, Optional
import chex
import haiku as hk
import jax
from jax import numpy as jnp
from jax import random as jrandom
import numpy as np
import optax
import tqdm
import tree
from nonstationary_mbml import base_config as config_lib
from nonstationary_mbml import base_constants
from nonstationary_mbml import predictor_factories
from nonstationary_mbml import predictors
def _make_whole_loss_fn(
predictor: predictors.Predictor,
) -> Callable[[hk.Params, chex.PRNGKey, chex.Array, Any], tuple[float, Any]]:
"""Returns the loss function for update_parameters_whole_sequence."""
def loss_fn(params: hk.Params, rng: chex.PRNGKey, inputs: chex.Array,
init_state: Any) -> tuple[float, Any]:
"""Returns the loss for the model and the last state.
Args:
params: The parameters of the model, usually a neural network.
rng: The random seed used to unroll the model (dropout for instance).
inputs: The input array, of shape (B, T, F). B batch dimension, T time
dimension, F feature dimension.
init_state: The initial state of the model. Can be anything, but usually
will be an ArrayTree (like LSTM state).
"""
output, states = predictor.unroll(params, rng, inputs, init_state)
last_state = _get_last_state(states)
predictions = output[:, :-1]
targets = inputs[:, 1:]
losses = optax.softmax_cross_entropy(predictions, targets)
return jnp.mean(losses), last_state
return loss_fn
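# Note on the loss alignment above (descriptive, not part of the original
# module): for a sequence of length T, the prediction emitted after seeing
# inputs[:, t] is scored against inputs[:, t + 1], so output[:, :-1] is paired
# with inputs[:, 1:] and the prediction following the final observation is
# discarded.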
@functools.partial(jax.jit, static_argnames=('grad_fn', 'optimizer'))
def update_parameters_whole_sequence(
params: hk.Params,
rng: chex.PRNGKey,
batch: chex.Array,
grad_fn: Callable[[hk.Params, chex.PRNGKey, chex.Array, chex.Array],
tuple[tuple[chex.Array, chex.Array], chex.ArrayTree]],
optimizer: optax.GradientTransformation,
opt_state: optax.OptState,
init_state: Any,
) -> tuple[dict[str, Any], hk.Params, optax.OptState]:
"""Returns updated params and extra logs (like loss, last state etc).
Backpropagation is done on the whole sequence. The whole function is jitted.
Args:
params: The current parameters of the network.
rng: The random seed, used for dropout for instance.
batch: The data batch.
grad_fn: A gradient function, which takes some parameters, a random seed,
the data to compute the gradient on, and an initial state for the model.
It returns the gradient of the parameters for this batch of data, and
extra values.
optimizer: An optax optimizer.
opt_state: The optimizer state.
init_state: The initial state of the network (for an RNN for instance). Can
be None.
"""
(loss, last_state), grad = grad_fn(params, rng, batch, init_state)
updates, new_opt_state = optimizer.update(grad, opt_state)
new_params = optax.apply_updates(params, updates)
log_dict = {
'loss': loss,
'last_state': last_state,
'grad_norm_unclipped': optax.global_norm(grad),
}
return log_dict, new_params, new_opt_state
@functools.partial(jax.jit, static_argnames=('optimizer'))
def _compute_updates_from_chunks(
params: hk.Params,
losses: list[float],
grads: list[chex.ArrayTree],
optimizer: optax.GradientTransformation,
opt_state: optax.OptState,
):
"""Returns updates from the list of gradients of the chunks."""
# Compute the mean of losses across chunks.
loss = jnp.mean(jnp.array(losses))
# Compute the mean of gradients across chunks.
avg_grads_fn = lambda *g: functools.reduce(jax.lax.add, g) / len(g)
grad = jax.tree_util.tree_map(avg_grads_fn, *grads)
# Classical update of parameters with the mean of gradients.
updates, new_opt_state = optimizer.update(grad, opt_state)
new_params = optax.apply_updates(params, updates)
return loss, grad, new_params, new_opt_state
def _make_chunks_loss_fn(
predictor: predictors.Predictor,
) -> Callable[
[hk.Params, chex.PRNGKey, chex.Array, chex.Array, Any, bool],
tuple[float, Any],
]:
"""Returns the loss function for update_parameters_in_chunks."""
def loss_fn(params, rng, inputs, targets, init_state, last_chunk: bool):
"""Returns the loss for the model and the last state.
Args:
params: The parameters of the model, usually a neural network.
rng: The random seed used to unroll the model (dropout for instance).
inputs: The input array, of shape (B, T, F). B batch dimension, T time
dimension, F feature dimension.
targets: The targets array, also of shape (B, T, F).
init_state: The initial state of the model. Can be anything, but usually
will be an ArrayTree (like LSTM state).
last_chunk: Whether the loss is computed for the last chunk or not.
"""
output, states = predictor.unroll(params, rng, inputs, init_state)
last_state = _get_last_state(states)
if last_chunk:
output = output[:, :-1]
losses = optax.softmax_cross_entropy(output, targets)
return jnp.mean(losses), last_state
return loss_fn
def update_parameters_in_chunks(
params: hk.Params, rng: chex.PRNGKey, batch: chex.Array, grad_fn: Callable[
[hk.Params, chex.PRNGKey, chex.Array, chex.Array, chex.Array, bool],
tuple[tuple[chex.Array, chex.Array],
chex.ArrayTree]], optimizer: optax.GradientTransformation,
opt_state: optax.OptState, init_state: Any,
chunk_length: int) -> tuple[dict[str, Any], hk.Params, optax.OptState]:
"""Returns updated params and extra logs (like loss, last state etc).
  Backpropagation is done on chunks of the sequence, and the resulting
  gradients are averaged. The whole function itself is not jitted, due to
  memory issues with long sequences. Only the gradient computation on the
  chunks and the averaging are jitted.
Args:
params: The current parameters of the network.
rng: The random seed, used for dropout for instance.
batch: The data batch.
grad_fn: A gradient function, which takes some parameters, a random seed,
the data to compute the gradient on, and an initial state for the model.
It returns the gradient of the parameters for this batch of data, and
extra values.
optimizer: An optax optimizer.
opt_state: The optimizer state.
init_state: The initial state of the network (for an RNN for instance). Can
be None.
chunk_length: Size of the chunks to consider. If lower than 1 or larger than
seq_length, the passed value is clipped to this range.
"""
seq_length = batch.shape[1]
rngs = hk.PRNGSequence(rng)
losses, grads = [], []
init_state = copy.deepcopy(init_state)
n_chunks = math.ceil(seq_length / chunk_length)
for i in range(n_chunks):
inputs = batch[:, i * chunk_length:(i + 1) * chunk_length]
targets = batch[:, i * chunk_length + 1:(i + 1) * chunk_length + 1]
last_chunk = (i == n_chunks - 1)
(loss, last_state), grad = grad_fn(params, next(rngs), inputs, targets,
init_state, last_chunk)
# Update the initial state for the next batch with the last state.
init_state = last_state
losses.append(loss)
grads.append(grad)
# Compute updates. This part is jitted.
loss, grad, new_params, new_opt_state = _compute_updates_from_chunks(
params, losses, grads, optimizer, opt_state)
log_dict = {
'loss': loss,
'last_state': last_state,
'grad_norm_unclipped': optax.global_norm(grad),
}
return log_dict, new_params, new_opt_state
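# Worked example of the chunking above (illustrative, with made-up numbers):
# with seq_length=10 and chunk_length=4 there are ceil(10 / 4) = 3 chunks.
# Chunk 0 uses inputs batch[:, 0:4] and targets batch[:, 1:5], chunk 1 uses
# batch[:, 4:8] and batch[:, 5:9], and the last chunk uses batch[:, 8:10] and
# batch[:, 9:10], with the prediction after the final observation dropped via
# `last_chunk=True`. The recurrent state is carried from one chunk to the
# next, but gradients do not flow across chunk boundaries.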
def _get_last_state(states: chex.ArrayTree) -> Optional[chex.ArrayTree]:
"""Returns the last state from an array tree of states."""
if states is not None:
return tree.map_structure(lambda x: x[:, -1], states)
return None
def train(config: config_lib.ExperimentConfig,
build_data_generator: base_constants.DataGeneratorBuilder,
build_evaluator: base_constants.EvaluatorBuilder,
use_tqdm: bool = False) -> None:
"""Trains a model.
Nothing is returned.
We choose to pass the data generator and evaluator to the train function
directly rather than via the config, as that would mean adding new fields
to the main constants file for each folder under experiments/. With this
design, one can reuse the training loop in train.py in any other folder,
without needing to change the constants file in this folder.
Args:
config: An experiment config, containing the hyperparameters.
build_data_generator: A function to build a data generator.
build_evaluator: A function to build an evaluator.
use_tqdm: Whether to use a progress bar during training.
"""
random.seed(config.seed)
np.random.seed(config.seed)
rng_seq = hk.PRNGSequence(config.seed)
data_generator = build_data_generator(config.data)
if config.train.seq_length_fixed:
sample_batch = functools.partial(
data_generator.sample,
batch_size=config.train.batch_size,
seq_length=config.train.seq_length,
)
else:
sample_batch = functools.partial(
data_generator.sample, batch_size=config.train.batch_size
)
frames_per_batch = config.train.batch_size * config.train.seq_length
if config.train.seq_length_fixed:
dummy_input, _ = sample_batch(rng=jrandom.PRNGKey(0))
else:
dummy_input, _ = sample_batch(
rng=jrandom.PRNGKey(0), seq_length=config.train.seq_length
)
# No need to use the full batch size for params/config initialization.
  # Using batch_size = 1 saves time and memory.
dummy_input = dummy_input[:1]
predictor = predictor_factories.PREDICTOR_FACTORIES[
config.model.model_type.lower()
](
dummy_input.shape[-1],
config.model.architecture_kwargs,
)
evaluator = build_evaluator(predictor, config.eval)
if config.train.gradient_chunk_length is None:
loss_fn = _make_whole_loss_fn(predictor)
update_parameters = update_parameters_whole_sequence
else:
loss_fn = _make_chunks_loss_fn(predictor)
chunk_length = np.clip(config.train.gradient_chunk_length, 1,
config.train.seq_length)
update_parameters = functools.partial(
update_parameters_in_chunks, chunk_length=chunk_length)
# Optimizer setup.
optimizer = optax.adam(config.train.learning_rate)
max_grad_norm = config.train.clip_grad_norm
if max_grad_norm > 0:
# First clip, *then* pass to optimizer.
optimizer = optax.chain(optax.clip_by_global_norm(max_grad_norm), optimizer)
# Update parameters setup.
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
if config.train.gradient_chunk_length is not None:
# We jit the grad function here because the whole update_parameters function
# itself is not jitted.
grad_fn = jax.jit(grad_fn, static_argnames=('last_chunk',))
model_init_rng = jrandom.PRNGKey(config.train.model_init_seed)
dummy_hidden_state = predictor.initial_state(
None, model_init_rng, config.train.batch_size
)
params = predictor.init_params(
model_init_rng, dummy_input, dummy_hidden_state
)
opt_state = optimizer.init(params)
predictor_init_state = predictor.initial_state(
params, None, config.train.batch_size
)
predictor_eval_init_state = predictor.initial_state(
params, None, config.eval.batch_size
)
range_fn = tqdm.trange if use_tqdm else range
for step in range_fn(config.schedule.training_steps + 1):
if config.train.seq_length_fixed:
data_batch, _ = sample_batch(rng=next(rng_seq))
else:
log_length = random.randint(
0, math.floor(math.log2(config.train.seq_length)))
data_batch, _ = sample_batch(
rng=next(rng_seq), seq_length=2**log_length)
train_log_dict, params, opt_state = update_parameters(
params=params,
rng=next(rng_seq),
batch=data_batch,
grad_fn=grad_fn,
optimizer=optimizer,
opt_state=opt_state,
init_state=predictor_init_state,
)
if not config.train.reset_predictor_init_state:
predictor_init_state = train_log_dict['last_state']
if (
0 < config.logger.log_frequency
and step % config.logger.log_frequency == 0
):
eval_log_dict = evaluator.step(
predictor_params=params,
predictor_state=predictor_eval_init_state,
rng=next(rng_seq),
)
train_log_dict = jax.device_get(train_log_dict)
eval_log_dict = jax.device_get(eval_log_dict)
log_dict = {**train_log_dict, **eval_log_dict} # Merge the dicts.
del log_dict['last_state'] # We don't want to log the state.
del log_dict['logits'] # We don't want to log the logits.
log_dict['num_frames'] = step * frames_per_batch
log_dict['step'] = step
print(log_dict)
| nonstationary_mbml-main | train.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for online_learning."""
import abc
from typing import Any
import chex
from typing_extensions import Protocol
from nonstationary_mbml import base_config as config_lib
from nonstationary_mbml import predictors
class DataGenerator(abc.ABC):
"""Abstract data generation class."""
@abc.abstractmethod
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
"""Samples a batch of data.
Args:
rng: The random key to use in the random generation algorithm.
batch_size: The number of sequences to return.
seq_length: The length of the sequences to return.
Returns:
batch: The batch of data, of shape (batch_size, seq_length, feature_size).
parameters: The parameters used to sample this batch. Can just be the
random seed if not applicable.
"""
class DataGeneratorBuilder(Protocol):
def __call__(self, config: config_lib.DataConfig) -> DataGenerator:
"""Returns a data generator from a config."""
class Evaluator(abc.ABC):
"""Abstract evaluator class."""
@abc.abstractmethod
def step(
self, predictor_params: Any, predictor_state: Any, rng: chex.PRNGKey
) -> dict[str, Any]:
"""Evaluates the predictor and returns a log dict."""
class EvaluatorBuilder(Protocol):
def __call__(
self, predictor: predictors.Predictor, eval_config: config_lib.EvalConfig
) -> Evaluator:
"""Returns an evaluator from a training predictor, a loss_fn and a config.
Args:
predictor: The predictor being trained. Most likely a neural network.
Parameters will be passed in the main loop, not when building the
evaluator.
eval_config: The evaluation config. Depends on the experiment being run.
"""
| nonstationary_mbml-main | base_constants.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script for launching locally."""
from absl import app
from nonstationary_mbml import base_config as config_lib
from nonstationary_mbml import train
from nonstationary_mbml.experiments import config as meta_learning_config_lib
from nonstationary_mbml.experiments import constants
from nonstationary_mbml.experiments import evaluator
def main(unused_argv) -> None:
config = config_lib.ExperimentConfig()
config.data = meta_learning_config_lib.DataConfig()
config.data.iid_distribution = 'categorical'
config.data.iid_distribution_kwargs['size'] = 2
config.data.parameter_distribution = 'dirichlet'
config.data.parameter_distribution_params = (0.5, 0.5)
config.data.parameter_distribution_kwargs['size'] = 2
config.eval = meta_learning_config_lib.EvalConfig()
config.eval.seq_length = 20
config.eval.data = config.data
config.train.seq_length = 20
config.model.model_type = 'sliding_window_transformer'
config.model.architecture_kwargs = {
'context_length': 10,
'positional_encodings': 'ALIBI'
}
train.train(config, constants.build_data_generator, evaluator.build_evaluator) # pytype: disable=wrong-arg-types
if __name__ == '__main__':
app.run(main)
| nonstationary_mbml-main | experiments/local_launch.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base config for meta_learning experiments."""
import dataclasses
import math
from typing import Any, Optional, Sequence
from nonstationary_mbml import base_config as config_lib
from nonstationary_mbml.models import stack_rnn
@dataclasses.dataclass
class DataConfig(config_lib.DataConfig):
"""Config for the data distribution.
Please distinguish between 'kwargs' which are values passed to the objects
at initialization, and 'params' which are passed at sample time.
"""
# The iid distribution is used to sample the sequence values.
# For instance, a coin toss can be modeled with a Bernoulli distribution,
# which is equivalent to a categorical distribution of dimension 2.
# Thus, the values would be:
# - iid_distribution = `categorical`
# - iid_distribution_kwargs.size = 2
iid_distribution: str = ''
iid_distribution_kwargs: dict[str,
Any] = dataclasses.field(default_factory=dict)
# The parameter distribution is used to sample the parameters of the above
# iid distribution. For instance, a coin toss can be modeled with a Bernoulli
  # distribution that requires a single parameter `p`, which could be sampled
# from a beta distribution (the conjugate prior). This is equivalent to using
# a categorical distribution of dimension 2 with a Dirichlet prior of
# dimension 2.
# Thus, the values would be:
# - parameter_distribution = 'dirichlet'
# - parameter_distributions_kwargs = {'size': 2}
# - parameter_distributions_params = (1., 1.)
parameter_distribution: str = ''
parameter_distribution_kwargs: dict[str, Any] = dataclasses.field(
default_factory=dict)
parameter_distribution_params: tuple[float, ...] = dataclasses.field(
default_factory=tuple)
  # The trajectory generator is used to generate the sequences.
# See constants.py for all choices.
trajectory_generator: str = 'static'
trajectory_generator_kwargs: dict[str, Any] = dataclasses.field(
default_factory=dict)
@dataclasses.dataclass
class EvalConfig(config_lib.EvalConfig):
"""Config for the evaluator."""
# Sequence length used for evaluation, None means same as training.
seq_length: Optional[int] = None
# Chunk size to use at evaluation time. None means use the sequence length.
chunk_length: Optional[int] = None
# See constants.py for possible optimal predictors.
optimal_predictors: Optional[Sequence[str]] = None
optimal_predictors_kwargs: dict[str, dict[str, Any]] = dataclasses.field(
default_factory=dict
)
data: Optional[DataConfig] = None # Which data distrib to use for evaluation.
@dataclasses.dataclass
class ExperimentConfig(config_lib.ExperimentConfig):
"""Needed inheritance to avoid typing error."""
name: str = '[MBML Nonstationary Distributions]'
eval: EvalConfig = dataclasses.field(default_factory=EvalConfig)
data: DataConfig = dataclasses.field(default_factory=DataConfig)
@dataclasses.dataclass
class ExperimentSweep(config_lib.ExperimentSweep):
"""Needed inheritance to avoid typing error."""
base_config: ExperimentConfig = dataclasses.field(
default_factory=ExperimentConfig
)
def post_process_config(config: ExperimentConfig) -> None:
"""Processes a config at launch time, in place."""
# Setting the stack size for the stack-RNN.
if config.model.model_type == 'rnn':
if config.model.architecture_kwargs['core'] == stack_rnn.StackRNNCore:
if config.model.architecture_kwargs['stack_size'] is None:
config.model.architecture_kwargs['stack_size'] = config.train.seq_length
# Setting the context size for the Transformer.
if config.model.model_type == 'sliding_window_transformer':
if config.model.architecture_kwargs['context_length'] is None:
if config.train.gradient_chunk_length is None:
config.model.architecture_kwargs[
'context_length'] = config.train.seq_length
else:
config.model.architecture_kwargs[
'context_length'] = config.train.gradient_chunk_length
# Setting the eval data in case it was set to None.
if config.eval.data is None:
config.eval.data = config.data
# Setting the eval length in case it was set to None.
if config.eval.seq_length is None:
config.eval.seq_length = config.train.seq_length
  if (config.eval.optimal_predictors is not None and
      'ptw' in config.eval.optimal_predictors):
if config.eval.optimal_predictors_kwargs['ptw']['depth'] is None:
config.eval.optimal_predictors_kwargs['ptw']['depth'] = math.ceil(
math.log2(config.eval.seq_length)
)
| nonstationary_mbml-main | experiments/config.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""PTW predictor classes."""
import abc
import copy
from typing import Any, Sequence
import chex
import numpy as np
from nonstationary_mbml import predictors
def _leading_zeros(x):
"""Compute the number of leading zeros of x in binary."""
if x == 0:
return 32
n = 0
if x <= 0x0000FFFF:
n = n + 16
x = x << 16
if x <= 0x00FFFFFF:
n = n + 8
x = x << 8
if x <= 0x0FFFFFFF:
n = n + 4
x = x << 4
if x <= 0x3FFFFFFF:
n = n + 2
x = x << 2
if x <= 0x7FFFFFFF:
n = n + 1
return n
def _mscb(d, t):
"""Most significant change bit between t-1 and t-2 in d-length binary."""
if t == 1:
return 0
assert t <= 2**d
return d - (32 - _leading_zeros((t - 1) ^ (t - 2)))
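# Worked example (illustrative): with d=3 and t=4, (t - 1) ^ (t - 2) = 3 ^ 2 =
# 0b001, which has 31 leading zeros as a 32-bit integer, so
# _mscb(3, 4) = 3 - (32 - 31) = 2. With t=5, 4 ^ 3 = 0b111 has 29 leading
# zeros and _mscb(3, 5) = 3 - (32 - 29) = 0, the largest possible reset range
# in PTWPredictor.update_state below.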
# given log(x) and log(y), compute log(x+y). uses the following identity:
# log(x + y) = log(x) + log(1 + y/x) = log(x) + log(1+exp(log(y)-log(x)))
def array_log_add(x: chex.Array, y: chex.Array) -> chex.Array:
idx = x > y
x[idx], y[idx] = y[idx], x[idx]
rval = y - x
idx2 = rval < 100.0
rval[idx2] = np.log1p(np.exp(rval[idx2]))
rval += x
return rval
class KT:
"""KT class."""
def __init__(self):
self.counts = [0, 0]
self.marg = 0.0
def _prob(self, x: chex.Array):
num = self.counts[x.argmax()] + 0.5
den = self.counts[0] + self.counts[1] + 1
return num / den
def update(self, x: chex.Array) -> None:
assert len(x) == 2
assert x.sum() == 1
self.marg += np.log(self._prob(x))
self.counts[x.argmax()] += 1
class ArrayKT:
"""ArrayKT class."""
def __init__(self, batch_size: int, depth: int) -> None:
self.depth = depth
self.batch_size = batch_size
self.counts = np.zeros((batch_size, depth, 2), dtype=np.int64)
self.marg = np.zeros((batch_size, depth))
def _prob(self, x: chex.Array, d: int) -> chex.Array:
batch_size = x.shape[0]
num = self.counts[range(batch_size), d, x.argmax(-1)] + 0.5
den = self.counts[:, d].sum(-1) + 1
return num / den
def update(self, x: chex.Array, d: int) -> None:
self.marg[:, d] += np.log(self._prob(x, d))
self.counts[:, d] += x
def reset(self, drange: Sequence[int]) -> None:
self.counts[:, drange] = 0
self.marg[:, drange] = 0.0
@chex.dataclass
class PTWState:
b: chex.Array
w: chex.Array
kt: ArrayKT
t: int
class PTWPredictor(predictors.Predictor, abc.ABC):
"""Partition tree weighting predictor.
WARNING:
PTW outputs a prediction before seeing the first token, which is
inconsistent with our predictor interface. Thus, we omit the first
prediction and append a dummy output at the end.
Attributes:
d: depth
"""
def __init__(self, depth: int) -> None:
self.d = depth
def init_params(self, *args, **kwargs):
pass
def initial_state(self, params: chex.Array, rng: chex.Array,
batch_size: int) -> PTWState:
return PTWState(
b=np.zeros((batch_size, self.d + 1)),
w=np.zeros((batch_size, self.d + 1)),
kt=ArrayKT(batch_size, self.d + 1),
t=0,
)
def update_state(self, rng: chex.PRNGKey, state: chex.Array,
x: chex.Array) -> chex.Array:
d = self.d
t = state.t # pytype: disable=attribute-error # numpy-scalars
i = _mscb(d, t + 1)
state.b[:, i] = state.w[:, i + 1] # pytype: disable=attribute-error # numpy-scalars
# Doing the reset
state.b[:, i + 1:d + 1] = 0 # pytype: disable=attribute-error # numpy-scalars
state.w[:, i + 1:d + 1] = 0 # pytype: disable=attribute-error # numpy-scalars
state.kt.reset(range(i + 1, d + 1)) # pytype: disable=attribute-error # numpy-scalars
state.kt.update(x, d) # pytype: disable=attribute-error # numpy-scalars
state.w[:, d] = state.kt.marg[:, d] # pytype: disable=attribute-error # numpy-scalars
for j in range(d - 1, -1, -1):
state.kt.update(x, j) # pytype: disable=attribute-error # numpy-scalars
lhs = np.log(0.5) + state.kt.marg[:, j] # pytype: disable=attribute-error # numpy-scalars
rhs = np.log(0.5) + state.w[:, j + 1] + state.b[:, j] # pytype: disable=attribute-error # numpy-scalars
wi = array_log_add(lhs, rhs)
state.w[:, j] = wi # pytype: disable=attribute-error # numpy-scalars
state.t = state.t + 1 # pytype: disable=attribute-error # numpy-scalars
return state
def output_from_state(self, rng: chex.PRNGKey,
state: chex.Array) -> chex.Array:
wx = state.w[:, 0] # pytype: disable=attribute-error # numpy-scalars
cp_state = copy.deepcopy(state)
batch_size = wx.shape[0]
ones = np.repeat(np.asarray([[1, 0]]), batch_size, axis=0)
cp_state = self.update_state(rng, cp_state, ones)
output = cp_state.w[:, 0] - wx # pytype: disable=attribute-error # numpy-scalars
output = np.stack([output, np.log(1 - np.exp(output))], axis=-1)
return output
def unroll(
self,
params: Any,
rng: chex.PRNGKey,
batch: chex.Array,
init_state: chex.Array,
) -> chex.Array:
# Params are not used in this predictor.
del params
def scan_update_output(
state: chex.Array,
x: chex.Array,
) -> tuple[chex.Array, chex.Array]:
pred = self.output_from_state(rng, state)
new_state = self.update_state(rng, state, x)
return new_state, pred
batch = batch.astype(np.int64)
    # Change to time-major layout so the Python loop below unrolls over time.
batch = batch.swapaxes(0, 1)
state = copy.deepcopy(init_state)
predictions = []
for x in batch:
state, pred = scan_update_output(state, x)
predictions.append(pred)
predictions = np.stack(predictions, axis=0)
# PTW outputs a prediction before seeing the first token, which is
# inconsistent with our predictor interface. Thus, we omit the first
# prediction and append a dummy output at the end.
predictions = np.concatenate(
[predictions[1:],
np.full_like(predictions[:1], np.nan)], axis=0)
predictions = predictions.swapaxes(0, 1)
return predictions
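# Illustrative usage sketch (not part of the original API): running the PTW
# predictor on a tiny batch of one-hot binary observations. The numbers are
# made up for the example.
def _example_ptw_unroll():
  predictor = PTWPredictor(depth=2)
  state = predictor.initial_state(params=None, rng=None, batch_size=1)
  # Batch of shape (batch_size=1, seq_length=4, 2): observations 0, 0, 1, 0.
  batch = np.array([[[1, 0], [1, 0], [0, 1], [1, 0]]])
  # Returns (1, 4, 2) log-probabilities; the final step is a NaN placeholder
  # (see the comment in `unroll` above).
  return predictor.unroll(None, None, batch, state)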
| nonstationary_mbml-main | experiments/ptw_predictors.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for online_learning."""
from nonstationary_mbml.experiments import config as config_lib
from nonstationary_mbml.experiments import distributions
from nonstationary_mbml.experiments import live_and_die_predictors
from nonstationary_mbml.experiments import ptw_predictors
from nonstationary_mbml.experiments import trajectory_generators
DISTRIBUTIONS = {
'beta': distributions.BetaDistribution,
'gamma': distributions.GammaDistribution,
'exponential': distributions.ExponentialDistribution,
'constant': distributions.ConstantDistribution,
'dirichlet': distributions.DirichletDistribution,
'categorical': distributions.CategoricalDistribution,
'uniform': distributions.UniformDistribution,
}
TRAJECTORY_GENERATORS = {
'static':
trajectory_generators.StaticTrajectoryGenerator,
'regular_shift':
trajectory_generators.RegularShiftTrajectoryGenerator,
'random_shift':
trajectory_generators.RandomShiftNoMemoryTrajectoryGenerator,
'ptw':
trajectory_generators.PTWRandomShiftTrajectoryGenerator,
'iid_ptw_cat':
trajectory_generators.IIDPTWRandomShiftCategoricalTrajectoryGenerator,
'lin':
trajectory_generators.LINTrajectoryGenerator,
}
OPTIMAL_PREDICTORS = {
'ptw': ptw_predictors.PTWPredictor,
'lin': live_and_die_predictors.LADPredictor,
}
# The following function follows the base_constants.DataGeneratorBuilder
# protocol.
def build_data_generator(
config: config_lib.DataConfig) -> trajectory_generators.TrajectoryGenerator:
"""Returns a data generator from a meta_learning data config."""
iid_distribution = DISTRIBUTIONS[config.iid_distribution](
**config.iid_distribution_kwargs)
return TRAJECTORY_GENERATORS[config.trajectory_generator](
gen_distribution=iid_distribution,
parameter_distribution=DISTRIBUTIONS[config.parameter_distribution](
**config.parameter_distribution_kwargs),
parameter_distribution_params=config.parameter_distribution_params,
**config.trajectory_generator_kwargs)
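# Illustrative usage sketch (not part of the original module), mirroring the
# settings in experiments/local_launch.py; `rng` is assumed to be a JAX
# PRNGKey:
#
# config = config_lib.DataConfig()
# config.iid_distribution = 'categorical'
# config.iid_distribution_kwargs['size'] = 2
# config.parameter_distribution = 'dirichlet'
# config.parameter_distribution_kwargs['size'] = 2
# config.parameter_distribution_params = (0.5, 0.5)
# generator = build_data_generator(config)
# batch, parameters = generator.sample(rng, batch_size=32, seq_length=20)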
| nonstationary_mbml-main | experiments/constants.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Probability distributions used to generate trajectories.
A distribution can produce samples of shape (batch_size, ...) for several
parameter sets at once, given as an array of shape (batch_size,
parameter_size).
It also has a density method, returning the positive density for given
points in the support of the distribution.
For now, the distributions are only one-dimensional, which is sufficient for
our experiments. This means you can only produce iid samples, not
multivariate distributions.
"""
import abc
import chex
import jax
import jax.nn as jnn
import jax.numpy as jnp
_EPSILON = 1e-7
# Constant to be added to avoid inputs close to asymptotes
# e.g. to ensure that the variance of a Gaussian >= _EPSILON
def split_params(parameters: chex.Array) -> tuple[chex.Array, ...]:
"""Returns a tuple of batches of individual parameters.
Args:
parameters: a (batch_size, parameter_size, ...) tensor
Returns:
param_tuple: tuple whose elements are (batch_size, ...) tensors
"""
parameters = jnp.swapaxes(parameters, 1, 0)
param_tuple = tuple(x for x in parameters)
return param_tuple
def broadcast_params_with_ones(
parameters: chex.Array,
num_dims_to_match: int,
) -> chex.Array:
"""Expand dimension of parameters num_dims times.
Args:
parameters: a (batch_size, parameter_size) tensor
num_dims_to_match: the number of dimensions to match
Returns:
broadcasted: a (batch_size, parameter_size, 1, 1, ...) tensor, with shape
length of num_dims_to_match
"""
num_dims_to_add = num_dims_to_match - len(parameters.shape)
return parameters[(...,) + (None,) * num_dims_to_add]
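# Shape example (illustrative): for `parameters` of shape
# (batch_size, parameter_size) and num_dims_to_match=4, the function returns a
# view of shape (batch_size, parameter_size, 1, 1), ready to broadcast against
# a (batch_size, ..., feature_size) sample tensor.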
class Distribution(abc.ABC):
"""Abstract class for random distributions."""
@property
@abc.abstractmethod
def parameter_size(self) -> int:
"""The number of parameters expected by the distribution."""
@property
@abc.abstractmethod
def feature_size(self) -> int:
"""The size of an iid sample of the distribution."""
@abc.abstractmethod
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
"""Sample data with given parameters.
Args:
rng: the random key
parameters: a (batch_size, parameter_size) tensor used to sample the data
shape: the shape of the output tensor. Must be of the form (batch_size,
..., feature_size).
Returns:
samples: an output_shape tensor, dtype float64
"""
@abc.abstractmethod
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
"""Returns the evaluation of the density function at x.
Args:
parameters: a (batch_size, parameter_size) tensor of density parameters.
x: (batch_size, ..., feature_size) tensor of points to be evaluated
logits: If True, interprets the `parameters` arg as the logits and
performs the appropriate conversions internally. Default is False.
Returns:
densities: a (batch_size, ..., 1) tensor, containing the values of the
density function evaluated at each entry of x. dtype float32
"""
def log_density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
"""Computes the log of the density at x. Override where appropriate."""
return jnp.log(self.density(parameters, x, logits=logits) + _EPSILON)
@abc.abstractmethod
def mean(self, parameters: chex.Array) -> chex.Array:
"""(batch_size, feature_size) array of the means of the given parameters."""
@abc.abstractmethod
def std(self, parameters: chex.Array) -> chex.Array:
"""(batch_size, feature_size) array of the std devs of the given parameters.
"""
@abc.abstractmethod
def entropy(self, parameters: chex.Array) -> chex.Array:
"""(batch_size, 1) array of the entropies of the given parameters."""
@abc.abstractmethod
def logits_to_params(self, logits: chex.Array) -> chex.Array:
"""Given the final pre-activation output, compute appropriate parameters.
E.g., for a gaussian, map the 2nd column into positive values above a
certain epsilon, to avoid divergence due to zero variance.
Args:
logits: a (batch_size, parameter_size) pre-activation output of the final
layer of a neural net.
Returns:
a (batch_size, parameter_size) tensor of valid parameters for the
distribution.
"""
def _validate_parameters_shape(
self,
parameters: chex.Array,
output_shape: tuple[int, ...],
) -> None:
"""Checks that `parameters` has shape (batch_size, parameter_size)."""
expected_shape = (output_shape[0], self.parameter_size)
if parameters.shape != expected_shape:
raise ValueError("Parameters shape mismatch. "
f"Expected {expected_shape}, got {parameters.shape}.")
def _validate_output_shape(
self,
parameters: chex.Array,
output_shape: tuple[int, ...],
) -> None:
"""Checks that `output_shape` has form (batch_size, ..., feature_size)."""
leading_and_trailing_dims = (output_shape[0], output_shape[-1])
batch_size = parameters.shape[0]
if leading_and_trailing_dims != (batch_size, self.feature_size):
raise ValueError(f"Bad shape. "
f"Expected ({batch_size}, ..., {self.feature_size}). "
f"Got {output_shape}")
class PrecisionGaussianDistribution(Distribution):
"""Gaussian Distribution parameterised by precision.
The parameters of this distribution are the mean and 1/var = rho.
This parameterisation results in a stable log_density computation.
"""
parameter_size = 2
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
mu, rho = split_params(parameters)
sigma = 1 / jnp.sqrt(rho)
batch = mu + sigma * jax.random.normal(rng, shape=shape)
return batch
def log_density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
if logits:
mu, log_rho = split_params(parameters)
rho = jnp.exp(log_rho)
else:
mu, rho = split_params(parameters)
log_rho = jnp.log(rho + _EPSILON)
return 1 / 2 * (log_rho - jnp.log(2 * jnp.pi) - (rho * (x - mu)**2))
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
return jnp.exp(self.log_density(parameters, x))
def logits_to_params(self, logits: chex.Array) -> chex.Array:
mean = logits[..., 0]
rho = jnp.exp(logits[..., 1])
return jnp.stack([mean, rho], axis=-1)
def mean(self, parameters: chex.Array) -> chex.Array:
# Index with list to not lose the dimension.
return parameters[..., [0]]
def std(self, parameters: chex.Array) -> chex.Array:
return 1 / jnp.sqrt(parameters[..., [1]])
def entropy(self, parameters: chex.Array) -> chex.Array:
_, rho = split_params(parameters)
return 1 / 2 * (jnp.log(2 * jnp.pi * jnp.e) - jnp.log(rho))
def kl(self, p: chex.Array, q: chex.Array) -> chex.Array:
"""Computes the KL between the Gaussians parameterised by p and q.
Args:
p: [..., 2] tensor of means and precisions.
q: [..., 2] tensor of means and precisions.
Returns:
[...,] tensor of KL between p and q.
"""
mu_p, mu_q = p[..., 0], q[..., 0]
var_p, var_q = 1 / p[..., 1], 1 / q[..., 1]
std_p, std_q = jnp.sqrt(var_p), jnp.sqrt(var_q)
return jnp.log(std_q / std_p) + (var_p +
(mu_p - mu_q)**2) / (2 * var_q) - 1 / 2
class BetaDistribution(Distribution):
"""Beta distribution.
Parameters are alpha and beta.
The pdf is p(x; alpha, beta) = x^(alpha-1) * (1-x)^(beta-1) / B(alpha, beta)
where B(alpha, beta) = G(alpha)*G(beta) / G(alpha + beta) and
G is the Gamma function.
"""
parameter_size = 2
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
alpha, beta = split_params(parameters)
batch = jax.random.beta(rng, alpha, beta, shape)
return batch.astype(jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = self.logits_to_params(parameters) if logits else parameters
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
alpha, beta = split_params(parameters)
return jax.scipy.stats.beta.pdf(x, alpha, beta)
def logits_to_params(self, logits: chex.Array) -> chex.Array:
raise NotImplementedError()
def mean(self, parameters: chex.Array) -> chex.Array:
alpha, beta = split_params(parameters)
return alpha / (alpha + beta)
def std(self, parameters: chex.Array) -> chex.Array:
a, b = split_params(parameters)
variance = a * b / ((a + b)**2 * (a + b + 1))
return jnp.sqrt(variance)
def entropy(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
class GammaDistribution(Distribution):
"""Gamma distribution.
Parameters are alpha (shape) and beta (rate).
The pdf is p(x; a, b) = b^a / G(a) * x^(a-1) exp(-b*x)
where G is the Gamma function.
"""
parameter_size = 2
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
alpha, beta = split_params(parameters)
# jax.random.gamma samples from Gamma(alpha, 1). To obtain a sample from
# Gamma(alpha, beta), we rescale:
batch = jax.random.gamma(rng, alpha, shape) / beta
return batch.astype(jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = self.logits_to_params(parameters) if logits else parameters
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
alpha, beta = split_params(parameters)
# jax.scipy.stats.gamma.pdf is the pdf of Gamma(alpha, 1). To obtain the
# pdf for Gamma(alpha, beta), we reparameterise:
return jax.scipy.stats.gamma.pdf(beta * x, alpha) * beta
def logits_to_params(self, logits: chex.Array) -> chex.Array:
raise NotImplementedError()
def mean(self, parameters: chex.Array) -> chex.Array:
alpha, beta = split_params(parameters)
return alpha / beta
def std(self, parameters: chex.Array) -> chex.Array:
alpha, beta = split_params(parameters)
return jnp.sqrt(alpha) / beta
def entropy(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
class LomaxDistribution(Distribution):
"""Lomax distribution.
Parameters are alpha (shape) and lambda (scale).
  alpha > 0 and lambda > 0.
The pdf is p(x; a, l) = (a * l**a) / (x + l)**(a + 1)
"""
parameter_size = 2
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
alpha, lamb = split_params(parameters)
# jax.random.pareto samples from Pareto(alpha). To obtain a sample from
# Lomax(alpha, lambda), we rescale:
batch = (jax.random.pareto(rng, alpha, shape) + 1) * lamb
return batch.astype(jnp.float32)
def log_density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
if logits:
log_alpha, log_lamb = split_params(parameters)
alpha, lamb = jnp.exp(log_alpha), jnp.exp(log_lamb)
else:
alpha, lamb = split_params(parameters)
log_alpha, log_lamb = jnp.log(alpha + _EPSILON), jnp.log(lamb + _EPSILON)
log_dens_num = log_alpha + alpha * log_lamb
log_dens_den = (alpha + 1) * jnp.log(x + lamb)
return log_dens_num - log_dens_den
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
return jnp.exp(self.log_density(parameters, x, logits=logits))
def logits_to_params(self, logits: chex.Array) -> chex.Array:
return jnp.exp(logits)
def mean(self, parameters: chex.Array) -> chex.Array:
alpha, lamb = split_params(parameters)
return jnp.where(alpha > 1, lamb / (alpha - 1), jnp.nan)
def std(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
def entropy(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
def kl(self, p: chex.Array, q: chex.Array) -> chex.Array:
"""Computes approximate KL between p and q.
Since Lomax KL is analytically intractable, we approximate the KL using
the Exponential distribution, converting the Lomax parameters to Exponential
parameters with the same mean. When doing so, the alpha parameter is clipped
to be >1, as the Lomax mean is undefined for alpha ≤ 1.
This is a hacky approximation. Use at your own risk.
Args:
p: [..., 2] parameter array for distribution p.
q: [..., 2] parameter array for distribution q.
Returns:
a [...,] array of approximated KL between p and q.
"""
lambda_p = _approximate_lomax_params_to_exponential_params(p) + _EPSILON
lambda_q = _approximate_lomax_params_to_exponential_params(q) + _EPSILON
return jnp.log(lambda_p / lambda_q) + (lambda_q / lambda_p) - 1
def _approximate_lomax_params_to_exponential_params(params):
"""Given Lomax params, returns the Exponential params with the same mean."""
alpha, beta = jnp.maximum(1 + _EPSILON, params[..., 0]), params[..., 1]
return (alpha - 1) / (beta + _EPSILON)
class DirichletDistribution(Distribution):
"""Dirichlet distribution with given parameter_size and feature_size."""
def __init__(self, size):
self._size = size
@property
def parameter_size(self) -> int:
return self._size
@property
def feature_size(self) -> int:
return self._size
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape))
# jax.random.dirichlet expects the alphas to be in axis=-1, so we rearrange:
alphas = jnp.moveaxis(parameters, source=1, destination=-1)
# a sample from jax.random.dirichlet has shape (shape + alphas.shape[-1]),
# so we remove the trailing dimension to preserve the final output shape:
shape = shape[:-1]
batch = jax.random.dirichlet(rng, alphas, shape)
return batch.astype(jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
raise NotImplementedError()
def logits_to_params(self, logits: chex.Array) -> chex.Array:
raise NotImplementedError()
def mean(self, parameters: chex.Array) -> chex.Array:
return parameters / jnp.sum(parameters, axis=1, keepdims=True)
def std(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
def entropy(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
class CategoricalDistribution(Distribution):
"""Categorical Distribution with given parameter_size and feature_size.
The support of CategoricalDistribution(n) are one-hot vectors of size n.
The parameter vector contains the probabilities corresponding to classes.
"""
def __init__(self, size):
self._size = size
@property
def parameter_size(self) -> int:
return self._size
@property
def feature_size(self) -> int:
return self._size
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
batch_size = shape[0]
keys = jax.random.split(rng, batch_size)
# jax.random.categorical expects logits, so we convert to log space.
# we add an epsilon for stability. Regrettably, the epsilon means
# that we can sample from classes with zero probability.
log_probabilities = jnp.log(parameters + _EPSILON)
# sample each trajectory in the batch separately to work around weird
# shape behaviours in jax.random.categorical.
def unbatched_categorical(
log_p: chex.Array,
rng: chex.PRNGKey,
) -> chex.Array:
# Sample every trajectory in the batch separately but not the feature size
# dimension since it will be one-hot encoded.
return jax.random.categorical(rng, log_p, shape=shape[1:-1])
batch = jax.vmap(unbatched_categorical)(log_probabilities, keys)
return jnn.one_hot(batch, self.feature_size, dtype=jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = self.logits_to_params(parameters) if logits else parameters
parameters = broadcast_params_with_ones(parameters, len(x.shape))
# align parameter axis to feature axis.
probabilities = jnp.moveaxis(parameters, 1, -1)
# since x is one-hot, this gives us the likelihoods of the observed data
# with zeros in non-observed entries. The sum removes these zeros.
return jnp.sum(probabilities * x, axis=-1, keepdims=True)
def mean(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
def std(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError()
def entropy(self, parameters: chex.Array) -> chex.Array:
return -jnp.sum(parameters * jnp.log(parameters), axis=-1, keepdims=True)
def kl(self, p: chex.Array, q: chex.Array) -> chex.Array:
"""Computes the KL between the Categoricals parameterised by p and q.
Args:
p: [..., parameter_size] tensor of means and precisions.
q: [..., parameter_size] tensor of means and precisions.
Returns:
[...,] tensor of KL between p and q.
"""
return jnp.sum(p * jnp.log(p / q), axis=-1)
def logits_to_params(self, logits: chex.Array) -> chex.Array:
return jax.nn.softmax(logits, axis=-1)
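# Illustrative usage sketch (not part of the original API): drawing one-hot
# samples from a batch of 2-class categoricals. The numbers are made up.
def _example_categorical_sample():
  dist = CategoricalDistribution(size=2)
  # Two trajectories, with class-0 probabilities 0.9 and 0.1 respectively.
  parameters = jnp.array([[0.9, 0.1], [0.1, 0.9]])
  # Output has shape (batch_size=2, seq_length=5, feature_size=2), one-hot.
  return dist.sample(jax.random.PRNGKey(0), parameters, (2, 5, 2))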
class ExponentialDistribution(Distribution):
"""Exponential implementation of abstract Distribution."""
parameter_size = 1
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
(tau,) = split_params(parameters)
batch = 1 / tau * jax.random.exponential(rng, shape)
return batch.astype(jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = self.logits_to_params(parameters) if logits else parameters
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
(tau,) = split_params(parameters)
return (tau * jnp.exp(-tau * x)) * (x >= 0)
def logits_to_params(self, logits: chex.Array) -> chex.Array:
return jnp.exp(logits)
def mean(self, parameters: chex.Array) -> chex.Array:
return 1 / parameters
def std(self, parameters: chex.Array) -> chex.Array:
return 1 / parameters
def entropy(self, parameters: chex.Array) -> chex.Array:
tau = parameters
return 1 - jnp.log(tau)
class UniformDistribution(Distribution):
"""Uniform implementation of abstract Distribution."""
parameter_size = 2
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
min_val, max_val = split_params(parameters)
normal_uniform = jax.random.uniform(rng, shape, minval=0., maxval=1.)
batch = min_val + (max_val - min_val) * normal_uniform
return batch
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
parameters = self.logits_to_params(parameters) if logits else parameters
parameters = broadcast_params_with_ones(parameters, len(x.shape) + 1)
min_val, max_val = split_params(parameters)
return jnp.where(
        (x - min_val) * (max_val - x) >= 0,  # <=> min_val <= x <= max_val
1 / (max_val - min_val),
0.)
def logits_to_params(self, logits: chex.Array) -> chex.Array:
raise UserWarning("Uniform distribution cannot de differentiated "
"in the current implementation.")
def mean(self, parameters: chex.Array) -> chex.Array:
min_val, max_val = split_params(parameters)
return (min_val + max_val) / 2
def std(self, parameters: chex.Array) -> chex.Array:
min_val, max_val = split_params(parameters)
return (max_val - min_val) / jnp.sqrt(12)
def entropy(self, parameters: chex.Array) -> chex.Array:
min_val, max_val = split_params(parameters)
return jnp.log(max_val - min_val)
class ConstantDistribution(Distribution):
"""Constant (Dirac) implementation of abstract Distribution.
Not intended to be used for modelling, but can be used to fix some
parameters of a TrajectoryGenerator to, e.g., generate a distribution
over Gaussian distributions with fixed variance but different means.
"""
parameter_size = 1
feature_size = 1
def sample(
self,
rng: chex.PRNGKey,
parameters: chex.Array,
shape: tuple[int, ...],
) -> chex.Array:
self._validate_parameters_shape(parameters, shape)
self._validate_output_shape(parameters, shape)
parameters = broadcast_params_with_ones(parameters, len(shape) + 1)
(constant,) = split_params(parameters)
return constant * jnp.ones(shape, dtype=jnp.float32)
def density(
self,
parameters: chex.Array,
x: chex.Array,
logits: bool = False,
) -> chex.Array:
self._validate_parameters_shape(parameters, x.shape)
raise UserWarning("Attempting to use ConstantDistribution.density(). "
"This is most likely an error, as this function would "
"not tell you anything meaningful.")
# If this function was supposed to return a value, it would be:
# jnp.where(parameters == x, jnp.inf, 0)
def mean(self, parameters: chex.Array) -> chex.Array:
return parameters
def std(self, parameters: chex.Array) -> chex.Array:
return jnp.zeros_like(parameters)
def entropy(self, parameters: chex.Array) -> chex.Array:
raise NotImplementedError("No entropy for Dirac distribution.")
def logits_to_params(self, logits: chex.Array) -> chex.Array:
raise UserWarning("Constant distribution cannot be differentiated.")
| nonstationary_mbml-main | experiments/distributions.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Trajectory generators used to train our RNNs to predict parameters.
These are basically our supervised datasets, generating data and loss functions.
"""
import abc
import contextlib
import functools
import math
import random
from typing import Any
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import jax.random as jrandom
import numpy as np
from nonstationary_mbml import base_constants
from nonstationary_mbml.experiments import distributions
def sample_distribution_parameters(
rng: chex.PRNGKey,
parameter_distribution: distributions.Distribution,
parameter_distribution_params: tuple[float, ...],
batch_size: int,
) -> chex.Array:
"""Creates a sample of parameters for the generation distribution.
Args:
rng: the PRNG key.
parameter_distribution: the distribution from which parameters are sampled.
parameter_distribution_params: parameter tuples for the
parameter_distribution.
batch_size: the size of the batch that will be sampled from the generation
distribution.
Returns:
an array of shape (batch_size, parameter_size), where parameter_size is the
sum of the `feature_size`s of all the parameter distributions.
"""
rng, key = jrandom.split(rng)
params = parameter_distribution_params
params = jnp.expand_dims(jnp.array(params), axis=0)
# params now has shape (1, pd.parameter_size)
params = jnp.repeat(params, batch_size, axis=0)
# params now has shape (batch_size, pd.parameter_size)
# add the sampled parameters to the output
return parameter_distribution.sample(
key, params, (batch_size, parameter_distribution.feature_size))
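# Illustrative usage sketch (not part of the original module): drawing one
# positive rate per trajectory from a Uniform(0.5, 2.0) prior, e.g. to
# parameterise an exponential generation distribution. The prior's
# parameter_size is 2 (min, max), so the params tuple has two entries.
def _example_sample_parameters() -> chex.Array:
  prior = distributions.UniformDistribution()
  # Returns an array of shape (batch_size, prior.feature_size) = (8, 1).
  return sample_distribution_parameters(
      jrandom.PRNGKey(0), prior, (0.5, 2.0), batch_size=8)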
class TrajectoryGenerator(base_constants.DataGenerator):
"""Abstract class for trajectory generators."""
def __init__(
self,
gen_distribution: distributions.Distribution,
parameter_distribution: distributions.Distribution,
parameter_distribution_params: tuple[float, ...],
):
self.gen_distribution = gen_distribution
self.parameter_size = gen_distribution.parameter_size
if parameter_distribution.feature_size != self.parameter_size:
raise ValueError(
f"Incorrect number of parameter distibutions for "
f"{gen_distribution.__class__}. "
f"Expected {self.parameter_size}, "
f"got {parameter_distribution.feature_size} output parameters.")
if len(
parameter_distribution_params) != parameter_distribution.parameter_size:
raise ValueError(
"Not enough parameters supplied for"
f" {parameter_distribution.__class__}. Expected tuple of length"
f" {parameter_distribution.parameter_size}, got"
f" {parameter_distribution_params}."
)
self.parameter_distribution = parameter_distribution
self.parameter_distribution_params = parameter_distribution_params
@abc.abstractmethod
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
"""Samples a batch with randomly sampled true parameters.
    In the following, parameter_size is the number of parameters needed by the
    generative distribution (2 for a Gaussian, 2 for a uniform, etc.), and
    feature_size is the number of outputs the generative distribution produces
    (1 most of the time, n for categorical or Dirichlet).
This function is basically a BatchApply wrapper around parameters_for_gen.
Args:
rng: the random key to use in the random generation algorithm
batch_size: the number of sequences to return
seq_length: the length of the sequences to return
Returns:
batch: the batch of data, of shape (batch_size, seq_length, feature_size)
parameters: the parameters used to sample this batch,
of shape (batch_size, seq_length, parameter_size)
"""
class StaticTrajectoryGenerator(TrajectoryGenerator):
"""TG where the distribution parameters remain constant within a trajectory."""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
params_rng, gen_rng = jrandom.split(rng)
parameters = sample_distribution_parameters(
params_rng, self.parameter_distribution,
self.parameter_distribution_params, batch_size)
sample_shape = (batch_size, seq_length, self.gen_distribution.feature_size)
batch = self.gen_distribution.sample(gen_rng, parameters, sample_shape)
# The same parameters are used at each timestep, so we copy them along T:
parameters_for_output = jnp.stack([parameters] * seq_length, axis=1)
return batch, parameters_for_output
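# Illustrative usage sketch (not part of the original module): a static
# generator producing exponential observations whose rate is constant within
# each trajectory and drawn uniformly from [0.5, 2.0]. The prior's feature_size
# (1) matches the exponential distribution's parameter_size, as the constructor
# check above requires.
def _example_static_exponential() -> tuple[chex.Array, chex.Array]:
  generator = StaticTrajectoryGenerator(
      gen_distribution=distributions.ExponentialDistribution(),
      parameter_distribution=distributions.UniformDistribution(),
      parameter_distribution_params=(0.5, 2.0),
  )
  # Positional call: (rng, batch_size, seq_length); batch_size and seq_length
  # are static arguments of the jitted sample method.
  # batch has shape (4, 32, 1); parameters has shape (4, 32, 1).
  return generator.sample(jrandom.PRNGKey(0), 4, 32)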
class DynamicTrajectoryGenerator(TrajectoryGenerator, abc.ABC):
"""TG where distribution parameters change within a trajectory."""
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
params_rng, gen_rng = jrandom.split(rng, 2)
parameters = self._parameters_for_gen(params_rng, batch_size, seq_length)
partial_gen = functools.partial(
self.gen_distribution.sample,
rng=gen_rng,
shape=(batch_size * seq_length, self.gen_distribution.feature_size))
batch = hk.BatchApply(partial_gen)(parameters=parameters)
return batch, parameters
def _initial_state(self, rng: chex.PRNGKey, batch_size: int) -> chex.Array:
init_parameters = sample_distribution_parameters(
rng, self.parameter_distribution, self.parameter_distribution_params,
batch_size)
return init_parameters
def _parameters_for_gen(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> chex.Array:
"""Parameters used to generate trajectory.
Args:
rng: the random key to use in the random generation algorithm
batch_size: the number of sequences to return
seq_length: the length of the sequences to return
Returns:
parameters: the parameters used to generate a trajectory,
of shape (batch_size, seq_length, parameter_size)
"""
init_state = self._initial_state(rng, batch_size)
keys = jrandom.split(rng, seq_length)
def scan_update_params(state, t):
return self.update_params(keys[t], state, t)
_, params = jax.lax.scan(scan_update_params, init_state,
jnp.arange(seq_length))
return params.swapaxes(0, 1)
@abc.abstractmethod
def update_params(
self,
rng: chex.PRNGKey,
state_t: Any,
t: int,
) -> tuple[Any, chex.Array]:
"""Updates params at each timestep t.
Args:
rng: random key
state_t: the state of the generator at time t, used to produce the new
parameters
t: time
Returns:
new_state: the state at time t+1
new_params: a (batch_size, parameter_size) tensor, parameters for time t+1
"""
class RegularShiftTrajectoryGenerator(DynamicTrajectoryGenerator):
"""Dynamic TG, samples new parameters every shift_period timesteps."""
def __init__(
self,
shift_period: int,
gen_distribution: distributions.Distribution,
parameter_distribution: distributions.Distribution,
parameter_distribution_params: tuple[float, ...],
):
super().__init__(gen_distribution, parameter_distribution,
parameter_distribution_params)
self._shift_period = shift_period
def update_params(
self,
rng: chex.PRNGKey,
state_t: Any,
t: int,
) -> tuple[Any, chex.Array]:
"""Function passed to scan. State only contains last parameters here."""
params = state_t
batch_size = params.shape[0]
samples = sample_distribution_parameters(rng, self.parameter_distribution,
self.parameter_distribution_params,
batch_size)
new_params = jnp.where(t % self._shift_period == 0, samples, params)
return new_params, new_params
class RandomShiftNoMemoryTrajectoryGenerator(DynamicTrajectoryGenerator):
"""Dynamic TG, samples new parameters every shift_period timesteps.
Shift_period is now a random variable, which is sampled once at the beginning
of the trajectory, and then resampled when the last sampled shift_period time
is reached.
"""
def __init__(
self,
shift_distribution: distributions.Distribution,
shift_parameters: Any,
gen_distribution: distributions.Distribution,
parameter_distribution: distributions.Distribution,
parameter_distribution_params: tuple[float, ...],
):
super().__init__(gen_distribution, parameter_distribution,
parameter_distribution_params)
self._shift_distribution = shift_distribution
self._shift_parameters = shift_parameters
def _sample_delta_shift_time(self, rng: chex.PRNGKey,
batch_size: int) -> chex.Array:
delta_shift_time = sample_distribution_parameters(rng,
self._shift_distribution,
self._shift_parameters,
batch_size)
delta_shift_time = jnp.clip(delta_shift_time, a_min=0)
delta_shift_time = jnp.round(delta_shift_time)
return delta_shift_time
def _initial_state(self, rng: chex.PRNGKey, batch_size: int) -> chex.Array:
init_params = super()._initial_state(rng, batch_size)
initial_time = 0
first_delta = self._sample_delta_shift_time(rng, batch_size)
next_shift_time = initial_time + first_delta
return init_params, next_shift_time # pytype: disable=bad-return-type # numpy-scalars
def update_params(
self,
rng: chex.PRNGKey,
state_t: Any,
t: int,
) -> tuple[Any, chex.Array]:
params, next_shift_time = state_t
batch_size = params.shape[0]
rng, rng2 = jrandom.split(rng)
samples = sample_distribution_parameters(rng, self.parameter_distribution,
self.parameter_distribution_params,
batch_size)
new_params = jnp.where(t == next_shift_time, samples, params)
next_shift_time += jnp.where(
t == next_shift_time, self._sample_delta_shift_time(rng2, batch_size),
0)
return (new_params, next_shift_time), new_params
def sample_ptw_switch_points(min_value: int, max_value: int) -> list[int]:
"""Returns switch points sampled from the PTW prior."""
switch_points = list()
if max_value <= min_value:
return switch_points
mean_value = (max_value + min_value) // 2
if random.random() < 0.5:
switch_points += sample_ptw_switch_points(min_value, mean_value)
switch_points.append(mean_value)
switch_points += sample_ptw_switch_points(mean_value + 1, max_value)
return switch_points
def fixed_ptw_switch_points(seq_length: int) -> list[int]:
"""Returns switch points sampled from the PTW prior."""
next_power_2 = 2**(math.ceil(math.log2(seq_length)))
switch_points = sample_ptw_switch_points(1, next_power_2)
switch_points = filter(lambda x: x <= seq_length - 1, switch_points)
return list(sorted(switch_points))
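# Illustrative usage sketch (not part of the original module): one draw of PTW
# switch points for a sequence of 16 steps. The recursion above splits an
# interval at its midpoint with probability 0.5 and recurses into both halves,
# so finer-grained switch points are exponentially less likely. The draw uses
# Python's global `random` module, which we seed here purely for a
# reproducible illustration.
def _example_ptw_switch_points() -> list[int]:
  random.seed(0)  # Arbitrary seed, only for the illustration.
  return fixed_ptw_switch_points(seq_length=16)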
class PTWRandomShiftTrajectoryGenerator(TrajectoryGenerator):
"""Dynamic TG using PTW prior."""
def __init__(
self,
gen_distribution: distributions.Distribution,
parameter_distribution: distributions.Distribution,
parameter_distribution_params: tuple[float, ...],
):
super().__init__(
gen_distribution,
parameter_distribution,
parameter_distribution_params,
)
self._sample_data = jax.jit(
self.gen_distribution.sample, static_argnums=(2,))
self._sample_parameters = jax.jit(
sample_distribution_parameters, static_argnums=(1, 2, 3))
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
"""Returns trajectories with switch points following the PTW prior."""
switch_points = fixed_ptw_switch_points(seq_length)
switch_points.append(seq_length)
batch = []
all_parameters = []
rng_seq = hk.PRNGSequence(rng)
last_switch = 0
for switch_point in switch_points:
length = switch_point - last_switch
if length == 0:
continue
last_switch = switch_point
parameters = self._sample_parameters(
next(rng_seq), self.parameter_distribution,
self.parameter_distribution_params, batch_size)
all_parameters.append(jnp.stack([parameters] * length, axis=1))
batch.append(
self._sample_data(
rng=next(rng_seq),
shape=(batch_size, length, self.gen_distribution.feature_size),
parameters=parameters))
batch = jnp.concatenate(batch, axis=1)
return batch, jnp.concatenate(all_parameters, axis=1)
@contextlib.contextmanager
def local_seed(seed: int):
"""Context manager to set local seed."""
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
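# Illustrative usage sketch (not part of the original module): `local_seed`
# temporarily fixes NumPy's global RNG and then restores the previous state,
# so code outside the `with` block is unaffected by the seed set inside it.
def _example_local_seed() -> np.ndarray:
  with local_seed(0):  # Arbitrary seed, only for the illustration.
    return np.random.random(3)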
def iid_ptw_change_point(batch_size: int, seq_length: int) -> np.ndarray:
"""Generates IID switch points sampled as indicators from the PTW prior."""
next_power_2 = 2**(math.ceil(math.log2(seq_length)))
# Initialises an array to store the change points as boolean masks.
change_points = np.zeros((batch_size, next_power_2), dtype=bool)
def insert(idx: np.ndarray, left: np.ndarray, right: np.ndarray) -> None:
"""Splits the selected trajectories given their left and right ends."""
# Returns if there is no trajectories to split.
if idx.shape[0] == 0:
return
# For debugging:
# assert (left <= right).all() and (left>=0).all() and (right<=T).all()
assert (len(idx) == len(left) == len(right))
mid = (left + right) // 2
# Splits with prob. 0.5 if the segment length is nonzero.
split = np.random.random(*idx.shape) < 0.5
split = split & (left < right)
# Updates the indices and endpoints, keeping those that had a split.
idx = idx[split]
left = left[split]
right = right[split]
mid = mid[split]
# Sets a change point for trajectories that are split.
change_points[idx, mid] = True
# Recursively splits the left and right halves.
insert(idx, left, mid)
insert(idx, mid + 1, right)
# Sets base condition.
all_idx = np.arange(batch_size)
left = np.ones(batch_size, np.uint64)
right = np.ones(batch_size, np.uint64) * next_power_2
insert(all_idx, left, right)
change_points = change_points[:, :seq_length]
return change_points
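# Illustrative usage sketch (not part of the original module): one batch of IID
# PTW change-point masks. Each row is a boolean mask of length seq_length;
# taking `cumsum` over the last axis (as done in the class below) turns it into
# per-step segment indices.
def _example_iid_ptw_change_points() -> np.ndarray:
  with local_seed(0):  # Arbitrary seed, only for the illustration.
    return iid_ptw_change_point(batch_size=4, seq_length=10)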
class IIDPTWRandomShiftTrajectoryGenerator(TrajectoryGenerator):
"""Draws iid trajectories from PTW prior."""
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
"""Returns iid trajectories with switch points following the PTW prior."""
rng_seq = hk.PRNGSequence(rng)
with local_seed(next(rng_seq)[0].item()):
change_points = iid_ptw_change_point(batch_size, seq_length)
num_switch = change_points.sum(-1)
max_num_switch = num_switch.max()
segments = change_points.cumsum(-1)
all_parameters = np.zeros(change_points.shape +
(self.parameter_distribution.feature_size,))
# Loops over all segments.
    # As the number of segments over the entire batch decays exponentially,
    # this loop is unlikely to be long and grows logarithmically with the
    # number of time steps.
for i in range(0, max_num_switch + 1):
# A mask for the i'th segment over all trajectories.
seg_idx = segments == i
# Draws one sample for this segment and all trajectories.
param_samples = sample_distribution_parameters(
next(rng_seq), self.parameter_distribution,
self.parameter_distribution_params, batch_size)
# Expand parameters to sequence length. This is wasteful but is fast.
param_samples = jnp.repeat(param_samples[:, None, :], seq_length, axis=1)
# Sets the parameters using the mask.
all_parameters[seg_idx] = param_samples[seg_idx]
all_parameters = all_parameters.reshape(-1, all_parameters.shape[-1])
batch = self.gen_distribution.sample(
rng=next(rng_seq),
shape=(batch_size * seq_length, self.gen_distribution.feature_size),
parameters=all_parameters)
batch = batch.reshape((batch_size, seq_length, -1))
all_parameters = all_parameters.reshape((batch_size, seq_length, -1))
return batch, all_parameters
@jax.jit
def _sample_categorical_batch(key: chex.PRNGKey, all_parameters: chex.Array):
"""Jittable function to sample one-hot categorical variables."""
num_outcome = all_parameters.shape[-1]
batch = jrandom.categorical(key, logits=jnp.log(all_parameters))
batch = jnp.eye(num_outcome)[batch]
return batch
class IIDPTWRandomShiftCategoricalTrajectoryGenerator(TrajectoryGenerator):
"""Draws iid categorical trajectories from PTW prior.
This is faster than the general implementation above.
"""
def sample(
self,
rng: chex.PRNGKey,
batch_size: int,
seq_length: int,
) -> tuple[chex.Array, chex.Array]:
"""Returns iid categorical trajectories with PTW switch points."""
rng_seq = hk.PRNGSequence(rng)
num_outcome = self.parameter_distribution.parameter_size
assert num_outcome == self.parameter_distribution.feature_size
with local_seed(next(rng_seq)[0].item()):
change_points = iid_ptw_change_point(batch_size, seq_length)
num_switch = change_points.sum(-1)
max_num_switch = num_switch.max()
segs = change_points.cumsum(-1)
all_parameters = np.zeros(change_points.shape + (num_outcome,))
for i in range(0, max_num_switch + 1):
seg_idx = segs == i
      # Drawing samples directly with numpy is fast.
param_samples = np.random.dirichlet(
np.ones(num_outcome) * 0.5, size=batch_size)
param_samples = np.repeat(param_samples[:, None, :], seq_length, axis=1)
all_parameters[seg_idx] = param_samples[seg_idx]
# This is faster than np.random.multinomial.
all_parameters = jnp.asarray(all_parameters)
batch = _sample_categorical_batch(next(rng_seq), all_parameters)
return batch, all_parameters
class LINTrajectoryGenerator(DynamicTrajectoryGenerator):
"""Draws IID trajectories from the linear model defined by Willems 1996."""
def _initial_state( # pytype: disable=signature-mismatch # numpy-scalars
self,
rng: chex.PRNGKey,
batch_size: int,
) -> tuple[int, chex.Array]:
init_parameters = super()._initial_state(rng, batch_size)
return (jnp.ones((batch_size,)), init_parameters)
def update_params(
self,
rng: chex.PRNGKey,
state_t: Any,
t: int,
) -> tuple[Any, chex.Array]:
# Time steps are 1-indexed, but `t` starts at 0.
t += 1
# The state consists of the indices of the current intervals, denoted as
# `t_c` in Willems 1996, and `params`, the parameters of the interval.
t_c, params = state_t
batch_size = params.shape[0]
samples = sample_distribution_parameters(
rng=rng,
parameter_distribution=self.parameter_distribution,
parameter_distribution_params=self.parameter_distribution_params,
batch_size=batch_size,
)
new_params = jnp.where(jnp.expand_dims(t == t_c, 1), samples, params)
# The probability of observing a switch point decays harmonically and
# depends on the time `t` and the beginning of the current interval `t_c`.
is_switch_point = jrandom.bernoulli(rng, 0.5 / (t - t_c + 1))
# If we observe a switch point, we set `t_c` to the next time step to
# indicate the start of a new interval.
t_c = jnp.where(is_switch_point, t + 1, t_c)
return (t_c, new_params), new_params
| nonstationary_mbml-main | experiments/trajectory_generators.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for the meta learning experiments."""
import copy
import math
from typing import Any, Optional
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
import optax
import tree
from nonstationary_mbml import base_constants
from nonstationary_mbml import predictors
from nonstationary_mbml.experiments import config as config_lib
from nonstationary_mbml.experiments import constants as meta_learning_constants
from nonstationary_mbml.experiments import trajectory_generators as tg
@jax.jit
def _compute_true_cross_entropy(logits: chex.Array,
distribution_params: chex.Array) -> chex.Array:
  """Cross-entropy between predictions and the true next-step distributions.
  The logits at step t predict the observation at step t+1, so the last
  prediction and the first set of true parameters are dropped to align them.
  """
  logits = logits[:, :-1]
distribution_params = distribution_params[:, 1:]
return optax.softmax_cross_entropy(logits, distribution_params)
class MetaLearningEvaluator(base_constants.Evaluator):
"""Evaluator for meta learning."""
def __init__(
self,
predictor: predictors.Predictor,
data_generator: tg.TrajectoryGenerator,
batch_size: int,
seq_length: int,
optimal_predictors: Optional[dict[str, predictors.Predictor]] = None,
chunk_length: Optional[int] = None,
) -> None:
self._predictor = predictor
self._optimal_predictors = optimal_predictors
self._data_generator = data_generator
self._batch_size = batch_size
self._seq_length = seq_length
self._chunk_length = chunk_length
@jax.jit
def _dist_entropy(distribution_params: chex.Array) -> chex.Array:
dist_entropy = hk.BatchApply(data_generator.gen_distribution.entropy)(
distribution_params)
return jnp.squeeze(dist_entropy, axis=-1)
self._dist_entropy = _dist_entropy
if optimal_predictors is not None:
self._optimal_predictors_init_state = dict()
for predictor_name, optimal_predictor in self._optimal_predictors.items():
self._optimal_predictors_init_state[predictor_name] = (
optimal_predictor.initial_state(None, None, batch_size=batch_size)
)
def step(
self, predictor_params: Any, predictor_state: Any, rng: chex.PRNGKey
) -> dict[str, Any]:
"""Evaluates the predictor and returns a log dict."""
rngs = hk.PRNGSequence(rng)
data_batch, distribution_params = self._data_generator.sample(
rng, self._batch_size, self._seq_length)
if self._chunk_length is None:
logits, _ = self._predictor.unroll(
predictor_params, next(rngs), data_batch, predictor_state
)
else:
final_logits = []
predictor_state = copy.deepcopy(predictor_state)
for i in range(math.ceil(self._seq_length / self._chunk_length)):
data_chunk = data_batch[:, i * self._chunk_length:(i + 1) *
self._chunk_length]
logits, states = self._predictor.unroll(
predictor_params, next(rngs), data_chunk, predictor_state
)
if states is not None:
predictor_state = tree.map_structure(lambda x: x[:, -1], states)
else:
predictor_state = None
final_logits.append(logits)
logits = np.concatenate(final_logits, axis=1)
true_entropy = self._dist_entropy(distribution_params[:, 1:])
instantaneous_regret = _compute_true_cross_entropy(
logits, distribution_params) - true_entropy
mean_regret = jnp.mean(instantaneous_regret)
cumulative_regret = jnp.mean(jnp.sum(instantaneous_regret, axis=1))
if self._optimal_predictors is not None:
optimal_logits = dict()
optimal_cumulative_regret = dict()
optimal_instantaneous_regret = dict()
for predictor_name, optimal_predictor in self._optimal_predictors.items():
init_state = copy.deepcopy(
self._optimal_predictors_init_state[predictor_name]
)
optimal_logits[predictor_name] = optimal_predictor.unroll(
params=None, rng=next(rngs), batch=data_batch, init_state=init_state
)
optimal_instantaneous_regret[predictor_name] = (
_compute_true_cross_entropy(
optimal_logits[predictor_name], distribution_params
)
- true_entropy
)
optimal_cumulative_regret[predictor_name] = jnp.mean(
jnp.sum(optimal_instantaneous_regret[predictor_name], axis=1)
)
log_dict = {}
log_dict['logits'] = logits
log_dict['mean_regret'] = mean_regret
log_dict['cumulative_regret'] = cumulative_regret
if self._optimal_predictors is not None:
for predictor_name, optimal_predictor in self._optimal_predictors.items():
log_dict[f'optimal_cumulative_regret/{predictor_name}'] = (
optimal_cumulative_regret[predictor_name]
)
log_dict[f'cumulative_regret_above_optimal/{predictor_name}'] = (
cumulative_regret - optimal_cumulative_regret[predictor_name]
)
return log_dict
# The following function follows the protocol base_constants.EvaluatorBuilder.
def build_evaluator(
predictor: predictors.Predictor, config: config_lib.EvalConfig
) -> MetaLearningEvaluator:
"""Returns an evaluator from a meta_learning eval config."""
if config.optimal_predictors is not None:
optimal_predictors = dict()
for optimal_predictor in config.optimal_predictors:
optimal_predictors[optimal_predictor] = (
meta_learning_constants.OPTIMAL_PREDICTORS[optimal_predictor](
**config.optimal_predictors_kwargs[optimal_predictor]
)
)
else:
optimal_predictors = None
data_generator = meta_learning_constants.build_data_generator(config.data)
return MetaLearningEvaluator(
predictor=predictor,
data_generator=data_generator,
batch_size=config.batch_size,
seq_length=config.seq_length,
optimal_predictors=optimal_predictors,
chunk_length=config.chunk_length,
)
| nonstationary_mbml-main | experiments/evaluator.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LiveAndDie predictor classes."""
import copy
from typing import Any
import chex
import haiku as hk
import numpy as np
from nonstationary_mbml import predictors
@chex.dataclass
class LADRecords:
"""The records of a linear LAD estimator state.
Attributes:
timestep: The records' timestep (shared across records).
reset_timestep: The records' reset timesteps, of shape `(batch_size, time)`.
counts: The records' counts, of shape `(batch_size, time, 2)`.
log_prob: The records' log probability, of shape `(batch_size, time)`.
"""
timestep: int
reset_timestep: chex.Array
counts: chex.Array
log_prob: chex.Array
@chex.dataclass
class LADState:
"""The state of the linear LAD estimator.
Attributes:
log_prob: The state's log probabilities, of shape `(batch_size)`.
records: The state's records.
timestep: The state's global timestep.
"""
log_prob: chex.Array
records: LADRecords
timestep: int
class LADPredictor(predictors.Predictor):
"""Linear Live and Die estimator predictor.
WARNING:
LAD outputs a prediction before seeing the first token, which is
inconsistent with our predictor interface. Thus, we omit the first
prediction and append a dummy output at the end.
"""
def initial_state(
self,
params: Any,
rng: chex.PRNGKey,
batch_size: int,
) -> Any:
"""Returns the initial LADState."""
del params, rng
return LADState(
log_prob=np.zeros((batch_size,), dtype=np.float32),
records=LADRecords(
timestep=0,
reset_timestep=np.zeros((batch_size, 0), dtype=np.uint8),
counts=np.zeros((batch_size, 0, 2), dtype=np.uint8),
log_prob=np.zeros((batch_size, 0), dtype=np.float32),
),
timestep=0,
)
def init_params(
self,
rng: chex.PRNGKey,
batch_init: chex.Array,
state_init: chex.Array,
) -> hk.Params:
return dict()
def unroll(
self,
params: Any,
rng: chex.PRNGKey,
batch: chex.Array,
init_state: Any,
) -> chex.Array:
del params, rng
def scan_update_output(
state: LADState,
x: chex.Array,
) -> tuple[chex.Array, chex.Array]:
pred = self.output_from_state(state)
new_state = self.update(state, x.argmax(-1))
return new_state, pred # pytype: disable=bad-return-type # numpy-scalars
# Change to time-major layout since we unroll over the leading dimension.
batch = batch.swapaxes(0, 1)
state = copy.deepcopy(init_state)
predictions = list()
for x in batch:
state, pred = scan_update_output(state, x)
predictions.append(pred)
predictions = np.stack(predictions, axis=0)
# LAD outputs a prediction before seeing the first token, which is
# inconsistent with our predictor interface. Thus, we omit the first
# prediction and append a dummy output at the end.
predictions = np.concatenate(
[predictions[1:],
np.full_like(predictions[:1], np.nan)], axis=0)
predictions = predictions.swapaxes(0, 1)
return predictions
def output_from_state(self, state: LADState) -> chex.Array:
"""Returns the log probability of the next symbol being 0 or 1."""
log_prob = self.log_prob(state, np.ones_like(state.log_prob, dtype=int))
return np.stack([np.log(1.0 - np.exp(log_prob)), log_prob], axis=-1)
def compute_log_marginal(self, state: LADState) -> LADState:
"""Returns the state updated with new log marginal probability."""
chex.assert_axis_dimension_gt(
state.records.counts, 1, 0, exception_type=ValueError
)
state.log_prob = np.logaddexp.reduce(state.records.log_prob, axis=1)
return state
def transition_probability(
self,
new_timestep: int,
new_reset_timestep: chex.Array,
old_timestep: int,
old_reset_timestep: chex.Array,
state_timestep: int,
) -> chex.Array:
"""Returns the transition probability, shape `(batch_size, time_steps)`."""
n = 0.5
if np.any(new_timestep == new_reset_timestep):
assert new_timestep == state_timestep
else:
n += old_timestep - old_reset_timestep
d = old_timestep - old_reset_timestep + 1.0
return np.log(n / d)
def update(self, state: LADState, symbol: chex.Array) -> LADState:
"""Returns the updated state after processing `symbol`.
Args:
state: The current state of the linear LAD estimator.
symbol: The next symbol in the sequence, of shape `(batch_size,)`
"""
state.timestep += 1
state = self.compute_state_probs_linear(state, symbol)
state = self.compute_log_marginal(state)
return state
def log_prob(self, state: LADState, symbol: chex.Array) -> chex.Array:
"""Returns the log probability of `symbol` being the next symbol.
Args:
state: The current state of the linear LAD estimator.
symbol: The next symbol in the sequence, of shape `(batch_size,)`.
"""
new_state = copy.deepcopy(state)
self.update(new_state, symbol)
log_prob = new_state.log_prob - state.log_prob
assert np.all(log_prob <= 0)
return log_prob
def compute_state_probs_linear(
self,
state: LADState,
symbol: chex.Array,
) -> LADState:
"""Returns the updated state by after computing the coding probabilities.
Args:
state: The current state of the linear LAD estimator.
symbol: The next symbol in the sequence, of shape `(batch_size,)`.
"""
batch_size = state.log_prob.shape[0]
new_records = LADRecords(
timestep=state.timestep,
reset_timestep=np.tile(
np.arange(1, state.timestep + 1), (batch_size, 1)
),
counts=np.zeros((batch_size, state.timestep, 2), dtype=np.uint8),
log_prob=np.zeros((batch_size, state.timestep), dtype=np.float32),
)
time_steps = state.records.log_prob.shape[1]
time_range = range(time_steps)
batch_range = range(batch_size)
if 0 < time_steps:
idx = state.records.reset_timestep - 1
indices = np.stack((np.tile(batch_range, (time_steps, 1)).T, idx), axis=1)
chex.assert_trees_all_equal(
new_records.reset_timestep[indices[:, 0], indices[:, 1]],
state.records.reset_timestep,
exception_type=ValueError,
)
n_grid = np.dstack(np.meshgrid(batch_range, time_range))
symbol_indices = np.tile(symbol, (1, time_steps, 1))
n_indices = np.concatenate((n_grid, symbol_indices.transpose(1, 2, 0)),
axis=-1).transpose(1, 2, 0)
n = (
state.records.counts[
n_indices[:, 0], n_indices[:, 1], n_indices[:, 2]
]
+ 0.5
)
d = (
new_records.timestep
- new_records.reset_timestep[indices[:, 0], indices[:, 1]]
+ 1.0
)
r = np.log(n / d)
rec_indices = np.concatenate((indices, symbol_indices.transpose(2, 0, 1)),
axis=1)
trans_prob = self.transition_probability(
new_timestep=new_records.timestep,
new_reset_timestep=new_records.reset_timestep[
indices[:, 0], indices[:, 1]
],
old_timestep=state.records.timestep,
old_reset_timestep=state.records.reset_timestep,
state_timestep=state.timestep,
)
new_records.log_prob[indices[:, 0], indices[:, 1]] = (
state.records.log_prob + trans_prob + r
)
new_records.counts[indices[:, 0], indices[:, 1]] = state.records.counts
new_records.counts[
rec_indices[:, 0], rec_indices[:, 1], rec_indices[:, 2]
] += 1
# Now handle the (x, x) state.
idx = state.timestep - 1
new_records.counts[batch_range, idx, symbol] += 1
if time_steps == 0:
new_records.log_prob[:, idx] = np.log(0.5)
else:
new_records.log_prob[:, idx] = np.logaddexp.reduce(
state.records.log_prob
+ self.transition_probability(
new_timestep=new_records.timestep,
new_reset_timestep=new_records.reset_timestep[:, idx],
old_timestep=state.records.timestep,
old_reset_timestep=state.records.reset_timestep,
state_timestep=state.timestep,
)
+ np.log(0.5),
axis=1,
)
state.records = new_records
return state
| nonstationary_mbml-main | experiments/live_and_die_predictors.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stack RNN core.
Following the paper from Joulin et al (2015):
https://arxiv.org/abs/1503.01007
The idea is to add a stack extension to a recurrent neural network to be able to
simulate a machine accepting context-free languages.
The stack is completely differentiable. The actions taken are probabilities
only and therefore no RL is required. The stack and state update are just linear
combinations of the last states and these probabilities.
"""
from typing import Any, Mapping, Optional
import chex
from einshape import jax_einshape as einshape
import haiku as hk
import jax
import jax.nn as jnn
import jax.numpy as jnp
# First element is the stacks, second is the hidden internal state.
_StackRnnState = tuple[chex.Array, chex.Array]
# Number of actions the stack-RNN can take, namely POP, PUSH and NO_OP.
_NUM_ACTIONS = 3
def _update_stack(stack: chex.Array, actions: chex.Array,
push_value: chex.Array) -> chex.Array:
"""Updates the stack values.
We update the stack in two steps.
In the first step, we update the top of the stack, and essentially do:
stack[0] = push_action * push_value
+ pop_action * stack[1]
+ noop_action * stack[0]
Then, in the second step, we update the rest of the stack and we move the
elements up and down, depending on the action executed:
* If push_action were 1, then we'd be purely pushing a new element
to the top of the stack, so we'd move all elements down by one.
* Likewise, if pop_action were 1, we'd be purely taking an element
off the top of the stack, so we'd move all elements up by one.
* Finally, if noop_action were 1, we'd leave elements where they were.
The update is therefore essentially:
stack[i] = push_action * stack[i-1]
+ pop_action * stack[i+1]
+ noop_action * stack[i]
Args:
stack: The current stack, shape (batch_size, stack_size, stack_cell_size).
actions: The array of probabilities of the actions, shape (batch_size, 3).
push_value: The vector to push on the stack, if the push action probability
is positive, shape (batch_size, stack_cell_size).
Returns:
The new stack, same shape as the input stack.
"""
batch_size, stack_size, stack_cell_size = stack.shape
# Tiling the actions to match the top of the stack.
# Shape (batch_size, stack_cell_size, _NUM_ACTIONS)
cell_tiled_stack_actions = einshape('ba->bsa', actions, s=stack_cell_size)
push_action = cell_tiled_stack_actions[..., 0]
pop_action = cell_tiled_stack_actions[..., 1]
pop_value = stack[..., 1, :]
no_op_action = cell_tiled_stack_actions[..., 2]
no_op_value = stack[..., 0, :]
  # Each of the following tensors has shape (batch_size, stack_cell_size).
chex.assert_equal_shape([
push_value, pop_value, no_op_value, push_action, pop_action, no_op_action
])
top_new_stack = (
push_action * push_value + pop_action * pop_value +
no_op_action * no_op_value)
top_new_stack = jnp.expand_dims(top_new_stack, axis=1)
# Tiling the actions to match all of the stack except the top.
  # Shape (batch_size, stack_size - 1, stack_cell_size, _NUM_ACTIONS)
stack_tiled_stack_actions = einshape(
'ba->bcsa', actions, s=stack_cell_size, c=stack_size - 1)
push_action = stack_tiled_stack_actions[..., 0]
push_value = stack[..., :-1, :]
pop_action = stack_tiled_stack_actions[..., 1]
pop_extra_zeros = jnp.zeros((batch_size, 1, stack_cell_size))
pop_value = jnp.concatenate([stack[..., 2:, :], pop_extra_zeros], axis=1)
no_op_action = stack_tiled_stack_actions[..., 2]
no_op_value = stack[..., 1:, :]
# Shape (batch_size, stack_size-1, stack_cell_size)
chex.assert_equal_shape([
push_value, pop_value, no_op_value, push_action, pop_action, no_op_action
])
rest_new_stack = (
push_action * push_value + pop_action * pop_value +
no_op_action * no_op_value)
# Finally concatenate the new top with the new rest of the stack.
return jnp.concatenate([top_new_stack, rest_new_stack], axis=1)
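# Illustrative sketch (not part of the original module): a deterministic PUSH
# (action probabilities [1, 0, 0]) on a single stack with three slots of cell
# size two. The pushed vector becomes the new top and the old contents shift
# down by one slot, exactly as described in the docstring above.
def _example_pure_push() -> chex.Array:
  stack = jnp.arange(6, dtype=jnp.float32).reshape((1, 3, 2))
  actions = jnp.array([[1.0, 0.0, 0.0]])  # (push, pop, no-op)
  push_value = jnp.full((1, 2), -1.0)
  # Result: [[-1, -1], [0, 1], [2, 3]] -- the bottom row [4, 5] falls off.
  return _update_stack(stack, actions, push_value)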
class StackRNNCore(hk.RNNCore):
"""Core for the stack RNN."""
def __init__(
self,
stack_cell_size: int,
stack_size: int = 30,
n_stacks: int = 1,
inner_core: type[hk.RNNCore] = hk.VanillaRNN,
name: Optional[str] = None,
**inner_core_kwargs: Mapping[str, Any]
):
"""Initializes.
Args:
stack_cell_size: The dimension of the vectors we put in the stack.
stack_size: The total number of vectors we can stack.
n_stacks: Number of stacks to use in the network.
inner_core: The inner RNN core builder.
name: See base class.
**inner_core_kwargs: The arguments to be passed to the inner RNN core
builder.
"""
super().__init__(name=name)
self._rnn_core = inner_core(**inner_core_kwargs)
self._stack_cell_size = stack_cell_size
self._stack_size = stack_size
self._n_stacks = n_stacks
def __call__(
self, inputs: chex.Array, prev_state: _StackRnnState
) -> tuple[chex.Array, _StackRnnState]:
"""Steps the stack RNN core.
See base class docstring.
Args:
inputs: An input array of shape (batch_size, input_size). The time
dimension is not included since it is an RNNCore, which is unrolled over
the time dimension.
prev_state: A _StackRnnState tuple, consisting of the previous stacks and
the previous state of the inner core. Each stack has shape (batch_size,
stack_size, stack_cell_size), such that `stack[n][0]` represents the top
of the stack for the nth batch item, and `stack[n][-1]` the bottom of
the stack. The stacks are just the concatenation of all these tensors.
Returns:
- output: An output array of shape (batch_size, output_size).
- next_state: Same format as prev_state.
"""
stacks, old_core_state = prev_state
# The network can always read the top of the stack.
batch_size = stacks.shape[0]
top_stacks = stacks[:, :, 0, :]
top_stacks = jnp.reshape(
top_stacks, (batch_size, self._n_stacks * self._stack_cell_size))
inputs = jnp.concatenate([inputs, top_stacks], axis=-1)
new_core_output, new_core_state = self._rnn_core(inputs, old_core_state)
push_values = hk.Linear(self._n_stacks * self._stack_cell_size)(
new_core_output)
push_values = jnp.reshape(
push_values, (batch_size, self._n_stacks, self._stack_cell_size))
# Shape (batch_size, _NUM_ACTIONS)
stack_actions = jnn.softmax(
hk.Linear(self._n_stacks * _NUM_ACTIONS)(new_core_output), axis=-1)
stack_actions = jnp.reshape(stack_actions,
(batch_size, self._n_stacks, _NUM_ACTIONS))
new_stacks = jax.vmap(
_update_stack, in_axes=1, out_axes=1)(stacks, stack_actions,
push_values)
return new_core_output, (new_stacks, new_core_state)
def initial_state(self, batch_size: Optional[int]) -> _StackRnnState:
"""Returns the initial state of the core, a hidden state and an empty stack."""
core_state = self._rnn_core.initial_state(batch_size)
stacks = jnp.zeros(
(batch_size, self._n_stacks, self._stack_size, self._stack_cell_size))
return stacks, core_state
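# Illustrative usage sketch (not part of the original module): unrolling a
# small stack-RNN core over a batch of sequences with Haiku. The inner core is
# the default hk.VanillaRNN; `hidden_size` is forwarded to it through
# **inner_core_kwargs.
def _example_stack_rnn_unroll() -> chex.Array:
  def forward(x: chex.Array) -> chex.Array:
    core = StackRNNCore(stack_cell_size=8, stack_size=10, hidden_size=16)
    initial_state = core.initial_state(batch_size=x.shape[0])
    outputs, _ = hk.dynamic_unroll(core, x, initial_state, time_major=False)
    return outputs  # Shape (batch_size, time, hidden_size).
  forward_fn = hk.transform(forward)
  x = jnp.zeros((2, 5, 3))  # (batch_size, time, input_size)
  params = forward_fn.init(jax.random.PRNGKey(0), x)
  # No rng is needed at apply time since the core is deterministic.
  return forward_fn.apply(params, None, x)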
| nonstationary_mbml-main | models/stack_rnn.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic neural network models.
This file contains builders for different simple models:
- MLP
- CNN
- RNN/LSTM
"""
from typing import Any, Callable, Mapping, Optional, Sequence
import chex
import haiku as hk
import jax.nn as jnn
import jax.numpy as jnp
def make_cnn(
channel_widths: Sequence[int],
kernel_shape: tuple[int, int],
output_size: int,
mlp_widths: Optional[Sequence[int]] = None,
) -> Callable[[chex.Array], chex.Array]:
"""Returns a CNN model with extra MLP layers."""
def model(x: chex.Array) -> chex.Array:
for channels in channel_widths:
x = hk.Conv2D(channels, kernel_shape=kernel_shape)(x)
x = jnn.relu(x)
x = hk.Flatten()(x)
if mlp_widths is not None:
for width in mlp_widths:
x = hk.Linear(width)(x)
x = jnn.relu(x)
return hk.Linear(output_size)(x)
return model
def make_mlp(hidden_layers_sizes: Sequence[int],
output_size: int) -> Callable[[jnp.ndarray], jnp.ndarray]:
"""Returns an MLP model."""
def mlp(x):
flattened_in = hk.Flatten()(x)
layer_sizes = tuple(hidden_layers_sizes) + (output_size,)
return hk.nets.MLP(layer_sizes)(flattened_in)
return mlp
def make_rnn(
output_size: int,
rnn_core: type[hk.RNNCore],
return_all_outputs: bool = False,
return_all_states: bool = False,
input_window: int = 1,
**rnn_kwargs: Mapping[str, Any],
) -> Callable[[jnp.ndarray], Any]:
"""Returns an RNN model, not haiku transformed.
  By default, only the last output in the sequence is returned. A linear layer
  is added to match the required output_size.
Args:
output_size: The output size of the model.
    rnn_core: The Haiku RNN core class to use, e.g. hk.LSTM or hk.VanillaRNN.
return_all_outputs: Whether to return the whole sequence of outputs of the
RNN, or just the last one.
return_all_states: Whether to return all the intermediary RNN states.
input_window: The number of tokens that are fed at once to the RNN.
**rnn_kwargs: Kwargs to be passed to the RNN core.
"""
def rnn_model(x: jnp.array,
initial_state: Optional[Any] = None) -> jnp.ndarray:
core = rnn_core(**rnn_kwargs)
if initial_state is None:
initial_state = core.initial_state(x.shape[0])
batch_size, seq_length, embed_size = x.shape
if seq_length % input_window != 0:
x = jnp.pad(x, ((0, 0), (0, input_window - seq_length % input_window),
(0, 0)))
new_seq_length = x.shape[1]
x = jnp.reshape(
x,
(batch_size, new_seq_length // input_window, input_window, embed_size))
x = hk.Flatten(preserve_dims=2)(x)
output, all_states = hk.dynamic_unroll(
core, x, initial_state, time_major=False, return_all_states=True)
output = jnp.reshape(output, (batch_size, new_seq_length, output.shape[-1]))
if not return_all_outputs:
output = output[:, -1, :] # (batch, time, alphabet_dim)
output = jnn.relu(output)
output = hk.Linear(output_size)(output)
if not return_all_states:
return output
else:
return output, all_states # pytype: disable=bad-return-type # jax-ndarray
return rnn_model
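# Illustrative usage sketch (not part of the original module): building an LSTM
# with `make_rnn` and transforming it with Haiku. `hidden_size` is forwarded to
# hk.LSTM through **rnn_kwargs; the module itself does not import `jax`, so the
# example imports it locally for the PRNG key.
def _example_lstm_forward() -> chex.Array:
  import jax  # Local import, used only for the PRNG key in this example.
  model = hk.transform(
      make_rnn(
          output_size=2,
          rnn_core=hk.LSTM,
          return_all_outputs=True,
          hidden_size=16,
      ))
  x = jnp.zeros((4, 10, 3))  # (batch, time, features)
  params = model.init(jax.random.PRNGKey(0), x)
  # Output shape: (4, 10, 2), i.e. one output per time step.
  return model.apply(params, None, x)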
| nonstationary_mbml-main | models/basic.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer model."""
import dataclasses
from typing import Callable, Optional
import chex
import haiku as hk
import jax.nn as jnn
import jax.numpy as jnp
from nonstationary_mbml.models import positional_encodings as pos_encs_lib
@chex.dataclass
class TransformerConfig:
"""Hyperparameters used in the Transformer architectures."""
# The size of the model output (i.e., the output vocabulary size).
output_size: int
# The dimension of the first embedding.
embedding_dim: int = 64
# The number of multi-head attention layers.
num_layers: int = 5
# The number of heads per layer.
num_heads: int = 8
# The number of hidden neurons per head. If None, it is set to be equal to
# `embedding_dim // num_heads`.
num_hiddens_per_head: Optional[int] = None
# The probability that each element is discarded by the dropout modules.
dropout_prob: float = 0.1
# The parameter initialization scale for the embeddings.
emb_init_scale: float = 0.02
# Whether to use the embeddings rather than raw inputs.
use_embeddings: bool = True
# The size of the sliding attention window. See MultiHeadDotProductAttention.
attention_window: Optional[int] = None
  # The positional encodings to use, sin/cos (Vaswani et al., 2017) by default.
positional_encodings: pos_encs_lib.PositionalEncodings = dataclasses.field(
default_factory=lambda: pos_encs_lib.PositionalEncodings.SIN_COS
)
  # The maximum size of the context (used by the positional encodings).
max_time: int = 10_000
# The parameters for the positional encodings, default sin/cos.
positional_encodings_params: pos_encs_lib.PositionalEncodingsParams = (
dataclasses.field(default_factory=pos_encs_lib.SinCosParams)
)
# How much larger the hidden layer of the feedforward network should be
# compared to the `embedding_dim`.
widening_factor: int = 4
# Add mask to make causal predictions.
causal_masking: bool = False
def __post_init__(self) -> None:
"""Sets `num_hiddens_per_head` if it is `None`."""
if self.num_hiddens_per_head is None:
self.num_hiddens_per_head = self.embedding_dim // self.num_heads
def layer_norm(x: chex.Array) -> chex.Array:
return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)
def compute_sliding_window_mask(sequence_length: int,
attention_window: int) -> chex.Array:
"""Returns a k-diagonal mask for a sliding window.
Args:
sequence_length: The length of the sequence, which will determine the shape
of the output.
attention_window: The size of the sliding window.
Returns:
A symmetric matrix of shape (sequence_length, sequence_length),
attention_window-diagonal, with ones on the diagonal and on all the
upper/lower diagonals up to attention_window // 2.
Raises:
ValueError if attention_window is <= 0.
"""
if attention_window <= 0:
raise ValueError(
f'The attention window should be > 0. Got {attention_window}.')
if attention_window == 1:
return jnp.eye(sequence_length, sequence_length)
attention_mask = jnp.sum(
jnp.stack([
jnp.eye(sequence_length, sequence_length, k=k, dtype=jnp.int32)
for k in range(1, attention_window // 2 + 1)
]),
axis=0)
attention_mask = attention_mask + jnp.transpose(attention_mask)
attention_mask += jnp.eye(sequence_length, sequence_length)
return attention_mask
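# Illustrative sketch (not part of the original module): with sequence length 5
# and a window of 3, the mask is tridiagonal, i.e. every query position attends
# to itself and to its immediate left and right neighbours only.
def _example_sliding_window_mask() -> chex.Array:
  # Returns a (5, 5) array with ones on the main diagonal and on the first
  # upper and lower diagonals, and zeros elsewhere.
  return compute_sliding_window_mask(sequence_length=5, attention_window=3)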
class MultiHeadDotProductAttention(hk.Module):
"""Multi-head dot-product attention (Vaswani et al., 2017)."""
def __init__(
self,
num_heads: int,
num_hiddens_per_head: int,
positional_encodings: pos_encs_lib.PositionalEncodings,
positional_encodings_params: pos_encs_lib.PositionalEncodingsParams,
attention_window: Optional[int] = None,
name: Optional[str] = None,
) -> None:
"""Initializes the attention module.
Args:
num_heads: Number of heads to use.
num_hiddens_per_head: Number of hidden neurons per head.
positional_encodings: Which positional encodings to use in the attention.
positional_encodings_params: Parameters for the positional encodings.
attention_window: Size of the attention sliding window. None means no
sliding window is used (or equivalently, window=full_attention_length).
We attend only on attention_window tokens around a given query token. We
attend to tokens before AND after the query token. If attention_window
is even, we use the value +1.
name: Name of the module.
"""
super().__init__(name=name)
self._num_heads = num_heads
self._num_hiddens_per_head = num_hiddens_per_head
self._positional_encodings = positional_encodings
self._attention_window = attention_window
self._positional_encodings_params = positional_encodings_params
def __call__(
self,
inputs_q: chex.Array,
inputs_kv: chex.Array,
mask: Optional[chex.Array] = None,
causal: bool = False,
) -> chex.Array:
"""Returns the output of the multi-head attention."""
batch_size, sequence_length, embedding_size = inputs_q.shape
num_hiddens = self._num_hiddens_per_head * self._num_heads
q = hk.Linear(num_hiddens, with_bias=False)(inputs_q)
k = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
v = hk.Linear(num_hiddens, with_bias=False)(inputs_kv)
# The second (sequence) dimension is undefined since it can differ between
# queries and keys/values when decoding.
new_shape = (batch_size, -1, self._num_heads, self._num_hiddens_per_head)
q = jnp.reshape(q, new_shape)
k = jnp.reshape(k, new_shape)
v = jnp.reshape(v, new_shape)
# Let b=batch_size, t=seq_len, h=num_heads, and d=num_hiddens_per_head.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.RELATIVE:
attention = pos_encs_lib.compute_attention_with_relative_encodings(
q, k, self._positional_encodings_params.max_time, causal=causal)
else:
attention = jnp.einsum('bthd,bThd->bhtT', q, k)
attention *= 1. / jnp.sqrt(self._num_hiddens_per_head)
# ALiBi encodings are not scaled with the 1 / sqrt(d_k) factor.
if self._positional_encodings == pos_encs_lib.PositionalEncodings.ALIBI:
attention += pos_encs_lib.compute_alibi_encodings_biases(
attention.shape[1:])
if self._attention_window is not None:
# We compute the sliding attention by just applying a mask on the values
# that are outside our window.
attention_mask = compute_sliding_window_mask(sequence_length,
self._attention_window)
attention = jnp.where(attention_mask, attention,
jnp.finfo(jnp.float32).min)
if mask is not None:
attention = jnp.where(mask, attention, jnp.finfo(jnp.float32).min)
normalized_attention = jnn.softmax(attention)
output = jnp.einsum('bhtT,bThd->bthd', normalized_attention, v)
output = jnp.reshape(output, (batch_size, sequence_length, num_hiddens))
return hk.Linear(embedding_size, with_bias=False)(output)
class TransformerEncoder(hk.Module):
"""Transformer Encoder (Vaswani et al., 2017)."""
def __init__(self,
config: TransformerConfig,
name: Optional[str] = None) -> None:
"""Initializes the transformer encoder.
Args:
config: The hyperparameters used in Transformer architectures.
name: The name of the module.
"""
super().__init__(name=name)
self._config = config
def __call__(self, x: jnp.ndarray) -> chex.Array:
"""Returns the transformer encoder output, shape [B, T, E]."""
if self._config.use_embeddings:
# Since `x` is one-hot encoded, using hk.Linear is equivalent to
# hk.Embed with hk.EmbedLookupStyle.ONE_HOT.
embs_init = hk.initializers.TruncatedNormal(
stddev=self._config.emb_init_scale
)
embeddings = hk.Linear(
self._config.embedding_dim, with_bias=False, w_init=embs_init
)(x)
embeddings *= jnp.sqrt(self._config.embedding_dim)
else:
embeddings = x
batch_size, sequence_length, embedding_size = embeddings.shape
pos_enc_params = self._config.positional_encodings_params
if (
self._config.positional_encodings
== pos_encs_lib.PositionalEncodings.SIN_COS
):
pos_encodings = pos_encs_lib.sinusoid_position_encoding(
sequence_length=sequence_length,
hidden_size=embedding_size,
memory_length=0,
max_timescale=pos_enc_params.max_time,
min_timescale=2,
clamp_length=0,
causal=True,
)
h = embeddings + pos_encodings
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
else:
h = embeddings
# The causal mask is shared across heads.
if self._config.causal_masking:
causal_mask = jnp.tril(
jnp.ones((batch_size, 1, sequence_length, sequence_length)))
else:
causal_mask = None
for _ in range(self._config.num_layers):
attention = MultiHeadDotProductAttention(
num_heads=self._config.num_heads,
num_hiddens_per_head=self._config.num_hiddens_per_head,
positional_encodings=self._config.positional_encodings,
positional_encodings_params=pos_enc_params,
attention_window=self._config.attention_window)(
inputs_q=h,
inputs_kv=h,
mask=causal_mask,
causal=self._config.causal_masking)
attention = hk.dropout(hk.next_rng_key(), self._config.dropout_prob,
attention)
attention = layer_norm(h + attention)
# Position-wise feedforward network.
h = hk.Linear(self._config.embedding_dim * self._config.widening_factor)(
attention)
h = jnn.relu(h)
h = hk.Linear(self._config.embedding_dim)(h)
h = hk.dropout(hk.next_rng_key(), self._config.dropout_prob, h)
h = layer_norm(h + attention)
return h
def make_transformer_encoder(
output_size: int,
embedding_dim: int = 64,
num_layers: int = 5,
num_heads: int = 8,
num_hiddens_per_head: Optional[int] = None,
dropout_prob: float = 0.1,
emb_init_scale: float = 0.02,
use_embeddings: bool = True,
attention_window: Optional[int] = None,
positional_encodings: Optional[pos_encs_lib.PositionalEncodings] = None,
positional_encodings_params: Optional[
pos_encs_lib.PositionalEncodingsParams] = None,
widening_factor: int = 4,
return_all_outputs: bool = False,
causal_masking: bool = False,
) -> Callable[[chex.Array], chex.Array]:
"""Returns a transformer encoder model."""
if positional_encodings is None:
positional_encodings = pos_encs_lib.PositionalEncodings.SIN_COS
positional_encodings_params = pos_encs_lib.SinCosParams()
elif positional_encodings_params is None:
raise ValueError('No parameters for positional encodings are passed.')
config = TransformerConfig(
output_size=output_size,
embedding_dim=embedding_dim,
num_layers=num_layers,
num_heads=num_heads,
num_hiddens_per_head=num_hiddens_per_head,
dropout_prob=dropout_prob,
emb_init_scale=emb_init_scale,
use_embeddings=use_embeddings,
attention_window=attention_window,
positional_encodings=positional_encodings,
positional_encodings_params=positional_encodings_params,
widening_factor=widening_factor,
causal_masking=causal_masking,
)
def transformer_encoder(inputs: chex.Array) -> chex.Array:
output = TransformerEncoder(config)(inputs)
if not return_all_outputs:
output = output[:, -1, :]
return hk.Linear(output_size)(output)
return transformer_encoder
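# A minimal usage sketch (an addition, not part of the original module): the
# factory above returns a plain function, so it must still be wrapped with
# hk.transform before it can be initialized and applied. The vocabulary size,
# batch size and sequence length below are illustrative assumptions only.
def _example_transformer_encoder_usage():
  import jax
  vocab_size, batch_size, seq_len = 16, 2, 10
  model = hk.transform(
      make_transformer_encoder(output_size=vocab_size, embedding_dim=64))
  # The encoder expects one-hot encoded inputs of shape [B, T, V].
  tokens = jax.random.randint(
      jax.random.PRNGKey(0), (batch_size, seq_len), 0, vocab_size)
  one_hot_inputs = jnp.eye(vocab_size)[tokens]
  params = model.init(jax.random.PRNGKey(1), one_hot_inputs)
  return model.apply(params, jax.random.PRNGKey(2), one_hot_inputs)  # [B, 16]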
| nonstationary_mbml-main | models/transformer.py |
# Copyright 2023 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Positional encodings, used in transformer.py."""
import enum
import math
from typing import Any
import chex
import haiku as hk
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
class PositionalEncodings(enum.Enum):
"""Enum for all the positional encodings implemented."""
NONE = 0
SIN_COS = 1
ALIBI = 2
RELATIVE = 3
# General type used throughout the class for pos enc parameters.
PositionalEncodingsParams = Any
@chex.dataclass
class SinCosParams:
"""Parameters for the classical sin/cos positional encoding."""
# The maximum wavelength used.
max_time: int = 10_000
RelativeParams = SinCosParams
POS_ENC_TABLE = {
'NONE': PositionalEncodings.NONE,
'SIN_COS': PositionalEncodings.SIN_COS,
'ALIBI': PositionalEncodings.ALIBI,
'RELATIVE': PositionalEncodings.RELATIVE,
}
POS_ENC_PARAMS_TABLE = {
'NONE': SinCosParams,
'SIN_COS': SinCosParams,
'ALIBI': SinCosParams,
'RELATIVE': RelativeParams,
}
def sinusoid_position_encoding(
sequence_length: int,
hidden_size: int,
memory_length: int = 0,
max_timescale: float = 1e4,
min_timescale: float = 2.,
clamp_length: int = 0,
causal: bool = False,
):
"""Creates sinusoidal encodings.
The time dimension is larger than sequence_length as we need to cover all
cases of looking in either the future or past.
Args:
sequence_length: `int` sequence length, L
hidden_size: `int` dimension of the positional encoding vectors, D
memory_length: `int` size of the memory, M
max_timescale: `int` maximum timescale for the frequency
min_timescale: `int` minimum timescale for the frequency
clamp_length: If greater than 0, any positions further apart than
`clamp_length` are clamped to this value
causal: If true then generates a smaller set (L vs 2 * L) of time-encodings
for the use-case of causal attention.
Returns:
An array of shape [L + M, D] for causal and [2 * L + M, D] otherwise.
"""
freqs = np.arange(0, hidden_size, min_timescale)
inv_freq = max_timescale**(-freqs / hidden_size)
# Since inputs can look into the past and into the future, depending on the
# permutation mask, we need to have relative encodings for both. The furthest
# back an input can see is the final token, up to sequence_length +
# memory_length - 1. The furthest ahead an input can see is for token 0 where
# it can see up to sequence_length - 1 future tokens.
if causal:
pos_seq = np.arange(sequence_length + memory_length, 0, -1.0)
else:
pos_seq = np.arange(sequence_length + memory_length, -sequence_length, -1.0)
if clamp_length:
pos_seq = np.clip(pos_seq, a_min=-clamp_length, a_max=clamp_length)
sinusoid_inp = np.einsum('i,j->ij', pos_seq, inv_freq)
pos_emb = np.concatenate(
[np.sin(sinusoid_inp), np.cos(sinusoid_inp)], axis=-1)
return pos_emb
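# Illustrative check (an addition, not part of the original module): the table
# has one row per relative offset, so the causal variant has L + M rows and
# the non-causal variant 2 * L + M rows.
def _example_sinusoid_shapes():
  causal = sinusoid_position_encoding(sequence_length=4, hidden_size=8,
                                      causal=True)
  full = sinusoid_position_encoding(sequence_length=4, hidden_size=8,
                                    causal=False)
  assert causal.shape == (4, 8)
  assert full.shape == (8, 8)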
def _rel_shift_inner(logits: jax.Array,
attention_length: int) -> jax.Array:
"""Shifts the relative logits.
This is a more general than the original Transformer-XL implementation as
inputs may also see the future. (The implementation does not rely on a
causal mask removing the upper-right triangle.)
Given attention length 3 and inputs:
[[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2],
[-3, -2, -1, 0, 1, 2]]
The shifted output is:
[[0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]]
Args:
logits: input tensor of shape [T_q, T_v + T_q]
attention_length: T_v `int` length of the attention, should be equal to
memory size + sequence length.
Returns:
A shifted version of the input of size [T_q, T_v]. In each row, a window of
size T_v elements is kept. The window starts at
the rightmost end, for the first row. It then shifts left by 1 for each
subsequent row.
"""
if logits.ndim != 2:
raise ValueError('`logits` needs to be an array of dimension 2.')
tq, total_len = logits.shape
assert total_len == tq + attention_length
logits = jnp.reshape(logits, [total_len, tq])
logits = lax.slice(logits, (1, 0), logits.shape) # logits[1:]
logits = jnp.reshape(logits, [tq, total_len - 1])
# Equiv to logits[:, :attention_length].
logits = lax.slice(logits, (0, 0), (tq, attention_length))
return logits
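# Illustrative check of the docstring example above (an addition, not part of
# the original module).
def _example_rel_shift_inner():
  logits = jnp.array([[-3, -2, -1, 0, 1, 2]] * 3)
  shifted = _rel_shift_inner(logits, attention_length=3)
  expected = jnp.array([[0, 1, 2], [-1, 0, 1], [-2, -1, 0]])
  assert (shifted == expected).all()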
def _rel_shift_causal(logits: jax.Array) -> jax.Array:
"""Shifts the relative logits, assuming causal attention.
Given inputs:
[[-4, -3, -2, -1],
[-4, -3, -2, -1]]
The shifted (and, later, masked) output is:
[[-3, -2, -1, 0],
[-4, -3, -2, -1]]
Args:
logits: input tensor of shape [T_q, T_v]
Returns:
A shifted version of the input of size [T_q, T_v].
"""
t1, t2 = logits.shape
# We prepend zeros on the final timescale dimension.
to_pad = jnp.zeros_like(logits[..., :1])
x = jnp.concatenate((to_pad, logits), axis=-1)
# Reshape trick to shift input.
x = jnp.reshape(x, [t2 + 1, t1])
# Remove extra time dimension and re-shape.
x = jax.lax.slice(x, [1] + [0] * (x.ndim - 1), x.shape)
return jnp.reshape(x, [t1, t2])
def relative_shift(logits: jax.Array,
attention_length: int,
causal: bool = False) -> jax.Array:
if causal:
fn = _rel_shift_causal
else:
fn = lambda t: _rel_shift_inner(t, attention_length)
return jax.vmap(jax.vmap(fn))(logits)
def compute_attention_with_relative_encodings(
queries: chex.Array,
keys: chex.Array,
max_time: int = 10_000,
causal: bool = False) -> chex.Array:
"""Returns attention with relative positional encodings.
This code strictly follows what is described in the TransformerXL paper.
https://arxiv.org/pdf/1901.02860.pdf
Args:
queries: The queries used for attention. Shape (b, t, h, d).
keys: The keys used for attention. Shape (b, T, h, d).
max_time: Constant used to scale position by in the sin/cos encodings.
causal: Whether to use causal attention when shifting the relative logits.
Returns:
The attention logits. Shape (b, h, t, T).
"""
batch_size, k_seq_len, num_heads, num_hiddens = keys.shape
hiddens = num_hiddens * num_heads
# First compute the content logits.
content_bias = hk.get_parameter(
name='relpos_contentbias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02))
content_logits = jnp.einsum('bthd,bThd->bhtT', queries + content_bias, keys)
positional_encodings = sinusoid_position_encoding(
sequence_length=k_seq_len,
hidden_size=hiddens,
memory_length=0,
max_timescale=max_time,
min_timescale=2,
clamp_length=0,
causal=causal,
)
positional_encodings = jnp.broadcast_to(positional_encodings, (batch_size,) +
positional_encodings.shape)
relative_keys = hk.Linear(hiddens, with_bias=False)(positional_encodings)
relative_keys = jnp.reshape(
relative_keys, positional_encodings.shape[:-1] + (num_heads, num_hiddens))
# Then compute the relative part.
relative_bias = hk.get_parameter(
name='relpos_relativebias',
shape=[num_heads, num_hiddens],
init=hk.initializers.RandomNormal(stddev=0.02))
relative_logits = jnp.einsum('bthd,bThd->bhtT', queries + relative_bias,
relative_keys)
# We shift the relative logits instead of the positional encoding matrix as
# described in Appendix B of the paper (https://arxiv.org/pdf/1901.02860.pdf).
relative_logits = relative_shift(
relative_logits, attention_length=content_logits.shape[-1], causal=causal)
assert content_logits.shape == relative_logits.shape
return content_logits + relative_logits
def _get_alibi_slopes(num_heads: int) -> list[float]:
"""Returns the slopes for the different attention heads.
While this does not exactly match the description of the [ALiBi
paper](https://arxiv.org/pdf/2108.12409.pdf), it corresponds to the [official
implementation](https://github.com/ofirpress/attention_with_linear_biases/blob/a06526fbfe557f9148e414b8569dcb97c7b182ba/fairseq/models/transformer.py#L742).
Args:
num_heads: The number of attention heads to create slopes for.
"""
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n) - 3)))
ratio = start
return [start * ratio**i for i in range(n)]
if math.log2(num_heads).is_integer():
return get_slopes_power_of_2(num_heads)
else:
closest_power_of_2 = 2**math.floor(math.log2(num_heads))
return (get_slopes_power_of_2(closest_power_of_2) + _get_alibi_slopes(
2 * closest_power_of_2)[0::2][:num_heads - closest_power_of_2])
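# Illustrative check (an addition, not part of the original module): with 8
# heads the slopes form the geometric sequence 2^-1, 2^-2, ..., 2^-8, matching
# the ALiBi paper.
def _example_alibi_slopes():
  assert _get_alibi_slopes(8) == [2**-i for i in range(1, 9)]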
def compute_alibi_encodings_biases(
attention_shape: tuple[int, ...]
) -> chex.Array:
"""Returns the biases following the ALiBi method.
This code strictly follows what is described in the ALiBi paper.
https://arxiv.org/pdf/2108.12409.pdf
Args:
attention_shape: The attention logits shape, without batch size, (h, t, T).
Returns:
The alibi biases, same shape as the input logits shape.
"""
num_heads, q_seq_len, k_seq_len = attention_shape
# Since we do not use causal masking, the upper triangle of the matrix has to
# be nonzero. Therefore, we set it equal to the lower triangle, but we also
# add a constant factor of 0.5 to the lower triangle, to (arbitrarily) break
# the symmetry (otherwise, the model cannot distinguish left and right).
alibi = np.zeros((q_seq_len, k_seq_len))
alibi -= sum(np.tri(*alibi.shape, k=-i) for i in range(1, q_seq_len))
alibi -= sum(np.tri(*alibi.T.shape, k=-i).T for i in range(1, k_seq_len))
alibi += 0.5 * np.tri(*alibi.shape, k=-1)
return alibi * jnp.array(_get_alibi_slopes(num_heads))[:, None, None]
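# Illustrative check (an addition, not part of the original module): for an
# attention tensor with 2 heads and 3 query/key positions, each head receives
# a [3, 3] bias whose entries decrease linearly with the query-key distance,
# scaled by that head's slope (plus the 0.5 offset below the diagonal
# described in the comments above).
def _example_alibi_biases():
  biases = compute_alibi_encodings_biases((2, 3, 3))
  assert biases.shape == (2, 3, 3)
  return biases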
| nonstationary_mbml-main | models/positional_encodings.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import os
from setuptools import find_namespace_packages
from setuptools import setup
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
def _get_version():
with open(os.path.join(_CURRENT_DIR, 'dks', '__init__.py')) as fp:
for line in fp:
if line.startswith('__version__') and '=' in line:
version = line[line.find('=') + 1:].strip(' \'"\n')
if version:
return version
raise ValueError('`__version__` not defined in `dks/__init__.py`')
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
_VERSION = _get_version()
setup(
name='dks',
version=_VERSION,
url='https://github.com/deepmind/dks',
license='Apache 2.0',
author='DeepMind',
description=('A Python library implementing the DKS/TAT neural network '
'transformation method.'),
long_description=open(os.path.join(_CURRENT_DIR, 'README.md')).read(),
long_description_content_type='text/markdown',
author_email='[email protected]',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['tests']),
install_requires=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements.txt')),
tests_require=_parse_requirements(
os.path.join(_CURRENT_DIR, 'requirements_tests.txt')),
extras_require={
key: _parse_requirements(
os.path.join(_CURRENT_DIR, f'requirements_{key}.txt'))
for key in {'jax', 'pytorch', 'tf', 'tests'}
},
    python_requires='>=3.7',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| dks-main | setup.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root package. Do not use directly."""
# Do not directly import this package; it won't do anything. Instead, import one
# of the framework-specific subpackages.
__version__ = "0.1.2"
| dks-main | dks/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for examples."""
from dks.examples import haiku
| dks-main | dks/examples/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for Haiku examples."""
from dks.examples.haiku import modified_resnet
| dks-main | dks/examples/haiku/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A "modified ResNet model" in Haiku with support for both DKS and TAT."""
import math
from typing import Any, Callable, Mapping, Optional, Sequence, Union
from dks.jax import activation_transform
from dks.jax import haiku_initializers
import haiku as hk
import jax.numpy as jnp
FloatStrOrBool = Union[str, float, bool]
BN_CONFIG = {
"create_offset": True,
"create_scale": True,
"decay_rate": 0.999,
}
class BlockV1(hk.Module):
"""ResNet V1 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bottleneck: bool,
use_batch_norm: bool,
activation: Callable[[jnp.ndarray], jnp.ndarray],
shortcut_weight: Optional[float],
w_init: Optional[Any],
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
self.use_batch_norm = use_batch_norm
self.shortcut_weight = shortcut_weight
if self.use_projection and self.shortcut_weight != 0.0:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="shortcut_conv")
if use_batch_norm:
self.proj_batchnorm = hk.BatchNorm(
name="shortcut_batchnorm", **BN_CONFIG)
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_0")
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_1")
layers = (conv_0, conv_1)
if use_batch_norm:
bn_0 = hk.BatchNorm(name="batchnorm_0", **BN_CONFIG)
bn_1 = hk.BatchNorm(name="batchnorm_1", **BN_CONFIG)
bn_layers = (bn_0, bn_1)
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_2")
layers = layers + (conv_2,)
if use_batch_norm:
bn_2 = hk.BatchNorm(name="batchnorm_2", **BN_CONFIG)
bn_layers += (bn_2,)
self.bn_layers = bn_layers
self.layers = layers
self.activation = activation
def __call__(self, inputs, is_training, test_local_stats):
out = shortcut = inputs
if self.use_projection and self.shortcut_weight != 0.0:
shortcut = self.proj_conv(shortcut)
if self.use_batch_norm:
shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)
for i, conv_i in enumerate(self.layers):
out = conv_i(out)
if self.use_batch_norm:
out = self.bn_layers[i](out, is_training, test_local_stats)
if i < len(self.layers) - 1: # Don't apply activation on last layer
out = self.activation(out)
if self.shortcut_weight is None:
return self.activation(out + shortcut)
elif self.shortcut_weight != 0.0:
return self.activation(
math.sqrt(1 - self.shortcut_weight**2) * out +
self.shortcut_weight * shortcut)
else:
return out
class BlockV2(hk.Module):
"""ResNet V2 block with optional bottleneck."""
def __init__(
self,
channels: int,
stride: Union[int, Sequence[int]],
use_projection: bool,
bottleneck: bool,
use_batch_norm: bool,
activation: Callable[[jnp.ndarray], jnp.ndarray],
shortcut_weight: Optional[float],
w_init: Optional[Any],
name: Optional[str] = None,
):
super().__init__(name=name)
self.use_projection = use_projection
self.use_batch_norm = use_batch_norm
self.shortcut_weight = shortcut_weight
if self.use_projection and self.shortcut_weight != 0.0:
self.proj_conv = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=stride,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="shortcut_conv")
channel_div = 4 if bottleneck else 1
conv_0 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=1 if bottleneck else 3,
stride=1 if bottleneck else stride,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_0")
conv_1 = hk.Conv2D(
output_channels=channels // channel_div,
kernel_shape=3,
stride=stride if bottleneck else 1,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_1")
layers = (conv_0, conv_1)
if use_batch_norm:
bn_0 = hk.BatchNorm(name="batchnorm_0", **BN_CONFIG)
bn_1 = hk.BatchNorm(name="batchnorm_1", **BN_CONFIG)
bn_layers = (bn_0, bn_1)
if bottleneck:
conv_2 = hk.Conv2D(
output_channels=channels,
kernel_shape=1,
stride=1,
w_init=w_init,
with_bias=not use_batch_norm,
padding="SAME",
name="conv_2")
layers = layers + (conv_2,)
if use_batch_norm:
bn_2 = hk.BatchNorm(name="batchnorm_2", **BN_CONFIG)
bn_layers += (bn_2,)
self.bn_layers = bn_layers
self.layers = layers
self.activation = activation
def __call__(self, inputs, is_training, test_local_stats):
x = shortcut = inputs
for i, conv_i in enumerate(self.layers):
if self.use_batch_norm:
x = self.bn_layers[i](x, is_training, test_local_stats)
x = self.activation(x)
if i == 0 and self.use_projection and self.shortcut_weight != 0.0:
shortcut = self.proj_conv(x)
x = conv_i(x)
if self.shortcut_weight is None:
return x + shortcut
elif self.shortcut_weight != 0.0:
return math.sqrt(
1 - self.shortcut_weight**2) * x + self.shortcut_weight * shortcut
else:
return x
class BlockGroup(hk.Module):
"""Higher level block for ResNet implementation."""
def __init__(
self,
channels: int,
num_blocks: int,
stride: Union[int, Sequence[int]],
resnet_v2: bool,
bottleneck: bool,
use_projection: bool,
use_batch_norm: bool,
activation: Callable[[jnp.ndarray], jnp.ndarray],
shortcut_weight: Optional[float],
w_init: Optional[Any],
name: Optional[str] = None,
):
super().__init__(name=name)
block_cls = BlockV2 if resnet_v2 else BlockV1
self.blocks = []
for i in range(num_blocks):
self.blocks.append(
block_cls(
channels=channels,
stride=(1 if i else stride),
use_projection=(i == 0 and use_projection),
use_batch_norm=use_batch_norm,
bottleneck=bottleneck,
shortcut_weight=shortcut_weight,
activation=activation,
w_init=w_init,
name="block_%d" % (i)))
def __call__(self, inputs, is_training, test_local_stats):
out = inputs
for block in self.blocks:
out = block(out, is_training, test_local_stats)
return out
def check_length(length, value, name):
if len(value) != length:
raise ValueError(f"`{name}` must be of length 4 not {len(value)}")
class ModifiedResNet(hk.Module):
"""Modified version of an Imagenet ResNet model that supports DKS/TAT."""
CONFIGS = {
18: {
"blocks_per_group": (2, 2, 2, 2),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
34: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": False,
"channels_per_group": (64, 128, 256, 512),
"use_projection": (False, True, True, True),
},
50: {
"blocks_per_group": (3, 4, 6, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
101: {
"blocks_per_group": (3, 4, 23, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
152: {
"blocks_per_group": (3, 8, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
200: {
"blocks_per_group": (3, 24, 36, 3),
"bottleneck": True,
"channels_per_group": (256, 512, 1024, 2048),
"use_projection": (True, True, True, True),
},
}
def __init__(
self,
num_classes: int,
depth: int,
resnet_v2: bool = True,
use_batch_norm: bool = False,
shortcut_weight: Optional[float] = 0.0,
activation_name: str = "softplus",
w_init: Optional[Any] = haiku_initializers.ScaledUniformOrthogonal(
delta=True),
logits_config: Optional[Mapping[str, Any]] = None,
initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
dropout_rate: float = 0.0,
transformation_method: str = "DKS",
dks_params: Optional[Mapping[str, FloatStrOrBool]] = None,
tat_params: Optional[Mapping[str, FloatStrOrBool]] = None,
name: Optional[str] = None,
):
"""Constructs a "modified ResNet model" with support for both DKS and TAT.
By default, we construct the network *without* normalization layers or
skip connections (making it a "vanilla network"), initialize the weights
with the SUO distribution, and use DKS to transform the activation functions
(which are "softplus" by default). These behaviors, and the option to use
    TAT, are controlled via the constructor arguments.
This file was adapted from the original Haiku ResNet implementation:
https://github.com/deepmind/dm-haiku/blob/main/haiku/_src/nets/resnet.py
It is the end result of applying the rules described in the section titled
"Summary of our method" in the DKS paper (https://arxiv.org/abs/2110.01765)
to what is essentially a standard ResNet. See the section titled
"Application to various modified ResNets" in the DKS paper for more details.
The only departure from this is that we construct the "maximal C map
function" instead of the "maximal slope function" (which can be computed
from the former), which enables support for TAT.
Args:
num_classes: The number of classes to classify the inputs into.
depth: The number of layers.
resnet_v2: Whether to use the v2 ResNet implementation instead of v1.
Defaults to ``True``.
use_batch_norm: Whether to use Batch Normalization (BN). Note that DKS/TAT
are not compatible with the use of BN. Defaults to ``False``.
shortcut_weight: The weighting factor of shortcut branch, which must be
a float between 0 and 1, or None. If not None, the shortcut branch is
multiplied by ``shortcut_weight``, and the residual branch is multiplied
by ``residual_weight``, where
``shortcut_weight**2 + residual_weight**2 == 1.0``.
If None, no multiplications are performed (which corresponds to a
standard ResNet), and compatibility with DKS/TAT is lost. Note that
setting ``shortcut_weight`` to 0.0 effectively removes the skip
connections from the network. Defaults to ``0.0``.
activation_name: String name for activation function. To get TReLU from
the TAT paper one should set this to ``leaky_relu``, and set
the ``transformation_method`` argument to ``TAT``. Defaults to
``softplus``.
w_init: Haiku initializer used to initialize the weights.
logits_config: A dictionary of keyword arguments for the logits layer.
initial_conv_config: Keyword arguments passed to the constructor of the
initial :class:`~haiku.Conv2D` module.
      dropout_rate: A float giving the dropout rate for the penultimate layer of the
network (i.e. right before the layer which produces the class logits).
(Default: 0.0)
transformation_method: A string representing the method used to transform
the activation function. Can be ``DKS``, ``TAT``, or ``untransformed``.
Defaults to ``DKS``.
dks_params: A dictionary containing the parameters to use for DKS. See
``dks.base.activation_transform.get_transformed_activations`` for more
details. Defaults to ``None``.
tat_params: A dictionary containing the parameters to use for TAT. See
``dks.base.activation_transform.get_transformed_activations`` for more
details. Defaults to ``None``.
name: Name of the Sonnet module.
"""
super().__init__(name=name)
if shortcut_weight is not None and (shortcut_weight > 1.0
or shortcut_weight < 0.0):
raise ValueError("Unsupported value for shortcut_weight.")
if (use_batch_norm and
(transformation_method == "DKS" or transformation_method == "TAT")):
raise ValueError("DKS and TAT are not compatible with the use of BN "
"layers.")
if (shortcut_weight is None and
(transformation_method == "DKS" or transformation_method == "TAT")):
raise ValueError("Must specify a value for shortcut_weight when using "
"DKS or TAT.")
self.depth = depth
self.resnet_v2 = resnet_v2
self.use_batch_norm = use_batch_norm
self.shortcut_weight = shortcut_weight
self.activation_name = activation_name
self.dropout_rate = dropout_rate
blocks_per_group = ModifiedResNet.CONFIGS[depth]["blocks_per_group"]
channels_per_group = ModifiedResNet.CONFIGS[depth]["channels_per_group"]
bottleneck = ModifiedResNet.CONFIGS[depth]["bottleneck"]
use_projection = ModifiedResNet.CONFIGS[depth]["use_projection"]
logits_config = dict(logits_config or {})
logits_config.setdefault("w_init", w_init)
logits_config.setdefault("name", "logits")
# Number of blocks in each group for ResNet.
check_length(4, blocks_per_group, "blocks_per_group")
check_length(4, channels_per_group, "channels_per_group")
initial_conv_config = dict(initial_conv_config or {})
initial_conv_config.setdefault("output_channels", 64)
initial_conv_config.setdefault("kernel_shape", 7)
initial_conv_config.setdefault("stride", 2)
initial_conv_config.setdefault("with_bias", not use_batch_norm)
initial_conv_config.setdefault("padding", "SAME")
initial_conv_config.setdefault("name", "initial_conv")
initial_conv_config.setdefault("w_init", w_init)
act_dict = activation_transform.get_transformed_activations(
[self.activation_name], method=transformation_method,
dks_params=dks_params, tat_params=tat_params,
subnet_max_func=self.subnet_max_func)
self.activation = act_dict[self.activation_name]
self.initial_conv = hk.Conv2D(**initial_conv_config)
if not self.resnet_v2 and use_batch_norm:
self.initial_batchnorm = hk.BatchNorm(
name="initial_batchnorm", **BN_CONFIG)
self.block_groups = []
strides = (1, 2, 2, 2)
for i in range(4):
self.block_groups.append(
BlockGroup(
channels=channels_per_group[i],
num_blocks=blocks_per_group[i],
stride=strides[i],
resnet_v2=resnet_v2,
bottleneck=bottleneck,
use_batch_norm=use_batch_norm,
use_projection=use_projection[i],
shortcut_weight=shortcut_weight,
activation=self.activation,
w_init=w_init,
name="block_group_%d" % (i)))
if self.resnet_v2 and use_batch_norm:
self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **BN_CONFIG)
self.logits = hk.Linear(num_classes, **logits_config)
def __call__(self, inputs, is_training, test_local_stats=False):
out = inputs
out = self.initial_conv(out)
if not self.resnet_v2:
if self.use_batch_norm:
out = self.initial_batchnorm(out, is_training, test_local_stats)
out = self.activation(out)
out = hk.max_pool(
out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding="SAME")
for block_group in self.block_groups:
out = block_group(out, is_training, test_local_stats)
if self.resnet_v2:
if self.use_batch_norm:
out = self.final_batchnorm(out, is_training, test_local_stats)
out = self.activation(out)
out = jnp.mean(out, axis=(1, 2))
if self.dropout_rate > 0.0 and is_training:
out = hk.dropout(hk.next_rng_key(), self.dropout_rate, out)
return self.logits(out)
def subnet_max_func(self, x, r_fn):
return subnet_max_func(x, r_fn, self.depth, self.shortcut_weight,
resnet_v2=self.resnet_v2)
def subnet_max_func(x, r_fn, depth, shortcut_weight, resnet_v2=True):
"""The subnetwork maximizing function of the modified ResNet model."""
# See Appendix B of the TAT paper for a step-by-step procedure for how
# to compute this function for different architectures.
blocks_per_group = ModifiedResNet.CONFIGS[depth]["blocks_per_group"]
bottleneck = ModifiedResNet.CONFIGS[depth]["bottleneck"]
use_projection = ModifiedResNet.CONFIGS[depth]["use_projection"]
if bottleneck and resnet_v2:
res_fn = lambda z: r_fn(r_fn(r_fn(z)))
elif (not bottleneck and resnet_v2) or (bottleneck and not resnet_v2):
res_fn = lambda z: r_fn(r_fn(z))
else:
res_fn = r_fn
res_branch_subnetwork = res_fn(x)
for i in range(4):
for j in range(blocks_per_group[i]):
res_x = res_fn(x)
if j == 0 and use_projection[i] and resnet_v2:
shortcut_x = r_fn(x)
else:
shortcut_x = x
x = (shortcut_weight**2 * shortcut_x + (1.0 - shortcut_weight**2) * res_x)
if not resnet_v2:
x = r_fn(x)
x = r_fn(x)
return max(x, res_branch_subnetwork)
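# A minimal usage sketch (an addition, not part of the original module). The
# model is an ordinary Haiku module, so it has to be wrapped with
# hk.transform_with_state; the depth, class count and image shape below are
# illustrative assumptions only.
def _example_modified_resnet_usage():
  import jax
  def forward(images, is_training):
    model = ModifiedResNet(num_classes=10, depth=50)
    return model(images, is_training=is_training)
  net = hk.transform_with_state(forward)
  images = jnp.zeros([2, 32, 32, 3])
  params, state = net.init(jax.random.PRNGKey(0), images, is_training=True)
  logits, state = net.apply(
      params, state, jax.random.PRNGKey(1), images, is_training=True)
  return logits  # Shape [2, 10].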
| dks-main | dks/examples/haiku/modified_resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pre-processing functions for use with DKS/TAT in Tensorflow."""
import functools
import operator
from absl import logging
import tensorflow as tf
_prod = lambda x: functools.reduce(operator.mul, x, 1)
def per_location_normalization(x, homog_mode="one", homog_scale=1.0,
has_batch_dim=True):
"""Applies Per-Location Normalization (PLN) to a given tensor.
This function generalizes the idea of PLN from the DKS paper to tensors of
arbitrary shape. Normalization is done over the last dimension (and only the
  last dimension), so that ``tf.reduce_mean(PLN(x)**2, axis=-1, keepdims=True)``
is a tensor of ones. Note here that "normalization" does not correspond to
making the vectors at each location have norm 1. Rather, they will have a
squared norm given by ``x.shape[-1]``.
All dimensions, except for the last, and possibly the first (which may be the
batch dimension), are treated as indexing different "locations", analogous to
how locations are indexed by the height and width dimensions in convolutional
layers. The last dimension is always considered the "data" or "feature"
dimension, analogous to the channels dimension in convolutional layers. For
models where the dimensions don't have this interpretation, this type of
preprocessing may not be suitable. (And it's likely that the rest of the
  ``dks`` package, and perhaps even the DKS/TAT method itself, won't be
applicable either.)
Before normalization occurs, a homogeneous coordinate may be appended to the
  last dimension of the tensor. Whether and how this is done depends on the
  values of the arguments ``homog_mode`` and ``homog_scale``, as described in the
arguments section. This step is designed to preserve the information that
would otherwise be lost due to normalization.
The motivation for PLN is to ensure that the input "q values" to a network are
always 1, which is a technical requirement of DKS/TAT. While DKS/TAT can often
work well in practice without PLN, there are situations where using PLN will
be crucial. In particular, if the input data, or particular samples from it,
have an extreme scale that deviates from the typical ones seen in CIFAR and
ImageNet (with the standard preprocessing applied). With CIFAR in particular
we have observed that some pixels in some images have feature vectors that are
exactly zero, which can lead to problems when using TAT with leaky ReLUs.
See the section titled "Uniform q values via Per-Location Normalization" in
the DKS paper (https://arxiv.org/abs/2110.01765) for a discussion of PLN.
Args:
x: A TF tensor representing the input to a network to be normalized. If
``x`` has a batch dimension it must be the first one.
homog_mode: A string indicating whether to append a homogeneous coordinate,
and how to compute it. Can be ``one``, ``avg_q``, or ``off``. If
``one``, the coordinate will have the value 1. If ``avg_q``, it will be
given by taking the mean squared value of ``x`` across the non-batch axes.
      If ``off``, no homogeneous coordinate will be added. (Default: "one")
    homog_scale: A float used to rescale the homogeneous coordinate (if used).
(Default: 1.0)
has_batch_dim: A boolean specifying whether ``x`` has a batch dimension
(which will always be its first dimension). Note that many data processing
pipelines will process training cases one at a time. Unless this is done
with a singleton leading "dummy" batch dimension (which isn't typical)
this argument should be set to False. (Default: True)
Returns:
A TF tensor which is the result of applying PLN to ``x``, as described
above.
"""
def q_val(z, axis):
return tf.reduce_mean(tf.square(z), axis=axis, keepdims=True)
x_shape = x.shape.as_list()
if len(x_shape) == 0 and has_batch_dim: # pylint: disable=g-explicit-length-test
raise ValueError("x doesn't appear to have a batch dimension.")
if homog_mode == "avg_q" and ((len(x_shape) <= 2 and has_batch_dim)
or (len(x_shape) <= 1 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
raise ValueError("homog_mode='avg_q' should not be used for datasets with "
"no time/location dimension, as it doesn't offer anything "
"beyond what homog_mode='off' would in such cases.")
if ((len(x_shape) == 1 and has_batch_dim)
or (len(x_shape) == 0 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
x = tf.expand_dims(x, axis=-1)
x_shape += [1]
# the threshold 20 is arbitrary
if _prod(x_shape[1 if has_batch_dim else 0:]) < 20 and homog_mode == "avg_q":
logging.warning("Using homog_mode='avg_q' for datasets with few total "
"degrees of freedom per batch element (taken over "
"time/location dimensions and the data dimension) is "
"dangerous. This is because it will remove one degree of "
"freedom, and possibly destroy important information. See "
"the discussion in the subsection of the DKS paper titled "
"'Uniform q values via Per-Location Normalization'.")
if x_shape[-1] < 20 and homog_mode == "off":
logging.warning("Using homog_mode='off' for datasets with a small data "
"dimension is dangerous. This is because it will remove "
"one degree of freedom in this dimension, and possibly "
"destroy important information. See the discussion in the "
"subsection of the DKS paper titled 'Uniform q values via "
"Per-Location Normalization'.")
if homog_mode == "avg_q":
homog = tf.sqrt(q_val(x, axis=list(range(1 if has_batch_dim else 0,
len(x_shape)))))
if has_batch_dim:
homog = tf.tile(homog, [1] + x_shape[1:-1] + [1])
else:
homog = tf.tile(homog, x_shape[:-1] + [1])
elif homog_mode == "one":
homog = tf.ones(x_shape[:-1] + [1])
elif homog_mode == "off":
homog = None
else:
raise ValueError(f"Unrecognized value for homog_mode: {homog_mode}.")
if homog_scale != 1.0 and homog is not None:
homog = homog_scale * homog
if homog is not None:
x = tf.concat([x, homog], axis=-1)
x = x / tf.sqrt(q_val(x, axis=-1))
return x
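# A minimal usage sketch (an addition, not part of the original module):
# normalizing a single unbatched image, e.g. inside a tf.data pipeline. The
# image shape is an illustrative assumption; the feature dimension must be
# last.
def _example_per_location_normalization():
  image = tf.random.uniform([32, 32, 3])
  normalized = per_location_normalization(image, has_batch_dim=False)
  # The channel dimension grows by one because of the appended homogeneous
  # coordinate, and every location now has a q value (mean square) of one.
  assert normalized.shape == (32, 32, 4)
  return tf.reduce_mean(tf.square(normalized), axis=-1)  # All ones.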
| dks-main | dks/tensorflow/data_preprocessing.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for Tensorflow."""
from dks.tensorflow import activation_transform
from dks.tensorflow import data_preprocessing
from dks.tensorflow import parameter_sampling_functions
| dks-main | dks/tensorflow/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter sampling functions for use with DKS/TAT in TensorFlow."""
import numpy as np
import tensorflow as tf
def _stateless_uniform_orthogonal(shape, seed, gain=1.0,
dtype=tf.dtypes.float32):
"""Samples an orthogonal matrix from the uniform/Haar distribution."""
# The implementation of this function is essentially copied from
# tf.initializers.Orthogonal.
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional. Received: "
f"shape={shape} of rank {len(shape)}.")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
a = tf.random.stateless_normal(flat_shape, seed, dtype=dtype)
# Compute the qr factorization
q, r = tf.linalg.qr(a, full_matrices=False)
# Make q uniform
d = tf.linalg.tensor_diag_part(r)
q *= tf.sign(d)
if num_rows < num_cols:
q = tf.linalg.matrix_transpose(q)
return gain * tf.reshape(q, shape)
def stateless_scaled_uniform_orthogonal(
shape, seed, gain=1.0, delta=True, dtype=tf.dtypes.float32):
"""Initializes fully-connected or conv weights using the SUO distribution.
  Similar to a stateless, functional version of tf.initializers.Orthogonal,
  except that it supports Delta initializations, and sampled weights are
rescaled by ``max(sqrt(out_dim / in_dim), 1)``, so that the layer preserves
q values at initialization-time (assuming initial biases of zero).
Note that this is a stateless function, and will produce the exact same output
given the same arguments. A stateful random op can be created by passing the
output of some stateful random op as the ``seed`` argument.
Should be used with a zeros initializer for the bias parameters for DKS/TAT.
See the "Parameter distributions" section of DKS paper
(https://arxiv.org/abs/2110.01765) for a discussion of the SUO distribution
and Delta initializations.
Args:
shape: A list of integers giving the shape of the parameter tensor.
seed: A shape [2] Tensor, the seed to the random number generator. Must have
dtype tf.int32 or tf.int64. (When using XLA, only tf.int32 is allowed.)
gain: A float giving an additional scale factor applied on top of the
      standard rescaling used in the SUO distribution. This should be left
at its default value when using DKS/TAT. (Default: 1.0)
delta: A bool determining whether or not to use a Delta initialization
(which zeros out all weights except those in the central location of
convolutional filter banks). (Default: True)
    dtype: A float dtype for the return value. (Default: tf.dtypes.float32)
Returns:
The sampled weights as a TF Tensor.
"""
if delta and len(shape) != 2:
# We assume 'weights' is a filter bank when len(shape) != 2
# In TensorFlow, conv filter banks have the shape
# [loc_dim_1, loc_dim_2, in_dim, out_dim]
in_dim = shape[-2]
out_dim = shape[-1]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
nonzero_part = _stateless_uniform_orthogonal(
shape[-2:], seed, gain=(rescale_factor * gain), dtype=dtype)
if any(s % 2 != 1 for s in shape[:-2]):
raise ValueError("All spatial axes must have odd length for Delta "
"initializations.")
midpoints = tuple((s - 1) // 2 for s in shape[:-2])
return tf.scatter_nd((midpoints,), (nonzero_part,), shape)
else:
in_dim = np.prod(shape[:-1])
out_dim = shape[-1]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
return _stateless_uniform_orthogonal(
shape, seed, gain=(rescale_factor * gain), dtype=dtype)
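# A minimal usage sketch (an addition, not part of the original module):
# sampling a Delta-initialized 3x3 conv filter bank with 16 input and 32
# output channels. The seed value is an arbitrary illustrative choice.
def _example_suo_sample():
  seed = tf.constant([0, 1], dtype=tf.int32)
  weights = stateless_scaled_uniform_orthogonal([3, 3, 16, 32], seed)
  assert weights.shape == (3, 3, 16, 32)
  return weights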
| dks-main | dks/tensorflow/parameter_sampling_functions.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF implementation of the activation transformations used in DKS/TAT."""
from dks.base import activation_transform
import tensorflow as tf
def _get_tf_activation_function(name):
"""Get activation function by name in TensorFlow."""
if name == "bentid":
return lambda x: (tf.sqrt(tf.square(x) + 1.) - 1.) / 2. + x
elif name == "erf":
return tf.math.erf
elif name == "atan":
return tf.math.atan
elif name == "asinh":
return tf.math.asinh
elif name == "leaky_relu":
return lambda x, negative_slope=0.01: tf.nn.leaky_relu( # pylint: disable=g-long-lambda
x, alpha=negative_slope)
elif name == "gelu":
return lambda x: tf.nn.gelu(x, approximate=True)
elif name == "gelu_exact":
return lambda x: tf.nn.gelu(x, approximate=False)
elif hasattr(tf.nn, name):
return getattr(tf.nn, name)
else:
raise ValueError(f"Unrecognized activation function name '{name}'.")
def get_transformed_activations(*args, **kwargs):
"""See ``dks.base.activation_transform.get_transformed_activations()``."""
return activation_transform.get_transformed_activations(
*args, **kwargs, activation_getter=_get_tf_activation_function)
| dks-main | dks/tensorflow/activation_transform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pre-processing functions for use with DKS/TAT in PyTorch."""
import functools
import operator
from absl import logging
import torch
_prod = lambda x: functools.reduce(operator.mul, x, 1)
def per_location_normalization(x, homog_mode="one", homog_scale=1.0,
has_batch_dim=True):
"""Applies Per-Location Normalization (PLN) to a given tensor.
This function generalizes the idea of PLN from the DKS paper to tensors of
arbitrary shape. Normalization is done over the last dimension (and only the
last dimension), so that ``torch.mean(PLN(x)**2, dim=-1, keepdim=True)`` is a
tensor of ones. Note here that "normalization" does not correspond to making
the vectors at each location have norm 1. Rather, they will have a squared
norm given by ``x.size()[-1]``.
All dimensions, except for the last, and possibly the first (which may be the
batch dimension), are treated as indexing different "locations", analogous to
how locations are indexed by the height and width dimensions in convolutional
layers. The last dimension is always considered the "data" or "feature"
dimension, analogous to the channels dimension in convolutional layers. For
models where the dimensions don't have this interpretation, this type of
preprocessing may not be suitable. (And it's likely that the rest of the
  ``dks`` package, and perhaps even the DKS/TAT method itself, won't be
applicable either.)
Before normalization occurs, a homogeneous coordinate may be appended to the
  last dimension of the tensor. Whether and how this is done depends on the
  values of the arguments ``homog_mode`` and ``homog_scale``, as described in the
arguments section. This step is designed to preserve the information that
would otherwise be lost due to normalization.
The motivation for PLN is to ensure that the input "q values" to a network are
always 1, which is a technical requirement of DKS/TAT. While DKS/TAT can often
work well in practice without PLN, there are situations where using PLN will
be crucial. In particular, if the input data, or particular samples from it,
have an extreme scale that deviates from the typical ones seen in CIFAR and
ImageNet (with the standard preprocessing applied). With CIFAR in particular
we have observed that some pixels in some images have feature vectors that are
exactly zero, which can lead to problems when using TAT with leaky ReLUs.
See the section titled "Uniform q values via Per-Location Normalization" in
the DKS paper (https://arxiv.org/abs/2110.01765) for a discussion of PLN.
Args:
x: A PyTorch tensor representing the input to a network to be normalized. If
``x`` has a batch dimension it must be the first one.
homog_mode: A string indicating whether to append a homogeneous coordinate,
and how to compute it. Can be ``one``, ``avg_q``, or ``off``. If
``one``, the coordinate will have the value 1. If ``avg_q``, it will be
given by taking the mean squared value of ``x`` across the non-batch axes.
      If ``off``, no homogeneous coordinate will be added. (Default: "one")
    homog_scale: A float used to rescale the homogeneous coordinate (if used).
(Default: 1.0)
has_batch_dim: A boolean specifying whether ``x`` has a batch dimension
(which will always be its first dimension). Note that many data processing
pipelines will process training cases one at a time. Unless this is done
with a singleton leading "dummy" batch dimension (which isn't typical)
this argument should be set to False. (Default: True)
Returns:
A PyTorch tensor which is the result of applying PLN to ``x``, as described
above.
"""
def q_val(z, dim):
return torch.mean(torch.square(z), dim=dim, keepdim=True)
x_shape = list(x.size())
if len(x_shape) == 0 and has_batch_dim: # pylint: disable=g-explicit-length-test
raise ValueError("x doesn't appear to have a batch dimension.")
if homog_mode == "avg_q" and ((len(x_shape) <= 2 and has_batch_dim)
or (len(x_shape) <= 1 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
raise ValueError("homog_mode='avg_q' should not be used for datasets with "
"no time/location dimension, as it doesn't offer anything "
"beyond what homog_mode='off' would in such cases.")
if ((len(x_shape) == 1 and has_batch_dim)
or (len(x_shape) == 0 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
x = torch.unsqueeze(x, dim=-1)
x_shape += [1]
# the threshold 20 is arbitrary
if _prod(x_shape[1 if has_batch_dim else 0:]) < 20 and homog_mode == "avg_q":
logging.warning("Using homog_mode='avg_q' for datasets with few total "
"degrees of freedom per batch element (taken over "
"time/location dimensions and the data dimension) is "
"dangerous. This is because it will remove one degree of "
"freedom, and possibly destroy important information. See "
"the discussion in the subsection of the DKS paper titled "
"'Uniform q values via Per-Location Normalization'.")
if x_shape[-1] < 20 and homog_mode == "off":
logging.warning("Using homog_mode='off' for datasets with a small data "
"dimension is dangerous. This is because it will remove "
"one degree of freedom in this dimension, and possibly "
"destroy important information. See the discussion in the "
"subsection of the DKS paper titled 'Uniform q values via "
"Per-Location Normalization'.")
if homog_mode == "avg_q":
homog = torch.sqrt(q_val(x, dim=list(range(1 if has_batch_dim else 0,
len(x_shape)))))
if has_batch_dim:
homog = torch.tile(homog, [1] + x_shape[1:-1] + [1])
else:
homog = torch.tile(homog, x_shape[:-1] + [1])
elif homog_mode == "one":
homog = torch.ones(x_shape[:-1] + [1])
elif homog_mode == "off":
homog = None
else:
raise ValueError(f"Unrecognized value for homog_mode: {homog_mode}.")
if homog_scale != 1.0 and homog is not None:
homog = homog_scale * homog
if homog is not None:
x = torch.cat([x, homog], dim=-1)
x = x / torch.sqrt(q_val(x, dim=-1))
return x
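# A minimal usage sketch (an addition, not part of the original module):
# normalizing a single unbatched image, e.g. inside a Dataset __getitem__. The
# image shape is an illustrative assumption; the feature dimension must be
# last.
def _example_per_location_normalization():
  image = torch.rand(32, 32, 3)
  normalized = per_location_normalization(image, has_batch_dim=False)
  # The channel dimension grows by one because of the appended homogeneous
  # coordinate, and every location now has a q value (mean square) of one.
  assert list(normalized.size()) == [32, 32, 4]
  return torch.mean(torch.square(normalized), dim=-1)  # All ones.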
| dks-main | dks/pytorch/data_preprocessing.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for PyTorch."""
from dks.pytorch import activation_transform
from dks.pytorch import data_preprocessing
from dks.pytorch import parameter_sampling_functions
| dks-main | dks/pytorch/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter sampling functions for use with DKS/TAT in PyTorch."""
import numpy as np
import torch
def scaled_uniform_orthogonal_(weights, gain=1.0, delta=True):
"""Initializes fully-connected or conv weights using the SUO distribution.
Similar to torch.nn.init.orthogonal_, except that it supports Delta
initializations, and sampled weights are rescaled by
``max(sqrt(out_dim / in_dim), 1)``, so that the layer preserves q values at
initialization-time (assuming initial biases of zero).
Note that as with all PyTorch functions ending with '_', this function
modifies the value of its tensor argument in-place.
Should be used with a zeros initializer for the bias parameters for DKS/TAT.
See the "Parameter distributions" section of DKS paper
(https://arxiv.org/abs/2110.01765) for a discussion of the SUO distribution
and Delta initializations.
Args:
weights: A PyTorch Tensor corresponding to the weights to be randomly
initialized.
gain: A float giving an additional scale factor applied on top of the
      standard rescaling used in the SUO distribution. This should be left
at its default value when using DKS/TAT. (Default: 1.0)
delta: A bool determining whether or not to use a Delta initialization
(which zeros out all weights except those in the central location of
convolutional filter banks). (Default: True)
Returns:
The ``weights`` argument (whose value will be initialized).
"""
shape = list(weights.size())
if delta and len(shape) != 2:
# We assume 'weights' is a filter bank when len(shape) != 2
    # In PyTorch, conv filter banks have the shape
# [in_dim, out_dim, loc_dim_1, loc_dim_2]
in_dim = shape[0]
out_dim = shape[1]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
nonzero_part = torch.nn.init.orthogonal_(weights.new_empty(in_dim, out_dim),
gain=(rescale_factor * gain))
if any(s % 2 != 1 for s in shape[2:]):
raise ValueError("All spatial axes must have odd length for Delta "
"initializations.")
midpoints = [(s - 1) // 2 for s in shape[2:]]
indices = [slice(None), slice(None)] + midpoints
with torch.no_grad():
weights.fill_(0.0)
weights.__setitem__(indices, nonzero_part)
return weights
else:
# torch.nn.orthogonal_ flattens dimensions [1:] instead of [:-1], which is
# the opposite of what we want here. So we'll first compute the version with
# the first two dimensions swapped, and then we'll transpose at the end.
shape = [shape[1], shape[0]] + shape[2:]
in_dim = np.prod(shape[1:])
out_dim = shape[0]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
weights_t = torch.nn.init.orthogonal_(weights.new_empty(shape),
gain=(rescale_factor * gain))
with torch.no_grad():
return weights.copy_(weights_t.transpose_(0, 1))
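# A minimal usage sketch (an addition, not part of the original module):
# initializing a fully-connected layer's weights in place, together with the
# zero bias initialization recommended for DKS/TAT. The layer width is an
# illustrative assumption.
def _example_suo_init():
  layer = torch.nn.Linear(128, 128)
  scaled_uniform_orthogonal_(layer.weight)
  torch.nn.init.zeros_(layer.bias)
  return layer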
| dks-main | dks/pytorch/parameter_sampling_functions.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch implementation of the activation transformations used in DKS/TAT."""
import math
from dks.base import activation_transform
import torch
import torch.nn.functional as tfunc
# PyTorch doesn't seem to currently support the commonly used GELU approximation
# in its public-facing API (as of June 2022) so we implement it here instead.
def _gelu_approx(x):
return (0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) *
(x + 0.044715 * torch.pow(x, 3)))))
def _get_pytorch_activation_function(name):
"""Get activation function by name in PyTorch."""
if name == "bentid":
return lambda x: (torch.sqrt(torch.square(x) + 1.) - 1.) / 2. + x
elif name == "gelu":
return _gelu_approx
elif name == "gelu_exact":
return tfunc.gelu
elif hasattr(tfunc, name):
return getattr(tfunc, name)
elif hasattr(torch, name):
return getattr(torch, name)
else:
raise ValueError(f"Unrecognized activation function name '{name}'.")
def get_transformed_activations(*args, **kwargs):
"""See ``dks.base.activation_transform.get_transformed_activations()``."""
return activation_transform.get_transformed_activations(
*args, **kwargs, activation_getter=_get_pytorch_activation_function)
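# Illustrative usage sketch (hypothetical): for a plain depth-10 chain network
# such as an MLP, the subnetwork maximizing function is just repeated
# composition of the local C map, so transformed activations could be obtained
# along these lines:
#
#   def _chain_subnet_max_func(x, r_fn, depth=10):
#     for _ in range(depth):
#       x = r_fn(x)
#     return x
#
#   acts = get_transformed_activations(
#       ["tanh"], method="DKS", subnet_max_func=_chain_subnet_max_func)
#   transformed_tanh = acts["tanh"]  # a callable mapping tensors to tensors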
| dks-main | dks/pytorch/activation_transform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pre-processing functions for use with DKS/TAT in JAX."""
import functools
import operator
from absl import logging
import jax.numpy as jnp
_prod = lambda x: functools.reduce(operator.mul, x, 1)
def per_location_normalization(x, homog_mode="one", homog_scale=1.0,
has_batch_dim=True):
"""Applies Per-Location Normalization (PLN) to a given array.
This function generalizes the idea of PLN from the DKS paper to arrays of
arbitrary shape. Normalization is done over the last dimension (and only the
last dimension), so that ``jnp.mean(PLN(x)**2, axis=-1, keepdims=True)`` is an
array of ones. Note here that "normalization" does not correspond to making
the vectors at each location have norm 1. Rather, they will have a squared
norm given by ``x.shape[-1]``.
All dimensions, except for the last, and possibly the first (which may be the
batch dimension), are treated as indexing different "locations", analogous to
how locations are indexed by the height and width dimensions in convolutional
layers. The last dimension is always considered the "data" or "feature"
dimension, analogous to the channels dimension in convolutional layers. For
models where the dimensions don't have this interpretation, this type of
preprocessing may not be suitable. (And it's likely that the rest of the
  ``dks`` package, and perhaps even the DKS/TAT method itself, won't be
applicable either.)
Before normalization occurs, a homogeneous coordinate may be appended to the
  last dimension of the array. Whether and how this is done depends on the
  values of the arguments ``homog_mode`` and ``homog_scale``, as described in
  the arguments section. This step is designed to preserve the information that
would otherwise be lost due to normalization.
The motivation for PLN is to ensure that the input "q values" to a network are
always 1, which is a technical requirement of DKS/TAT. While DKS/TAT can often
work well in practice without PLN, there are situations where using PLN will
  be crucial. This is the case, in particular, when the input data, or certain
  samples from it, have an extreme scale that deviates from the typical ones
  seen in CIFAR and
ImageNet (with the standard preprocessing applied). With CIFAR in particular
we have observed that some pixels in some images have feature vectors that are
exactly zero, which can lead to problems when using TAT with leaky ReLUs.
See the section titled "Uniform q values via Per-Location Normalization" in
the DKS paper (https://arxiv.org/abs/2110.01765) for a discussion of PLN.
Args:
x: A JAX array representing the input to a network to be normalized. If
``x`` has a batch dimension it must be the first one.
homog_mode: A string indicating whether to append a homogeneous coordinate,
and how to compute it. Can be ``one``, ``avg_q``, or ``off``. If
      ``one``, the coordinate will have the value 1. If ``avg_q``, it will be
      given by the square root of the mean squared value of ``x``, computed
      across the non-batch axes. If ``off``, no homogeneous coordinate will be
      added. (Default: "one")
    homog_scale: A float used to rescale the homogeneous coordinate, if one is
      appended.
(Default: 1.0)
has_batch_dim: A boolean specifying whether ``x`` has a batch dimension
(which will always be its first dimension). Note that many data processing
pipelines will process training cases one at a time. Unless this is done
with a singleton leading "dummy" batch dimension (which isn't typical)
this argument should be set to False. (Default: True)
Returns:
A JAX array which is the result of applying PLN to ``x``, as described
above.
"""
def q_val(z, axis):
return jnp.mean(jnp.square(z), axis=axis, keepdims=True)
x_shape = list(x.shape)
if len(x_shape) == 0 and has_batch_dim: # pylint: disable=g-explicit-length-test
raise ValueError("dataset doesn't appear to have a batch dimension.")
if homog_mode == "avg_q" and ((len(x_shape) <= 2 and has_batch_dim)
or (len(x_shape) <= 1 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
raise ValueError("homog_mode='avg_q' should not be used for datasets with "
"no time/location dimension, as it doesn't offer anything "
"beyond what homog_mode='off' would in such cases.")
if ((len(x_shape) == 1 and has_batch_dim)
or (len(x_shape) == 0 and not has_batch_dim)): # pylint: disable=g-explicit-length-test
x = jnp.expand_dims(x, axis=-1)
x_shape += [1]
# the threshold 20 is arbitrary
if _prod(x_shape[1 if has_batch_dim else 0:]) < 20 and homog_mode == "avg_q":
logging.warning("Using homog_mode='avg_q' for datasets with few total "
"degrees of freedom per batch element (taken over "
"time/location dimensions and the data dimension) is "
"dangerous. This is because it will remove one degree of "
"freedom, and possibly destroy important information. See "
"the discussion in the subsection of the DKS paper titled "
"'Uniform q values via Per-Location Normalization'.")
if x_shape[-1] < 20 and homog_mode == "off":
logging.warning("Using homog_mode='off' for datasets with a small data "
"dimension is dangerous. This is because it will remove "
"one degree of freedom in this dimension, and possibly "
"destroy important information. See the discussion in the "
"subsection of the DKS paper titled 'Uniform q values via "
"Per-Location Normalization'.")
if homog_mode == "avg_q":
homog = jnp.sqrt(q_val(x, axis=list(range(1 if has_batch_dim else 0,
len(x_shape)))))
if has_batch_dim:
homog = jnp.tile(homog, [1] + x_shape[1:-1] + [1])
else:
homog = jnp.tile(homog, x_shape[:-1] + [1])
elif homog_mode == "one":
homog = jnp.ones(x_shape[:-1] + [1])
elif homog_mode == "off":
homog = None
else:
raise ValueError(f"Unrecognized value for homog_mode: {homog_mode}.")
if homog_scale != 1.0 and homog is not None:
homog = homog_scale * homog
if homog is not None:
x = jnp.concatenate([x, homog], axis=-1)
x = x / jnp.sqrt(q_val(x, axis=-1))
return x
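# Illustrative usage sketch (assuming an NHWC image batch):
#
#   images = jnp.ones([32, 224, 224, 3])
#   images_pln = per_location_normalization(images, homog_mode="one")
#   # images_pln has shape [32, 224, 224, 4] due to the appended homogeneous
#   # coordinate, and jnp.mean(images_pln**2, axis=-1) is an array of ones.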
| dks-main | dks/jax/data_preprocessing.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku initializers for use with DKS/TAT."""
from dks.jax import parameter_sampling_functions
import haiku as hk
class ScaledUniformOrthogonal(hk.initializers.Initializer):
"""SUO (+ Delta) initializer for fully-connected and convolutional layers.
Similar to hk.initializers.Orthogonal, except that it supports Delta
initializations, and sampled weights are rescaled by
``max(sqrt(out_dim / in_dim), 1)`` so that the layer preserves q values at
initialization time (assuming initial biases of zero).
Should be used with a zeros initializer for the bias parameters for DKS/TAT.
See "Parameter distributions" section of DKS paper
(https://arxiv.org/abs/2110.01765) for a discussion of the SUO distribution
and Delta initializations.
"""
def __init__(self, scale=1.0, axis=-1, delta=True):
"""Construct a Haiku initializer which uses the SUO distribution.
Args:
scale: A float giving an additional scale factor applied on top of the
standard rescaling used in the SUO distribution. This should be left
at its default value when using DKS/TAT. (Default: 1.0)
axis: An int giving the axis corresponding to the "output dimension" of
the parameter tensor. (Default: -1)
delta: A bool determining whether or not to use a Delta initialization
(which zeros out all weights except those in the central location of
convolutional filter banks). (Default: True)
"""
if delta and axis != -1:
raise ValueError("Invalid axis value for Delta initializations. "
"Must be -1.")
self.scale = scale
self.axis = axis
self.delta = delta
def __call__(self, shape, dtype):
return parameter_sampling_functions.scaled_uniform_orthogonal(
hk.next_rng_key(), shape, scale=self.scale, axis=self.axis,
delta=self.delta, dtype=dtype)
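# Illustrative usage sketch (a hypothetical Haiku convolution using the SUO
# initializer together with zero-initialized biases, as recommended for
# DKS/TAT):
#
#   conv = hk.Conv2D(
#       output_channels=128,
#       kernel_shape=3,
#       w_init=ScaledUniformOrthogonal(delta=True),
#       b_init=hk.initializers.Constant(0.0))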
| dks-main | dks/jax/haiku_initializers.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for JAX."""
from dks.jax import activation_transform
from dks.jax import data_preprocessing
from dks.jax import haiku_initializers
from dks.jax import parameter_sampling_functions
| dks-main | dks/jax/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameter sampling functions for use with DKS/TAT in JAX."""
import jax
from jax.config import config as jax_config
import jax.numpy as jnp
import numpy as np
def _get_default_float_dtype():
if jax_config.jax_enable_x64:
return jnp.float64
else:
return jnp.float32
def _uniform_orthogonal(key, shape, scale=1.0, axis=-1, dtype=None):
"""Samples an orthogonal matrix from the uniform/Haar distribution."""
# The implementation of this function is essentially copied from
# hk.initializers.Orthogonal.
if dtype is None:
dtype = _get_default_float_dtype()
if len(shape) < 2:
raise ValueError("Orthogonal initializer requires at least a 2D shape.")
n_rows = shape[axis]
n_cols = np.prod(shape) // n_rows
matrix_shape = (n_rows, n_cols) if n_rows > n_cols else (n_cols, n_rows)
norm_dst = jax.random.normal(key, matrix_shape, dtype)
q_mat, r_mat = jnp.linalg.qr(norm_dst)
q_mat *= jnp.sign(jnp.diag(r_mat))
if n_rows < n_cols:
q_mat = q_mat.T
q_mat = jnp.reshape(q_mat, (n_rows,) + tuple(np.delete(shape, axis)))
q_mat = jnp.moveaxis(q_mat, 0, axis)
return jax.lax.convert_element_type(scale, dtype) * q_mat
def scaled_uniform_orthogonal(key, shape, scale=1.0, axis=-1, delta=True,
dtype=None):
"""Initializes fully-connected or conv weights using the SUO distribution.
Output is similar to that of haiku.initializers.Orthogonal, except that it
supports Delta initializations, and sampled weights are rescaled by
``max(sqrt(out_dim / in_dim), 1)``, so that the layer preserves q values at
initialization-time (assuming initial biases of zero).
  Note that, as with all JAX functions, this is a pure and totally stateless
  function that will produce the exact same output given the same arguments.
Should be used with a zeros initializer for the bias parameters for DKS/TAT.
See the "Parameter distributions" section of DKS paper
(https://arxiv.org/abs/2110.01765) for a discussion of the SUO distribution
and Delta initializations.
Args:
key: A PRNG key used as the random key.
shape: A list of integers giving the shape of the parameter tensor.
scale: A float giving an additional scale factor applied on top of the
      standard rescaling used in the SUO distribution. This should be left
at its default value when using DKS/TAT. (Default: 1.0)
axis: An int giving the axis corresponding to the "output dimension" of
the parameter tensor. (Default: -1)
delta: A bool determining whether or not to use a Delta initialization
(which zeros out all weights except those in the central location of
convolutional filter banks). (Default: True)
dtype: a float dtype for the return value. (Default: jnp.float64 if
jax_enable_x64 is true, otherwise jnp.float32).
Returns:
The sampled weights as a JAX ndarray.
"""
if delta and axis != -1:
raise ValueError("Invalid axis value for Delta initializations. "
"Must be -1.")
if delta and len(shape) != 2:
# We assume 'weights' is a filter bank when len(shape) != 2
# In JAX, conv filter banks have the shape
# [loc_dim_1, loc_dim_2, in_dim, out_dim]
in_dim = shape[-2]
out_dim = shape[-1]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
nonzero_part = _uniform_orthogonal(
key, shape[-2:], scale=(rescale_factor * scale), axis=-1, dtype=dtype)
if any(s % 2 != 1 for s in shape[:-2]):
raise ValueError("All spatial axes must have odd length for Delta "
"initializations.")
midpoints = tuple((s - 1) // 2 for s in shape[:-2])
return jnp.zeros(shape, dtype).at[midpoints].set(nonzero_part)
else:
in_dim = np.prod(np.delete(shape, axis))
out_dim = shape[axis]
rescale_factor = np.maximum(np.sqrt(out_dim / in_dim), 1.0)
return _uniform_orthogonal(
key, shape, scale=(rescale_factor * scale), axis=axis, dtype=dtype)
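# Illustrative usage sketch (fully-connected weights of shape [in_dim, out_dim]
# and a JAX conv filter bank of shape [loc_dim_1, loc_dim_2, in_dim, out_dim]):
#
#   key = jax.random.PRNGKey(0)
#   w_fc = scaled_uniform_orthogonal(key, [128, 256])
#   w_conv = scaled_uniform_orthogonal(key, [3, 3, 64, 128], delta=True)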
| dks-main | dks/jax/parameter_sampling_functions.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX implementation of the activation transformations used in DKS/TAT."""
from dks.base import activation_transform
import jax
import jax.numpy as jnp
def _get_jax_activation_function(name):
"""Get activation function by name in JAX."""
if name == "bentid":
return lambda x: (jnp.sqrt(jnp.square(x) + 1.) - 1.) / 2. + x
elif name == "softsign":
return jax.nn.soft_sign
elif name == "gelu":
return lambda x: jax.nn.gelu(x, approximate=True)
elif name == "gelu_exact":
return lambda x: jax.nn.gelu(x, approximate=False)
elif hasattr(jax.lax, name):
return getattr(jax.lax, name)
elif hasattr(jax.nn, name):
return getattr(jax.nn, name)
else:
raise ValueError(f"Unrecognized activation function name '{name}'.")
def get_transformed_activations(*args, **kwargs):
"""See ``dks.base.activation_transform.get_transformed_activations()``."""
return activation_transform.get_transformed_activations(
*args, **kwargs, activation_getter=_get_jax_activation_function)
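# Illustrative usage sketch (hypothetical): TAT with Leaky ReLU for a plain
# depth-20 chain network, whose subnetwork maximizing function is repeated
# composition of the local C map:
#
#   def _chain_subnet_max_func(x, r_fn, depth=20):
#     for _ in range(depth):
#       x = r_fn(x)
#     return x
#
#   acts = get_transformed_activations(
#       ["leaky_relu"], method="TAT",
#       subnet_max_func=_chain_subnet_max_func)
#   tat_leaky_relu = acts["leaky_relu"]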
| dks-main | dks/jax/activation_transform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines a getter for NumPy activation functions (with autograd support)."""
from autograd import numpy as np
from autograd import scipy as sp
_SELU_LAMBDA = 1.0507009873554804934193349852946
_SELU_ALPHA = 1.6732632423543772848170429916717
_sigmoid = sp.special.expit
_erf = sp.special.erf
def _elu(x, a=1.0, l=1.0):
is_neg = x < 0
is_not_neg = np.logical_not(is_neg)
return l * (is_neg * a * (np.exp(x) - 1) + is_not_neg * x)
def _bentid(x):
return (np.sqrt(x**2 + 1.) - 1.) / 2. + x
def _softplus(x):
"""Numerically-stable softplus."""
return np.log(1. + np.exp(-np.abs(x))) + np.maximum(x, 0)
# aka Silu
def _swish(x):
return x * _sigmoid(x)
def _leaky_relu(x, negative_slope=0.01):
is_neg = x < 0
is_not_neg = np.logical_not(is_neg)
return negative_slope * is_neg * x + is_not_neg * x
# approximate GELU adapted from official JAX implementation:
def _gelu(x):
cdf = 0.5 * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * (x ** 3))))
return x * cdf
# exact GELU adapted from official JAX implementation:
def _gelu_exact(x):
return x * (sp.special.erf(x / np.sqrt(2.0)) + 1) / 2
_ACTIVATION_TABLE = {
"tanh": np.tanh,
"sigmoid": _sigmoid,
"erf": _erf,
"relu": lambda x: np.maximum(0., x),
"softplus": _softplus,
"selu": lambda x: _elu(x, _SELU_ALPHA, _SELU_LAMBDA),
"elu": _elu,
"swish": _swish,
"bentid": _bentid,
"atan": np.arctan,
"asinh": np.arcsinh,
"square": lambda x: x**2,
"softsign": lambda x: x / (1 + np.abs(x)),
"leaky_relu": _leaky_relu,
"gelu": _gelu,
"gelu_exact": _gelu_exact,
}
def get_activation_function(name):
if name in _ACTIVATION_TABLE:
return _ACTIVATION_TABLE[name]
else:
raise ValueError(f"Unrecognized activation function name '{name}'.")
| dks-main | dks/base/activation_getter.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for base implementation in Python."""
from dks.base import activation_getter
from dks.base import activation_transform
| dks-main | dks/base/__init__.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NumPy implementation of the activation transformations used in DKS/TAT."""
import itertools
import os
from absl import logging
from autograd import elementwise_grad as egrad
from autograd import numpy as np
from dks.base.activation_getter import get_activation_function as _get_numpy_activation_function
import scipy.integrate as sp_int
import scipy.optimize as sp_opt
from scipy.special import roots_legendre
# pylint: disable=g-import-not-at-top
# This is a trick to achieve compatibility with multiple versions of SciPy:
try:
from scipy.integrate._quadrature import _cached_roots_legendre
except ImportError:
from scipy.integrate.quadrature import _cached_roots_legendre
# pylint: enable=g-import-not-at-top
# Random seed used to initialize activation function parameter searches after
# standard starting points fail (which almost never happens).
_RANDOM_SEED = 123
_CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
# Feel free to change the path of this file if needed. This file should ideally
# persist between experiments since it can take around 20 minutes to regenerate.
_ROOTS_CACHE_FILE = os.path.join(_CURRENT_DIR, "roots_{}.npy")
_QUADRATURE_ORDER = 100000
# These represent sensible values and ranges to initialize the activation
# function parameter search with. They are sensible because commonly used
# activation functions tend to have reasonable behavior for such values. If
# the searches fail for some activation function they may need to be
# tweaked/expanded.
_ALWAYS_TRY_VALUES = {"input_shift": (0.0, 1.0, -1.0),
"input_scale": (1.0, 0.1),
"output_shift": (0.0, 1.0, -1.0),
"negative_slope": (0.0, 0.5)}
_SAMPLE_RANGE_LOW = {"input_shift": -3.0,
"input_scale": 0.0,
"output_shift": -3.0,
"negative_slope": 0.0}
_SAMPLE_RANGE_HIGH = {"input_shift": 3.0,
"input_scale": 2.0,
"output_shift": 3.0,
"negative_slope": 1.0}
def _precompute_or_load_roots(order):
"""Compute or load the roots used by fixed_quad to save time."""
if order not in _cached_roots_legendre.cache:
roots_cache_file = _ROOTS_CACHE_FILE.format(order)
if os.path.exists(roots_cache_file):
with open(roots_cache_file, "rb") as fhandle:
_cached_roots_legendre.cache[order] = np.load(fhandle,
allow_pickle=False)
else:
roots = roots_legendre(order)
_cached_roots_legendre.cache[order] = roots
with open(roots_cache_file, "wb") as fhandle:
np.save(fhandle, roots, allow_pickle=False)
def _estimate_gaussian_mean(fn, order=_QUADRATURE_ORDER):
"""Estimate the mean of a function fn(x) where x ~ N(0,1)."""
_precompute_or_load_roots(order)
fn_weighted = lambda x: np.exp(-x**2 / 2) * fn(x)
integral, _ = sp_int.fixed_quad(fn_weighted, -10., 10., n=order)
return integral / np.sqrt(2*np.pi)
def _estimate_2d_gaussian_mean(fn):
"""Estimate the mean of a function fn(x, y) where x,y ~ N(0,1)."""
fn_weighted = lambda x, y: np.exp(-(x**2 + y**2) / 2) * fn(x, y)
integral, _ = sp_int.dblquad(fn_weighted, -10., 10., -10., 10., epsabs=0.0)
return integral / (2*np.pi)
def _calc_c_map(activation, derivative_order=0, c=1.0, q_output=None):
"""Evaluate local C map value assuming an input q value of 1.
Args:
activation: A callable representing the activation function (applied
elementwise) from which to define the local C map.
derivative_order: An integer giving the order of the derivative of the C map
to take before evaluating it. (Default: 0)
c: A float giving the input point at which to evaluate the C map. Must
be between -1.0 and 1.0. (Default: 1.0)
q_output: Float or None giving the output q value associated with
``activation``, if this is known. If None this will be computed from
scratch. (Default: None)
Returns:
A float giving the value of the (derivative of) the local C map for the
given activation function.
"""
derivative = activation
for _ in range(derivative_order):
derivative = egrad(derivative)
if c == 0.0:
integral = _estimate_gaussian_mean(derivative)**2
elif c == 1.0:
integral = _estimate_gaussian_mean(lambda x: derivative(x)**2)
elif c >= -1.0 and c <= 1.0:
sqrt1mc2 = np.sqrt(1.0 - c**2)
integral = _estimate_2d_gaussian_mean(
lambda x, y: derivative(x) * derivative(c*x + sqrt1mc2*y))
else:
raise NotImplementedError("Input c value must be between -1.0 and 1.0.")
if q_output is None:
q_output = _estimate_gaussian_mean(lambda x: activation(x)**2)
return integral / q_output
def _calc_c_slope(activation, c=1.0, q_output=None):
"""Evaluate local C map derivative assuming an input q value of 1."""
return _calc_c_map(activation, derivative_order=1, c=c, q_output=q_output)
def _calc_c_curv(activation, c=1.0, q_output=None):
"""Evaluate local C map second derivative assuming an input q value of 1."""
return _calc_c_map(activation, derivative_order=2, c=c, q_output=q_output)
def _calc_q_slope_1(activation):
"""Computes the derivative of a local Q map at q=1."""
derivative = egrad(activation)
return _estimate_gaussian_mean(lambda x: activation(x)*derivative(x)*x)
def _leaky_relu_cmap(c, negative_slope):
"""Evaluates the local C map for Leaky ReLU with the given negative slope."""
return ((1 - negative_slope)**2 * (np.sqrt(1 - c**2)
+ (np.pi - np.arccos(c)) * c) / np.pi
+ 2 * negative_slope * c) / (1 + negative_slope**2)
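# Sanity checks implied by the formula above: C(1) = 1 for any negative slope,
# and C(0) = (1 - negative_slope)**2 / (pi * (1 + negative_slope**2)).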
def _compute_output_params(act, local_c_val_0_target):
"""Compute output params to achieve Q(1)=1 and C(0)=local_c_val_0_target."""
if local_c_val_0_target is not None:
output_shift = np.sqrt(local_c_val_0_target) - _estimate_gaussian_mean(act)
act_shifted = lambda x: act(x) + output_shift
else:
output_shift = None
act_shifted = act
output_scale = 1. / np.sqrt(
_estimate_gaussian_mean(lambda x: act_shifted(x)**2))
return output_shift, output_scale
def _transform_activation(phi, params):
"""Transform an activation function phi using the given parameters."""
params = params.copy()
input_scale = params.pop("input_scale", None)
input_shift = params.pop("input_shift", None)
output_shift = params.pop("output_shift", None)
output_scale = params.pop("output_scale", None)
def activation(x):
# Note: DO NOT use += and *= below! Bad things will happen.
if input_scale is not None:
x = x * float(input_scale)
if input_shift is not None:
x = x + float(input_shift)
x = phi(x, **params)
if output_shift is not None:
x = x + float(output_shift)
if output_scale is not None:
x = x * float(output_scale)
return x
return activation
def _solve_for_activation_params(
name, local_q_slope_target, local_c_val_0_target, local_c_slope_1_target,
local_c_slope_0_target, local_c_curv_target, reject_condition=None,
num_tries=50):
"""Computes activation function parameters to achieve the given targets."""
# Making sure random starting points used in solvers will be the same for
# each run, so that they find the same exact solutions (which is important
# when using JAX with multiple processes).
np.random.seed(_RANDOM_SEED)
# RELU has 1 less degree of freedom so we use this special-case logic:
if name == "relu":
# The constant below is the maximum value of local_c_slope_1 that can be
# achieved in ReLUs given a positive input shift value.
if local_c_slope_1_target < 1.466942206924260361:
input_shift = 1.0
else:
input_shift = -1.0
constant_params = {"input_shift": input_shift}
opt_params = ("input_scale",)
local_q_slope_target = None # we turn this off for ReLUs
assert local_c_slope_0_target is None
assert local_c_curv_target is None
elif name == "leaky_relu":
constant_params = {}
opt_params = ("negative_slope", "input_shift")
elif local_c_val_0_target is not None:
constant_params = {}
opt_params = ("input_scale", "input_shift")
else:
constant_params = {}
opt_params = ("input_scale", "input_shift", "output_shift")
def make_params_from_pvector(p):
"""Make a dictionary of activation parameters from a given vector."""
params = constant_params.copy()
for i, pname in enumerate(opt_params):
assert pname not in params
params[pname] = p[i]
# We directly compute output_scale, and sometimes also output_shift, instead
# of taking them from the parameter vector.
output_shift, output_scale = _compute_output_params(
_transform_activation(
_get_numpy_activation_function(name), params),
local_c_val_0_target)
if output_shift is not None:
assert "output_shift" not in params
params["output_shift"] = output_shift
if output_scale is not None:
assert "output_scale" not in params
params["output_scale"] = output_scale
return params
def compute_error_vector(p):
"""Computes vector of errors (value - target) over relevant quantities."""
params = make_params_from_pvector(p)
phi_hat = _transform_activation(_get_numpy_activation_function(name),
params)
residual_array = []
if local_q_slope_target is not None:
local_q_slope = _calc_q_slope_1(phi_hat)
residual_array += [local_q_slope - local_q_slope_target]
if local_c_slope_1_target is not None:
local_c_1_slope = _calc_c_slope(phi_hat, c=1.0, q_output=1.0)
residual_array += [local_c_1_slope - local_c_slope_1_target]
if local_c_slope_0_target is not None:
local_c_0_slope = _calc_c_slope(phi_hat, c=0.0, q_output=1.0)
residual_array += [local_c_0_slope - local_c_slope_0_target]
if local_c_curv_target is not None:
local_c_curv = _calc_c_curv(phi_hat, c=1.0, q_output=1.0)
residual_array += [local_c_curv - local_c_curv_target]
return np.asarray(residual_array)
# Make the starting points for the search:
always_try = tuple(_ALWAYS_TRY_VALUES[pname] for pname in opt_params)
starting_points = list(itertools.product(*always_try))
for _ in range(num_tries - len(starting_points)):
starting_points += [
tuple(np.random.uniform(low=_SAMPLE_RANGE_LOW[pname],
high=_SAMPLE_RANGE_HIGH[pname])
for pname in opt_params)]
# For each starting point we run sp_opt.root to try and find a solution:
for starting_point in starting_points:
sol = sp_opt.root(compute_error_vector, np.asarray(starting_point),
method="hybr", jac=False, options=None)
if sol.success:
params = make_params_from_pvector(sol.x)
if reject_condition is None or not reject_condition(name, params):
break
logging.debug("Failed to find parameters from starting point %s.",
starting_point)
if not sol.success:
raise ValueError(f"Failed to find parameters for '{name}'!")
logging.info("Found parameters for '%s': %s", name, params)
return params
def _solve_increasing(fn, target, input_, min_, max_, tol=1e-8, max_eval=100):
"""Solves for x in fn(x)=target, where fn is an increasing function.
Args:
fn: A callable which takes a scalar input x and produces a scalar output.
Must compute an increasing function.
target: The target output value of ``fn``.
input_: The initial guess for x.
min_: A lower bound on the possible value of x.
max_: An upper bound on the possible value of x.
    tol: A float giving the acceptable tolerance for ``|fn(x) - target|``.
(Default: 1e-8)
max_eval: An integer giving the maximum number of times to evaluate ``fn``
before giving up. (Default: 100)
Returns:
A float giving a value of x such that ``|fn(x) - target| < tol``.
"""
# The method used to find the solution is a simple binary search in the
  # interval [min_, max_], where max_ or min_ will change each iteration. If
# max_ is infinity we double our current guess instead of averaging it with
# max_.
for _ in range(max_eval):
value = fn(input_)
logging.debug("binary search vals: min = %f, input = %f, max = %f, "
"target = %f, value = %f", min_, input_, max_, target, value)
if np.abs(value - target) < tol:
return input_
if value > target:
max_ = input_
input_ = 0.5 * (input_ + min_)
elif value < target:
min_ = input_
if np.isinf(max_):
input_ = input_ * 2
else:
input_ = 0.5 * (input_ + max_)
raise ValueError(f"Maximum evaluations ({max_eval}) exceeded while searching "
"for solution. This is probably due the specified target "
"being unachievable for the given architecture. For example,"
" a Leaky-ReLU MLP of only a few layers may not be able "
"to achieve the default C(0) target of 0.9 under TAT. "
"Intuitively, this is because a shallow network cannot be "
"made sufficiently nonlinear with such activation functions."
" The solution to this would be to either use a smaller "
"value for the C(0) target (corresponding to a more linear "
"model), or to a use a deeper architecture.")
def _compute_local_c_slope_1_target(max_slope_func, target_value):
return _solve_increasing(max_slope_func, target_value, 1.1, 1., np.inf)
def _compute_local_c_slope_0_target(max_slope_func, target_value):
return _solve_increasing(max_slope_func, target_value, 0.99, 0., 1.0)
def _compute_local_c_curv_target(max_curv_func, target_value):
return _solve_increasing(max_curv_func, target_value, 0.1, 0., np.inf)
def _compute_negative_slope_param(max_lrelu_c0, target_value):
return _solve_increasing(
lambda a: 1.0 - max_lrelu_c0(a), 1.0 - target_value, 0.5, 0., 1.0)
def _verify_params_dict_and_set_defaults(params_dict, defaults):
"""Verify keys in parameter dict and set any missing ones to the defaults."""
bad_keys = set(params_dict.keys()).difference(set(defaults.keys()))
if bad_keys:
raise ValueError(
f"Parameter dictionary had unrecognized keys: '{bad_keys}'")
for key in defaults.keys():
if key not in params_dict:
params_dict[key] = defaults[key]
def _get_activations_params(
activation_names, method="DKS", dks_params=None, tat_params=None,
max_slope_func=None, max_curv_func=None, subnet_max_func=None):
"""Get dict of optimized parameters for given named activation functions."""
if not isinstance(activation_names, (list, tuple)):
raise ValueError("activation_names argument must be a list or tuple of "
"strings.")
# Note that using dictionaries as defaults in the function def is bad, hence
# we do this instead:
if dks_params is None:
dks_params = {}
if tat_params is None:
tat_params = {}
_verify_params_dict_and_set_defaults(
dks_params, {"c_slope_1_target": 1.5, "local_q_slope_target": 1.0})
_verify_params_dict_and_set_defaults(
tat_params, {"c_val_0_target": 0.9, "c_curve_target": 0.3})
local_q_slope_target = None
local_c_val_0_target = None
local_c_slope_1_target = None
local_c_slope_0_target = None
local_c_curv_target = None
reject_condition = None
if method == "DKS":
if "relu" in activation_names:
logging.warning("The use of ReLUs with DKS is *highly* discouraged. You "
"are advised to use Leaky ReLUs instead.")
c_slope_1_target = dks_params["c_slope_1_target"]
if c_slope_1_target <= 1.0:
raise ValueError("Invalid value for DKS 'c_slope_1_target' parameter. "
"Must be a float greater than 1.0.")
if max_slope_func is None:
if subnet_max_func is None:
raise ValueError("Must supply 'subnet_max_func' if using DKS and not "
"passing in 'max_slope_func'.")
# We can compute the maximal slope function by replacing composition
# with multiplication in the maximal c value function.
max_slope_func = lambda x: subnet_max_func(1.0, lambda y: x * y)
# Three of the four conditions used by DKS. The remaining condition Q(1) = 1
# is implied.
local_q_slope_target = dks_params["local_q_slope_target"]
local_c_val_0_target = 0.0
# We set the local slope to achieve C'_f(1) <= target over all subnetworks
# f:
local_c_slope_1_target = _compute_local_c_slope_1_target(
max_slope_func, c_slope_1_target)
logging.info("Found 'local_c_slope_1_target': %s", local_c_slope_1_target)
elif method == "TAT" and "leaky_relu" not in activation_names:
if "relu" in activation_names:
raise ValueError("Standard ReLU is not supported with TAT. Use leaky "
"ReLU instead.")
c_curve_target = tat_params["c_curve_target"]
if c_curve_target <= 0.0:
raise ValueError("Invalid value for TAT 'c_curve_target' parameter. Must "
"be a float greater than 0.0.")
if max_curv_func is None:
if subnet_max_func is None:
raise ValueError("Must supply 'subnet_max_func' if using TAT with "
"smooth activations and not passing in "
"'max_curv_func'.")
# We can compute the maximal curvature function by replacing composition
# with addition in the maximal c value function.
max_curv_func = lambda x: subnet_max_func(0.0, lambda y: x + y)
# Three of the four conditions used by TAT in the smooth case. The remaining
# condition Q(1) = 1 is implied.
local_q_slope_target = 1.0
local_c_slope_1_target = 1.0
# We set the local second derivative to achieve C''_f(1) <= target over all
# subnetworks f:
local_c_curv_target = _compute_local_c_curv_target(
max_curv_func, c_curve_target)
logging.info("Found 'local_c_curv_target': %s", local_c_curv_target)
# This is a hacky fix used to avoid certain 'bad' solutions we observed that
# seem to have unstable Q maps and higher kernel approximation errors. It
# should probably be replaced with something more principled.
reject_condition = lambda name, params: ( # pylint: disable=g-long-lambda
params["input_scale"] * params["output_scale"] >= 2.0)
elif method == "TAT" and "leaky_relu" in activation_names:
if len(activation_names) > 1:
raise ValueError("When using Leaky ReLU with TAT it must be the only "
"activation function.")
c_val_0_target = tat_params["c_val_0_target"]
if c_val_0_target > 1.0 or c_val_0_target < 0.0:
raise ValueError("Invalid value for TAT 'c_val_0_target' parameter. Must "
"be a float between 0.0 and 1.0.")
if subnet_max_func is None:
raise ValueError("Must supply 'subnet_max_func' if using TAT with Leaky "
"ReLU activation functions.")
max_lrelu_c0 = lambda neg_slope: subnet_max_func( # pylint: disable=g-long-lambda
0.0, lambda c: _leaky_relu_cmap(c, neg_slope))
# We set the negative slope parameter to achieve C_f(0) <= target over all
# subnetworks f:
negative_slope = _compute_negative_slope_param(
max_lrelu_c0, c_val_0_target)
# This is the value required to achieve Q(1) = 1 for Leaky ReLUs. See the
# TAT paper for details.
output_scale = np.sqrt(2.0 / (1.0 + negative_slope**2))
logging.info("Found parameters for 'leaky_relu': negative_slope = %s, "
"output_scale = %s.", negative_slope, output_scale)
return {"leaky_relu": {"output_scale": output_scale,
"negative_slope": negative_slope}}
else:
raise ValueError(f"Unrecognized value for argument 'method': {method}")
params = {}
for name in activation_names:
params[name] = _solve_for_activation_params(
name,
local_q_slope_target=local_q_slope_target,
local_c_val_0_target=local_c_val_0_target,
local_c_slope_1_target=local_c_slope_1_target,
local_c_slope_0_target=local_c_slope_0_target,
local_c_curv_target=local_c_curv_target,
reject_condition=reject_condition)
return params
def get_transformed_activations(
activation_names, method="DKS", dks_params=None, tat_params=None,
max_slope_func=None, max_curv_func=None, subnet_max_func=None,
activation_getter=_get_numpy_activation_function,
):
"""Gets transformed activation functions using the DKS or TAT method.
See the DKS paper (https://arxiv.org/abs/2110.01765) and the TAT paper
(https://openreview.net/forum?id=U0k7XNTiFEq) for details about what these
are, how they are computed, and what their parameters mean. A procedure to
compute the "maximal slope function" is given in the section titled "Summary
of our method" of the DKS paper. Procedures to compute the "maximal curvature
function", and the "subnet maximizing function", are given in the appendix
section titled "Additional details and pseudocode for activation function
transformations" of the TAT paper.
Note that if you are using the JAX, PyTorch, or TensorFlow frameworks, you
probably want to be using the version of get_transformed_activations() in the
corresponding subpackage. (These are basically thin wrappers around this
function that pass a framework-specific value to the ``activation_getter``
argument.)
Args:
activation_names: An iterable of string names for the activation functions.
Supported names are the intersection of those supported by
dks.base.activation_getter.get_activation_function, and those supported
by the getter passed to the ``activation_getter`` argument (which defaults
to dks.base.activation_getter.get_activation_function). The built-in
getters in this package (for each framework) currently support the
following names: "tanh", "softplus", "leaky_relu", "relu" (not
recommended; use "leaky_relu" instead), "selu", "elu", "swish", "sigmoid",
"erf", "bentid", "atan", "asinh", "square", "softsign", "gelu", and
"gelu_exact".
method: A string representing the method used to transform the activation
functions. Can be "DKS", "TAT", or "untransformed". The latter choice
will return activation functions without any transformations.
(Default: "DKS")
dks_params: A dictionary containing the parameters to use for DKS. Keys
should be a subset of {"c_slope_1_target", "local_q_slope_target"}.
"c_slope_1_target" gives the target maximal slope value for the network
(corresponding to "zeta" from the paper), and defaults to 1.5.
"local_q_slope_target" gives the target value for the local Q map slope
of each nonlinear layer (which is kept at 1.0 in the paper -- except in
ablation tests), and defaults to 1.0. If ``dks_params`` is passed as None,
it defaults to the empty dictionary (so that the parameters will use their
default values). (Default: None)
tat_params: A dictionary containing the parameters to use for TAT. Keys
should be a subset of {"c_val_0_target", "c_curve_target"}.
"c_val_0_target" gives the maximum value of ``C_f(0)`` over subnetworks f,
which is used when transforming Leaky ReLUs (and corresponds to "eta" from
the paper), and defaults to 0.9. "c_curve_target" gives the maximum value
of ``C''_f(1)``, which is used for all other activation functions (and
corresponds to "tau" from the paper), and defaults to 0.3. If
``tat_params`` is passed as None, it defaults to the empty dictionary (so
that the parameters will use their default values). (Default: None)
max_slope_func: A callable which computes the "maximal slope function" of
the network, as defined in the DKS paper. It should take a single argument
representing the slope of each local C map at ``c=1``. If this is required
(i.e. when using DKS) but passed as None, it will be generated using
``subnet_max_func`` when possible. (Default: None)
max_curv_func: A callable which computes the "maximal curvature function" of
the network, as defined in the TAT paper. It should take a single
parameter representing the second derivative of each local C map at c=1.
If this is required (i.e. when using TAT with smooth activation functions)
but is passed as None, it will be generated using ``subnet_max_func`` when
possible. (Default: None)
subnet_max_func: A callable which computes the "subnetwork maximizing
function" of the network, as defined in the TAT paper (and denoted
``M_{f,r}(x)``). It should take two arguments: the input value ``x``, and
a callable ``r_fn`` which maps a float to a float. This is required when
using TAT with Leaky ReLUs. (Default: None)
activation_getter: A callable which takes a string name for an activation
function and returns the (untransformed) activation function corresponding
to this name. Defaults to one returning activation functions in NumPy
(with autograd). Returned transformed activation functions will be based
on the output of this callable. Other tensor frameworks can be supported
by changing this argument. See the versions of
get_transformed_activations() in the ``dks.jax``, ``dks.pytorch``, and
``dks.tensorflow`` subpackages.
Returns:
A dictionary mapping the activation function names to their corresponding
transformed activation functions.
"""
if method == "untransformed":
return {name: activation_getter(name) for name in activation_names}
params = _get_activations_params(
activation_names, method=method, dks_params=dks_params,
tat_params=tat_params, max_slope_func=max_slope_func,
max_curv_func=max_curv_func, subnet_max_func=subnet_max_func)
transformed_acts = {}
for name in activation_names:
transformed_acts[name] = _transform_activation(activation_getter(name),
params[name])
return transformed_acts
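# Illustrative usage sketch (hypothetical): a simplified residual architecture
# in which each residual branch applies three nonlinear layers and is combined
# with the shortcut branch using a fixed shortcut weight ``w`` (similar in
# spirit to the modified ResNet example used in this package's tests):
#
#   def _resnet_subnet_max_func(x, r_fn, num_blocks=10, w=0.9):
#     res_branch_subnet = r_fn(r_fn(r_fn(x)))
#     for _ in range(num_blocks):
#       x = w**2 * x + (1.0 - w**2) * r_fn(r_fn(r_fn(x)))
#     return max(x, res_branch_subnet)
#
#   acts = get_transformed_activations(
#       ["softplus"], method="DKS", subnet_max_func=_resnet_subnet_max_func)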
| dks-main | dks/base/activation_transform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the activation function transform module for each framework."""
from absl.testing import absltest
from dks.base import activation_transform as activation_transform_numpy
from dks.jax import activation_transform as activation_transform_jax
from dks.pytorch import activation_transform as activation_transform_pytorch
from dks.tensorflow import activation_transform as activation_transform_tf
import jax.numpy as jnp
import numpy as np
from pkg_resources import parse_version
import tensorflow as tf
import torch
import tree
def _assert_structure_approx_equal(s1, s2):
tree.map_structure(np.testing.assert_almost_equal, s1, s2)
def _subnet_max_func(x, r_fn, shortcut_weight=0.6):
"""The subnetwork maximizing function of the modified ResNet model."""
blocks_per_group = (3, 4, 23, 3)
res_branch_subnetwork_x = r_fn(r_fn(r_fn(x)))
for i in range(4):
for j in range(blocks_per_group[i]):
res_x = r_fn(r_fn(r_fn(x)))
shortcut_x = r_fn(x) if (j == 0) else x
x = (shortcut_weight**2 * shortcut_x + (1.0 - shortcut_weight**2) * res_x)
x = r_fn(x)
return max(x, res_branch_subnetwork_x)
class ActivationTransformTest(absltest.TestCase):
"""Test class for this module."""
def test_parameter_computation(self):
"""Test that the correct transformation parameters are found."""
def check(activation_names, method, expected_params_dict):
params_dict = activation_transform_numpy._get_activations_params( # pylint: disable=protected-access
activation_names=activation_names,
method=method,
dks_params={"c_slope_1_target": 1.2},
tat_params={"c_val_0_target": 0.75, "c_curve_target": 0.2},
subnet_max_func=_subnet_max_func,
)
_assert_structure_approx_equal(params_dict, expected_params_dict)
check(("softplus",), "DKS",
{"softplus": {"input_scale": 0.18761008168267976,
"input_shift": 0.40688063442262007,
"output_shift": -0.9213466151411376,
"output_scale": 8.878672543665223}})
check(("tanh", "softplus"), "DKS",
{"tanh": {"input_scale": 0.07461073057540868,
"input_shift": 0.5566915199964182,
"output_shift": -0.5034378768692127,
"output_scale": 18.002635964442558},
"softplus": {"input_scale": 0.18761008168267976,
"input_shift": 0.40688063442262007,
"output_shift": -0.9213466151411376,
"output_scale": 8.878672543665223}})
check(("relu",), "DKS",
{"relu": {"input_shift": 1.0,
"input_scale": 0.3685360046708044,
"output_shift": -1.000373858553784,
"output_scale": 2.721729988761473}})
check(("leaky_relu",), "DKS",
{"leaky_relu": {"negative_slope": 0.8761257073473065,
"input_shift": -6.372924855154692e-13,
"output_shift": -0.049418692997275034,
"output_scale": 1.0651832705682147}})
check(("softplus",), "TAT",
{"softplus": {"input_scale": 0.15011489794748328,
"input_shift": 0.5374599901127068,
"output_shift": -0.996481014465811,
"output_scale": 10.54880187880574}})
check(("tanh", "softplus"), "TAT",
{"tanh": {"input_scale": 0.0580205218578313,
"input_shift": 0.5218639804099805,
"output_shift": -0.4796430704354356,
"output_scale": 22.36066261763117},
"softplus": {"input_scale": 0.15011489794748328,
"input_shift": 0.5374599901127068,
"output_shift": -0.996481014465811,
"output_scale": 10.54880187880574}})
check(("leaky_relu",), "TAT",
{"leaky_relu": {"output_scale": 1.196996549778802,
"negative_slope": 0.6291800290346146}})
check(("gelu", "gelu_exact"), "DKS",
{"gelu": {"input_scale": 0.07036271496383567,
"input_shift": 0.2586837248593587,
"output_shift": -0.15758328114374964,
"output_scale": 20.249573305194307},
"gelu_exact": {"input_scale": 0.07043253126531114,
"input_shift": 0.259489297927707,
"output_shift": -0.15815378961903598,
"output_scale": 20.21158902315857}})
def _run_value_tests(self, module, to_framework, to_numpy, places):
"""Test that transformed activation functions compute the correct values."""
val1 = to_framework(0.6)
val2 = to_framework(-0.6)
def check(activation_names, method, expected_values_dict):
act_dict = module.get_transformed_activations(
activation_names=activation_names,
method=method,
dks_params={"c_slope_1_target": 12.0, "local_q_slope_target": 0.97},
tat_params={"c_val_0_target": 0.95, "c_curve_target": 4.5},
subnet_max_func=_subnet_max_func
)
for name in expected_values_dict:
self.assertAlmostEqual(
to_numpy(act_dict[name](val1)), expected_values_dict[name][0],
places=places)
self.assertAlmostEqual(
to_numpy(act_dict[name](val2)), expected_values_dict[name][1],
places=places)
check(("softplus",), "DKS",
{"softplus": [0.5205088334781294, -0.6970897398761904]})
check(("tanh", "softplus"), "DKS",
{"tanh": [0.6910746968773931, -0.5122335409369118],
"softplus": [0.5205088334781294, -0.6970897398761904]})
check(("relu",), "DKS",
{"relu": [0.6053339463256114, -0.6486764456443863]})
check(("leaky_relu",), "DKS",
{"leaky_relu": [0.5628988987673328, -0.6649764416535503]})
check(("softplus",), "TAT",
{"softplus": [0.6923574763139374, -0.4935166367766701]})
check(("tanh", "softplus"), "TAT",
{"tanh": [0.6860178437500071, -0.49030275427738146],
"softplus": [0.6923574763139374, -0.4935166367766701]})
check(("leaky_relu",), "TAT",
{"leaky_relu": [0.7954047194402861, -0.2955187511683813]})
check(("tanh", "softplus", "relu", "leaky_relu"), "untransformed",
{"tanh": [0.5370495669980353, -0.5370495669980353],
"softplus": [1.0374879504858856, 0.4374879504858857],
"relu": [0.6, 0.0],
"leaky_relu": [0.6, -0.006]})
# GELU support was added in PyTorch 1.2.0
if (to_framework != torch.tensor
or parse_version(torch.__version__) >= parse_version("1.2.0")):
check(("gelu", "gelu_exact"), "TAT",
{"gelu": [0.6922557817568101, -0.491992384704867],
"gelu_exact": [0.6922429517529013, -0.49198103131465193]})
def test_transformed_activation_values_numpy(self):
self._run_value_tests(
activation_transform_numpy, np.asarray, lambda x: x, 8)
def test_transformed_activation_values_jax(self):
self._run_value_tests(
activation_transform_jax, jnp.asarray, np.asarray, 5)
def test_transformed_activation_values_pytorch(self):
self._run_value_tests(
activation_transform_pytorch, torch.tensor,
lambda x: x.detach().cpu().numpy(), 5)
def test_transformed_activation_values_tensorflow(self):
self._run_value_tests(
activation_transform_tf, tf.convert_to_tensor, lambda x: x.numpy(), 5)
if __name__ == "__main__":
absltest.main()
| dks-main | tests/test_activation_transform.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the parameter sampling functions for each framework."""
from absl.testing import absltest
from absl.testing import parameterized
from dks.jax import parameter_sampling_functions as parameter_sampling_functions_jax
from dks.pytorch import parameter_sampling_functions as parameter_sampling_functions_pytorch
from dks.tensorflow import parameter_sampling_functions as parameter_sampling_functions_tf
import jax
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import torch
tf.compat.v1.enable_eager_execution() # enable eager in case we are using TF1
class ParameterSamplingFunctionTest(parameterized.TestCase):
"""Perform some basic sanity checks for the parameter sampling functions.
For each tensor programming framework, we test whether multiplication of the
sampled parameters by random vectors behaves as expected regarding
(approximate) empirical q value preservation. Note that only when the input
  dimension is no larger than the output dimension will empirical q values be
exactly preserved (up to numerical precision).
"""
@parameterized.parameters((5, 7, 9, 3, 5), (3, 5, 9, 9, 5), (8, 8, 5, 7, 5),
(3072, 1024, 5, 3, 1))
def test_parameter_sampling_functions_jax(
self, in_channels, out_channels, dim1, dim2, places):
w = parameter_sampling_functions_jax.scaled_uniform_orthogonal(
jnp.array([1, 2], dtype=jnp.uint32), [in_channels, out_channels])
x = jax.random.normal(jnp.array([2, 3], dtype=jnp.uint32), [in_channels, 1])
expected_rq = np.sqrt(out_channels/in_channels)
self.assertAlmostEqual(
jnp.linalg.norm(jnp.matmul(w.T, x)) / jnp.linalg.norm(x),
expected_rq, places=places)
w = parameter_sampling_functions_jax.scaled_uniform_orthogonal(
jnp.array([3, 4], dtype=jnp.uint32),
(dim1, dim2, in_channels, out_channels))
self.assertAlmostEqual(
(jnp.linalg.norm(
jnp.matmul(w[(dim1-1) // 2, (dim2-1) // 2, :, :].T, x))
/ jnp.linalg.norm(x)),
expected_rq, places=places)
self.assertAlmostEqual(jnp.linalg.norm(
jnp.matmul(w[(dim1-1) // 2 + 1, (dim2-1) // 2, :, :].T, x)),
0.0, places=places)
self.assertAlmostEqual(jnp.linalg.norm(
jnp.matmul(w[(dim1-1) // 2, (dim2-1) // 2 + 1, :, :].T, x)),
0.0, places=places)
self.assertAlmostEqual(jnp.linalg.norm(
jnp.matmul(w[(dim1-1) // 2 + 1, (dim2-1) // 2 + 1, :, :].T, x)),
0.0, places=places)
if in_channels <= 200:
out_channels *= dim1 * dim2
w = parameter_sampling_functions_jax.scaled_uniform_orthogonal(
jnp.array([4, 5], dtype=jnp.uint32),
(dim1, dim2, in_channels, out_channels), delta=False)
x = jax.random.normal(jnp.array([5, 6], dtype=jnp.uint32),
[dim1 * dim2 * in_channels, 1])
self.assertAlmostEqual(
(jnp.linalg.norm(jnp.matmul(jnp.reshape(
w, [dim1 * dim2 * in_channels, out_channels]).T, x)
) / jnp.linalg.norm(x)),
np.sqrt(out_channels / (dim1 * dim2 * in_channels)), places=places)
@parameterized.parameters((5, 7, 9, 3, 5), (3, 5, 9, 9, 5), (8, 8, 5, 7, 5),
(3072, 1024, 5, 3, 1))
def test_parameter_sampling_functions_tensorflow(
self, in_channels, out_channels, dim1, dim2, places):
w = parameter_sampling_functions_tf.stateless_scaled_uniform_orthogonal(
[in_channels, out_channels], [1, 2])
x = tf.random.stateless_normal([in_channels, 1], [2, 3])
expected_rq = np.sqrt(out_channels/in_channels)
self.assertAlmostEqual(
(tf.norm(tf.matmul(w, x, transpose_a=True)) / tf.norm(x)).numpy(),
expected_rq, places=places)
w = parameter_sampling_functions_tf.stateless_scaled_uniform_orthogonal(
(dim1, dim2, in_channels, out_channels),
[3, 4])
self.assertAlmostEqual(
(tf.norm(
tf.matmul(w[(dim1-1) // 2, (dim2-1) // 2, :, :], x,
transpose_a=True))
/ tf.norm(x)).numpy(),
expected_rq, places=places)
self.assertAlmostEqual(
tf.norm(tf.matmul(w[(dim1-1) // 2 + 1, (dim2-1) // 2, :, :], x,
transpose_a=True)).numpy(),
0.0, places=places)
self.assertAlmostEqual(
tf.norm(tf.matmul(w[(dim1-1) // 2, (dim2-1) // 2 + 1, :, :], x,
transpose_a=True)).numpy(),
0.0, places=places)
self.assertAlmostEqual(
tf.norm(tf.matmul(w[(dim1-1) // 2 + 1, (dim2-1) // 2 + 1, :, :], x,
transpose_a=True)).numpy(),
0.0, places=places)
if in_channels <= 200:
out_channels *= dim1 * dim2
w = parameter_sampling_functions_tf.stateless_scaled_uniform_orthogonal(
(dim1, dim2, in_channels, out_channels), [4, 5], delta=False)
x = tf.random.stateless_normal([dim1 * dim2 * in_channels, 1], [5, 6])
self.assertAlmostEqual(
(tf.norm(tf.matmul(tf.reshape(
w, [dim1 * dim2 * in_channels, out_channels]), x,
transpose_a=True))
/ tf.norm(x)).numpy(),
np.sqrt(out_channels / (dim1 * dim2 * in_channels)), places=places)
@parameterized.parameters((5, 7, 9, 3, 5), (3, 5, 9, 9, 5), (8, 8, 5, 7, 5),
(3072, 1024, 5, 3, 1))
def test_parameter_sampling_functions_pytorch(
self, in_channels, out_channels, dim1, dim2, places):
torch.manual_seed(123)
def to_np(z):
return z.detach().cpu().numpy()
w = parameter_sampling_functions_pytorch.scaled_uniform_orthogonal_(
torch.empty(in_channels, out_channels))
x = torch.randn(in_channels, 1)
expected_rq = np.sqrt(out_channels/in_channels)
self.assertAlmostEqual(
to_np(torch.norm(torch.matmul(w.t(), x)) / torch.norm(x)),
expected_rq, places=places)
w = parameter_sampling_functions_pytorch.scaled_uniform_orthogonal_(
torch.empty(in_channels, out_channels, dim1, dim2))
self.assertAlmostEqual(
to_np(torch.norm(
torch.matmul(w[:, :, (dim1-1) // 2, (dim2-1) // 2].t(), x))
/ torch.norm(x)),
expected_rq, places=places)
self.assertAlmostEqual(to_np(torch.norm(
torch.matmul(w[:, :, (dim1-1) // 2 + 1, (dim2-1) // 2].t(), x))),
0.0, places=places)
self.assertAlmostEqual(to_np(torch.norm(
torch.matmul(w[:, :, (dim1-1) // 2, (dim2-1) // 2 + 1].t(), x))),
0.0, places=places)
self.assertAlmostEqual(to_np(torch.norm(
torch.matmul(w[:, :, (dim1-1) // 2 + 1, (dim2-1) // 2 + 1].t(), x))),
0.0, places=places)
if in_channels <= 200:
out_channels *= dim1 * dim2
w = parameter_sampling_functions_pytorch.scaled_uniform_orthogonal_(
torch.empty(in_channels, out_channels, dim1, dim2), delta=False)
x = torch.randn(dim1 * dim2 * in_channels, 1)
self.assertAlmostEqual(
to_np(torch.norm(torch.matmul(torch.reshape(
w.transpose(0, 1), [out_channels, dim1 * dim2 * in_channels]), x)
) / torch.norm(x)),
np.sqrt(out_channels / (dim1 * dim2 * in_channels)), places=places)
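# Illustrative reference (a sketch, not the dks implementation): the property
# exercised by the tests above can be reproduced with plain numpy. A scaled
# (semi-)orthogonal matrix W of shape [in, out] satisfies
# ||W^T x|| / ||x|| == sqrt(out / in) (exactly when out >= in, and only
# approximately, by concentration over random x, when out < in). The "delta"
# convolutional variant places that matrix at the central spatial tap and
# zeros everywhere else, which is what the slicing assertions above check.
# `_reference_delta_orthogonal` is a hypothetical helper for illustration
# only; it is not part of the dks API and uses the `np` alias already
# imported by this module.
def _reference_delta_orthogonal(dim1, dim2, in_channels, out_channels, seed=0):
  rng = np.random.default_rng(seed)
  size = max(in_channels, out_channels)
  # QR of a square Gaussian matrix yields an orthogonal Q.
  q, _ = np.linalg.qr(rng.normal(size=(size, size)))
  core = q[:in_channels, :out_channels]
  if out_channels > in_channels:
    # The sub-block has orthonormal rows; rescale so that the Rayleigh
    # quotient above becomes exactly sqrt(out / in).
    core = core * np.sqrt(out_channels / in_channels)
  w = np.zeros((dim1, dim2, in_channels, out_channels))
  w[(dim1 - 1) // 2, (dim2 - 1) // 2] = core
  return w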
if __name__ == "__main__":
absltest.main()
| dks-main | tests/test_parameter_sampling_functions.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic tests for the Haiku modified ResNet example model."""
from absl.testing import absltest
from absl.testing import parameterized
from dks.examples.haiku.modified_resnet import ModifiedResNet
import haiku as hk
import jax
import jax.numpy as jnp
class ModifiedResNetTest(parameterized.TestCase):
@parameterized.parameters((50, 0.0, "tanh", False), (101, 0.5, "relu", True),
(152, 0.9, "leaky_relu", True))
def test_model_instantiation_and_apply(self, depth, shortcut_weight, act_name,
resnet_v2):
"""Tests that the model can be instantiated and applied on data."""
def func(batch, is_training):
model = ModifiedResNet(
num_classes=1000,
depth=depth,
resnet_v2=resnet_v2,
activation_name=act_name,
shortcut_weight=shortcut_weight,
)
return model(batch, is_training=is_training)
forward = hk.without_apply_rng(hk.transform_with_state(func))
rng = jax.random.PRNGKey(42)
image = jnp.ones([2, 224, 224, 3])
params, state = forward.init(rng, image, is_training=True)
logits, state = forward.apply(params, state, image, is_training=True)
self.assertEqual(logits.shape, (2, 1000))
if __name__ == "__main__":
absltest.main()
| dks-main | tests/test_haiku_modified_resnet.py |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the data preprocessing module for each framework."""
import functools
from absl.testing import absltest
from dks.jax import data_preprocessing as data_preprocessing_jax
from dks.pytorch import data_preprocessing as data_preprocessing_pytorch
from dks.tensorflow import data_preprocessing as data_preprocessing_tf
import jax.numpy as jnp
import numpy as np
import tensorflow as tf
import torch
class DataPreprocessingTest(absltest.TestCase):
"""Test class for this module."""
def _run_pln_test(self, module, to_framework, to_numpy, places):
"""Test PLN function outputs the correct values."""
def assert_almost_equal(x, y):
np.testing.assert_almost_equal(x, y, decimal=places)
def pln(x, **kwargs):
return to_numpy(module.per_location_normalization(
to_framework(x), **kwargs))
def check_is_normalized(x):
assert_almost_equal(np.mean(np.square(x), axis=-1), np.ones(x.shape[:-1]))
np.random.seed(123)
shape_list = {(2, 5, 7, 2), (5, 6, 1), (3, 4), (8,), (1, 1), (1, 10)}
for shape in shape_list:
for homog_mode in {"one", "off", "avg_q"}:
for homog_scale in {1.0, 3.2}:
for has_batch_dim in {True, False}:
if (homog_mode == "avg_q" and
((len(shape) <= 2 and has_batch_dim)
or (len(shape) <= 1 and not has_batch_dim))): # pylint: disable=g-explicit-length-test
with self.assertRaises(ValueError):
y = pln(np.random.normal(size=shape), homog_mode=homog_mode,
homog_scale=homog_scale, has_batch_dim=has_batch_dim)
else:
y = pln(np.random.normal(size=shape), homog_mode=homog_mode,
homog_scale=homog_scale, has_batch_dim=has_batch_dim)
check_is_normalized(y)
expected_y_shape = list(shape).copy()
if has_batch_dim and len(expected_y_shape) == 1:
expected_y_shape = expected_y_shape + [1]
if homog_mode != "off":
expected_y_shape[-1] = expected_y_shape[-1] + 1
assert y.shape == tuple(expected_y_shape)
y = pln(0.3 * np.ones(()), homog_mode="one", has_batch_dim=False)
assert_almost_equal(y, np.sqrt(2 / (1 + 0.3**2)) * np.array([0.3, 1.0]))
with self.assertRaises(ValueError):
pln(np.random.normal(size=()), homog_mode="avg_q", has_batch_dim=True)
y = pln(0.7 * np.ones((10, 6, 3)), homog_mode="off")
assert_almost_equal(y, np.ones((10, 6, 3)))
y = pln(0.7 * np.ones((10, 6, 3)), homog_mode="one", homog_scale=2.5)
assert_almost_equal(
y, np.sqrt(4 / (3 + (2.5 / 0.7) ** 2)) * np.concatenate(
[np.ones((10, 6, 3)), 2.5 / 0.7 * np.ones((10, 6, 1))], axis=-1))
y = pln(0.7 * np.ones((10, 6, 3)), homog_mode="avg_q")
assert_almost_equal(y, np.ones((10, 6, 4)))
y = pln(0.7 * np.ones((10, 6, 3)), homog_mode="avg_q", homog_scale=2.0)
assert_almost_equal(
y, np.sqrt(4 / 7) * np.concatenate(
[np.ones((10, 6, 3)), 2 * np.ones((10, 6, 1))], axis=-1))
def test_per_location_normalization_jax(self):
self._run_pln_test(
data_preprocessing_jax, jnp.asarray, np.asarray, 5)
def test_per_location_normalization_pytorch(self):
self._run_pln_test(
data_preprocessing_pytorch, torch.tensor,
lambda x: x.detach().cpu().numpy(), 5)
def test_per_location_normalization_tensorflow(self):
self._run_pln_test(
data_preprocessing_tf,
functools.partial(tf.convert_to_tensor, dtype=tf.float32),
lambda x: x.numpy(), 5)
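# Illustrative reference (a sketch, not the dks implementation) for the
# simplest case checked above: with homog_mode="one", a constant homogeneous
# coordinate equal to `homog_scale` is appended along the channel (last) axis
# and the result is rescaled so that the mean squared value over that axis is
# exactly one. `_reference_pln_one` is a hypothetical helper for inputs that
# already have a channel axis; the real function additionally handles
# scalars, missing batch dimensions, and the "off"/"avg_q" homogeneous modes.
def _reference_pln_one(x, homog_scale=1.0):
  h = np.full(x.shape[:-1] + (1,), homog_scale)
  y = np.concatenate([x, h], axis=-1)
  q = np.mean(np.square(y), axis=-1, keepdims=True)
  return y / np.sqrt(q)
# For example, _reference_pln_one(0.7 * np.ones((10, 6, 3)), 2.5) reproduces
# the corresponding assertion in _run_pln_test above.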
if __name__ == "__main__":
absltest.main()
| dks-main | tests/test_data_preprocessing.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S6 is a just-in-time compiler and profiler for Python."""
from s6.python.api import CompilationFailedError
from s6.python.api import inspect
from s6.python.api import jit
from s6.python.api import NotCompiledError
from s6.python.api import S6CodeDetail
from s6.python.api import S6JitCallable
__all__ = [
"CompilationFailedError",
"NotCompiledError",
"S6CodeDetail",
"S6JitCallable",
"inspect",
"jit",
]
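# Typical usage of the names exported above, as exercised by
# src/python/api_test.py. Kept as comments so that importing this package has
# no side effects; treat it as an illustrative sketch rather than normative
# documentation.
#
#   import s6
#
#   @s6.jit
#   def add(a, b=2):
#       return a + b
#
#   add(3)                       # runs under S6; compiles once the code is hot
#   info = s6.inspect(add)
#   info.force_compile()         # or compile eagerly
#   assert info.is_compiled
#   print(info.strongjit)        # textual strongjit IR for the compiled code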
| s6-main | src/__init__.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:skip-file
# ----------------------------------
# Options affecting listfile parsing
# ----------------------------------
with section('parse'):
# Specify structure for custom cmake functions
additional_commands = {
's6_cc_library': {
'kwargs': {
'NAME': '*',
'SRCS': '*',
'HDRS': '*',
'DEPS': '*',
'INCLUDE_DIRS': '*',
'LINK_OPTS': '*',
}
},
's6_cc_binary': {
'kwargs': {
'NAME': '*',
'SRCS': '*',
'DEPS': '*',
'INCLUDE_DIRS': '*',
'LINK_OPTS': '*',
}
},
's6_cc_test': {
'kwargs': {
'NAME': '*',
'SRCS': '*',
'DEPS': '*',
'INCLUDE_DIRS': '*',
'LINK_OPTS': '*',
}
},
's6_cc_proto_library': {
'kwargs': {
'NAME': '*',
'SRCS': '*',
}
},
's6_pybind_extension': {
'kwargs': {
'NAME': '*',
'OUTPUT_NAME': '*',
'SRCS': '*',
'DEPS': '*',
'INCLUDE_DIRS': '*',
'LINK_OPTS': '*',
}
},
's6_py_test': {
'kwargs': {
'NAME': '*',
'SRCS': '*',
}
},
}
# Override configurations per-command where available
override_spec = {}
# Specify variable tags.
vartags = []
# Specify property tags.
proptags = []
# -----------------------------
# Options affecting formatting.
# -----------------------------
with section('format'):
# Disable formatting entirely, making cmake-format a no-op
disable = False
# How wide to allow formatted cmake files
line_width = 80
# How many spaces to tab for indent
tab_size = 2
# If true, lines are indented using tab characters (utf-8 0x09) instead of
# <tab_size> space characters (utf-8 0x20). In cases where the layout would
# require a fractional tab character, the behavior of the fractional
# indentation is governed by <fractional_tab_policy>
use_tabchars = False
# If <use_tabchars> is True, then the value of this variable indicates how
# fractional indentions are handled during whitespace replacement. If set to
# 'use-space', fractional indentation is left as spaces (utf-8 0x20). If set
# to `round-up` fractional indentation is replaced with a single tab character
# (utf-8 0x09) effectively shifting the column to the next tabstop
fractional_tab_policy = 'use-space'
# If an argument group contains more than this many sub-groups (parg or kwarg
# groups) then force it to a vertical layout.
max_subgroups_hwrap = 2
# If a positional argument group contains more than this many arguments, then
# force it to a vertical layout.
max_pargs_hwrap = 3
# If a cmdline positional group consumes more than this many lines without
# nesting, then invalidate the layout (and nest)
max_rows_cmdline = 2
# If true, separate flow control names from their parentheses with a space
separate_ctrl_name_with_space = False
# If true, separate function names from parentheses with a space
separate_fn_name_with_space = False
# If a statement is wrapped to more than one line, than dangle the closing
# parenthesis on its own line.
dangle_parens = False
# If the trailing parenthesis must be 'dangled' on its own line, then align it
# to this reference: `prefix`: the start of the statement, `prefix-indent`:
# the start of the statement, plus one indentation level, `child`: align to
# the column of the arguments
dangle_align = 'prefix'
# If the statement spelling length (including space and parenthesis) is
# smaller than this amount, then force reject nested layouts.
min_prefix_chars = 4
# If the statement spelling length (including space and parenthesis) is larger
# than the tab width by more than this amount, then force reject un-nested
# layouts.
max_prefix_chars = 10
# If a candidate layout is wrapped horizontally but it exceeds this many
# lines, then reject the layout.
max_lines_hwrap = 2
# What style line endings to use in the output.
line_ending = 'unix'
# Format command names consistently as 'lower' or 'upper' case
command_case = 'lower'
# Format keywords consistently as 'lower' or 'upper' case
keyword_case = 'upper'
# A list of command names which should always be wrapped
always_wrap = []
# If true, the argument lists which are known to be sortable will be sorted
# lexicographically
enable_sort = True
# If true, the parsers may infer whether or not an argument list is sortable
# (without annotation).
autosort = False
# By default, if cmake-format cannot successfully fit everything into the
# desired linewidth it will apply the last, most aggressive attempt that it
# made. If this flag is True, however, cmake-format will print error, exit
# with non-zero status code, and write-out nothing
require_valid_layout = False
# A dictionary mapping layout nodes to a list of wrap decisions. See the
# documentation for more information.
layout_passes = {}
# ------------------------------------------------
# Options affecting comment reflow and formatting.
# ------------------------------------------------
with section('markup'):
# What character to use for bulleted lists
bullet_char = '*'
# What character to use as punctuation after numerals in an enumerated list
enum_char = '.'
# If comment markup is enabled, don't reflow the first comment block in each
# listfile. Use this to preserve formatting of your copyright/license
# statements.
first_comment_is_literal = False
# If comment markup is enabled, don't reflow any comment block which matches
# this (regex) pattern. Default is `None` (disabled).
literal_comment_pattern = None
# Regular expression to match preformat fences in comments default=
# ``r'^\s*([`~]{3}[`~]*)(.*)$'``
fence_pattern = '^\\s*([`~]{3}[`~]*)(.*)$'
# Regular expression to match rulers in comments default=
# ``r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'``
ruler_pattern = '^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$'
# If a comment line starts with this pattern then it is explicitly a
# trailing comment for the preceding argument. Default is '#<'
explicit_trailing_pattern = '#<'
# If a comment line starts with at least this many consecutive hash
# characters, then don't lstrip() them off. This allows for lazy hash rulers
# where the first hash char is not separated by space
hashruler_min_length = 10
# If true, then insert a space between the first hash char and remaining hash
# chars in a hash ruler, and normalize its length to fill the column
canonicalize_hashrulers = True
# enable comment markup parsing and reflow
enable_markup = True
# ----------------------------
# Options affecting the linter
# ----------------------------
with section('lint'):
# a list of lint codes to disable
disabled_codes = []
# regular expression pattern describing valid function names
function_pattern = '[0-9a-z_]+'
# regular expression pattern describing valid macro names
macro_pattern = '[0-9A-Z_]+'
# regular expression pattern describing valid names for variables with global
# (cache) scope
global_var_pattern = '[A-Z][0-9A-Z_]+'
# regular expression pattern describing valid names for variables with global
# scope (but internal semantic)
internal_var_pattern = '_[A-Z][0-9A-Z_]+'
# regular expression pattern describing valid names for variables with local
# scope
local_var_pattern = '[a-z][a-z0-9_]+'
# regular expression pattern describing valid names for private directory
# variables
private_var_pattern = '_[0-9a-z_]+'
# regular expression pattern describing valid names for public directory
# variables
public_var_pattern = '[A-Z][0-9A-Z_]+'
# regular expression pattern describing valid names for function/macro
# arguments and loop variables.
argument_var_pattern = '[a-z][a-z0-9_]+'
# regular expression pattern describing valid names for keywords used in
# functions or macros
keyword_pattern = '[A-Z][0-9A-Z_]+'
# In the heuristic for C0201, how many conditionals to match within a loop
# before considering the loop a parser.
max_conditionals_custom_parser = 2
# Require at least this many newlines between statements
min_statement_spacing = 1
# Require no more than this many newlines between statements
max_statement_spacing = 2
max_returns = 6
max_branches = 12
max_arguments = 5
max_localvars = 15
max_statements = 50
# -------------------------------
# Options affecting file encoding
# -------------------------------
with section('encode'):
# If true, emit the unicode byte-order mark (BOM) at the start of the file
emit_byteorder_mark = False
# Specify the encoding of the input file. Defaults to utf-8
input_encoding = 'utf-8'
# Specify the encoding of the output file. Defaults to utf-8. Note that cmake
# only claims to support utf-8 so be careful when using anything else
output_encoding = 'utf-8'
# -------------------------------------
# Miscellaneous configurations options.
# -------------------------------------
with section('misc'):
# A dictionary containing any per-command configuration overrides. Currently
# only `command_case` is supported.
per_command = {}
| s6-main | src/.cmake-format.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hello, S6!"""
print(__doc__)
| s6-main | src/python/hello_world.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s6.python.type_feedback."""
import sys
from absl.testing import absltest
from s6.classes.python import classes
from s6.python import type_feedback
classes.adopt_existing_types()
classes.adopt_new_types()
def _type_feedback(f):
return type_feedback.extract_from_code_object(f.__code__)
def _only_nonempty_type_feedback(f):
return list(filter(lambda x: x != "empty", _type_feedback(f)))[0]
@absltest.skipIf(sys.version_info.minor >= 7, "CALL_METHOD not supported yet")
class TypeFeedbackTest(absltest.TestCase):
def test_binary_add_custom_class(self):
def f(x):
return x + x
class C(object):
def __add__(self, other):
pass
self.assertNotEqual(classes.classid(C()), 0)
f(C())
f(C())
self.assertStartsWith(_only_nonempty_type_feedback(f), "monomorphic, C#")
def test_binary_add_longs(self):
def f(x):
return x + x
# Run f with only longs.
for i in range(5):
f(i)
self.assertStartsWith(_only_nonempty_type_feedback(f), "monomorphic, int#")
def test_binary_mul_multiple_types_is_polymorphic(self):
def f(x, y):
return x * y
for i in range(5):
f(i, float(i))
self.assertRegex(
_only_nonempty_type_feedback(f),
r"polymorphic, either float#\d+ or int#")
def test_many_types_is_megamorphic(self):
def f(x):
return x * x
for _ in range(5):
# This is a new Class every time through the loop.
class C(object):
def __mul__(self, other):
pass
f(C())
self.assertEqual(_only_nonempty_type_feedback(f), "megamorphic")
def test_getattr(self):
def f(x):
return x.__len__
f("abc")
self.assertStartsWith(_only_nonempty_type_feedback(f), "monomorphic, str#")
def test_setattr(self):
def f(x):
x.y = 2
class C(object):
pass
class D(object):
pass
f(D())
f(C())
self.assertRegex(
_only_nonempty_type_feedback(f), r"polymorphic, either D#\d+ or C#")
if __name__ == "__main__":
absltest.main()
| s6-main | src/python/type_feedback_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for correctness of s6 JIT."""
from absl import flags
from absl.testing import absltest
from s6.python import api as s6
from pybind11_abseil import status
FLAGS = flags.FLAGS
class FailedToCompileError(RuntimeError):
pass
def s6compile(f):
fjit = s6.jit(f)
try:
s6.inspect(f).force_compile()
except status.StatusNotOk as e:
raise FailedToCompileError(f'{e}') from None
return fjit
def expect_compile_failure(f):
"""Raises an AssertionError if f(..) does not raise FailedToCompileError."""
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except FailedToCompileError as e:
raise absltest.SkipTest(f'{e}')
else:
raise AssertionError(f'Expected {f} to fail with compile error')
return wrapper
class JitTest(absltest.TestCase):
def compile_and_make_checker(self, f):
fc = s6compile(f)
def assert_output_matches(*a):
with self.subTest(args=a):
self.assertEqual(f(*a), fc(*a))
return assert_output_matches
def test_add(self):
def f(a, b):
return a + b
assert_output_matches = self.compile_and_make_checker(f)
assert_output_matches(3, 4)
assert_output_matches(5, 6)
def test_finally_return_simple(self):
def f(i):
try:
if i & 1:
return 1
finally:
if i & 2:
return 2
return 3
assert_output_matches = self.compile_and_make_checker(f)
for i in range(4):
assert_output_matches(i)
def test_finally_return(self):
def f(i):
try:
if i & 1:
return 1
try:
if i & 2:
return 4
finally:
if i & 4:
return 5
finally:
if i & 8:
return 2
try:
if i & 16:
return 6
finally:
if i & 32:
return 7
return 3
assert_output_matches = self.compile_and_make_checker(f)
for i in range(64):
assert_output_matches(i)
def test_finally_falltrough(self):
def f():
a = 4
try:
a += 6
finally:
a += 5
return a
assert_output_matches = self.compile_and_make_checker(f)
assert_output_matches()
def test_finally_loop(self):
def f(i):
try:
x = 0
for _ in range(5):
x += 1
try:
x += 10
if i & 2:
break
x += 100
if i & 4:
continue
x += 1000
if i & 8:
return x
finally:
x += 10**4
if i & 16:
break
x += 10**5
if i & 32:
return x
x += 10**6
return x
finally:
if i & 1:
return -1
assert_output_matches = self.compile_and_make_checker(f)
for i in range(64):
assert_output_matches(i)
def test_except(self):
def raiseif(b, a):
if b:
raise RuntimeError('raiseif')
return a
def f(i):
a = 1
try:
a += 2
a = raiseif(i & 1, a + 4)
a += 8
a = raiseif(i & 2, a + 16)
a += 32
raise RuntimeError('raise anyway')
except RuntimeError:
a += 64
return a
assert_output_matches = self.compile_and_make_checker(f)
for i in range(4):
assert_output_matches(i)
def test_finally_except(self):
def raiseif(b, a):
if b:
raise RuntimeError('raiseif')
return a
def f(i):
try:
a = 1
try:
a += 2
a = raiseif(i & 1, a + 4)
a += 8
raise RuntimeError('raise anyway')
finally:
a += 16
if i & 2:
return a
except RuntimeError:
a += 32
return a + 64
assert_output_matches = self.compile_and_make_checker(f)
for i in range(4):
assert_output_matches(i)
def test_using_super(self):
class X:
def g(self, a):
return 2 * a + 3
class Y(X):
def f(self, x, y):
return super().g(x + y)
y = Y()
assert_output_matches = self.compile_and_make_checker(Y.f)
assert_output_matches(y, 3, 5)
if __name__ == '__main__':
absltest.main()
| s6-main | src/python/jit_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s6.python.api."""
import time
from absl import flags
from absl.testing import absltest
from s6.python import api
FLAGS = flags.FLAGS
class ApiTest(absltest.TestCase):
def test_jit_decorator(self):
@api.jit
def f(a, b=2):
return a + b
self.assertEqual(f(3, b=1), 4)
def test_hot_jit_decorator(self):
@api.jit
def f(a, b=2):
return a + b
# TODO: Does this demonstrate that our barrier to compilation
# (profile_instruction_interval) is too high?
for i in range(500000):
f(i)
self.assertTrue(api.inspect(f).is_compiled)
self.assertEqual(f(3, b=1), 4)
def test_hot_jit_decorator_as_method(self):
class C(object):
@api.jit
def f(self, a, b=2):
return a + b
# TODO: Does this demonstrate that our barrier to compilation
# (profile_instruction_interval) is too high?
c = C()
for i in range(500000):
c.f(i)
self.assertTrue(api.inspect(c.f).is_compiled)
self.assertEqual(c.f(3, b=1), 4)
def test_jit_as_method(self):
class C(object):
@api.jit
def f(self, a):
return a + 1
c = C()
self.assertEqual(c.f(2), 3)
def test_jit_decorator_on_recursive_function(self):
@api.jit
def fib(n):
return fib(n - 1) + fib(n - 2) if n >= 2 else 1
for _ in range(1000):
fib(10)
self.assertTrue(api.inspect(fib).is_compiled)
def test_inspect(self):
@api.jit
def f(a, b=2):
return a + b
f(2)
i = api.inspect(f)
self.assertFalse(i.is_compiled)
self.assertRaises(api.NotCompiledError, lambda: i.strongjit)
i.force_compile()
self.assertTrue(i.is_compiled)
self.assertIn('type_feedback', i.strongjit)
    self.assertNotEmpty(i.x86asm)
self.assertEqual(f(2), 4)
i.deoptimize()
self.assertFalse(i.is_compiled)
self.assertEqual(f(2), 4)
def test_jit_and_interpret(self):
@api.jit
def f(a, b=2):
return a + b
i = api.inspect(f)
i.force_compile()
self.assertTrue(i.is_compiled)
self.assertNotIn('type_feedback', i.strongjit)
self.assertEqual(f._interpret(2), 4)
i.deoptimize()
self.assertFalse(i.is_compiled)
i.force_compile()
self.assertTrue(i.is_compiled)
self.assertIn('type_feedback', i.strongjit)
self.assertEqual(f(2), 4)
def test_jit_and_evaluate(self):
@api.jit
def f(a, b=2):
return a + b
i = api.inspect(f)
i.force_compile()
self.assertEqual(f._evaluate(2), 4)
self.assertEqual(f(2), 4)
def test_jit_forwards_docstring(self):
@api.jit
def f():
"""I am docstring."""
return None
self.assertEqual(f.__doc__, 'I am docstring.')
def test_jit_forwards_name(self):
@api.jit
def f():
return None
self.assertEqual(f.__name__, 'f')
if __name__ == '__main__':
absltest.main()
| s6-main | src/python/api_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests classes.adopt_existing_types().
This function, once called, cannot be undone, so this test is isolated from the
other adoption tests in classes_test.py.
"""
from absl.testing import absltest
import numpy
from s6.classes.python import classes
classes.adopt_existing_types()
class AdoptExistingTypesTest(absltest.TestCase):
def assertHasClass(self, c):
return self.assertNotEqual(classes.classid(c), 0)
def test_existing_types_adopted(self):
self.assertHasClass(42)
self.assertHasClass(2.0)
self.assertHasClass("str")
self.assertHasClass(True)
self.assertHasClass(numpy.ndarray([]))
if __name__ == "__main__":
absltest.main()
| s6-main | src/classes/python/adopt_existing_types_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s6.class.python.class."""
import contextlib
from absl.testing import absltest
from absl.testing import parameterized
from s6.classes.python import classes
@contextlib.contextmanager
def adopt_new_types():
classes.adopt_new_types()
yield
classes.stop_adopting_new_types()
class ClassTest(parameterized.TestCase):
def assertZeroClass(self, obj):
self.assertEqual(classes.classid(obj), 0)
def assertNonzeroClass(self, obj):
self.assertNotEqual(classes.classid(obj), 0)
def test_unadopted_object(self):
# An unadopted object has a zero class.
self.assertZeroClass(self)
def test_adoptable(self):
# An instance of custom type should be adoptable.
class C(object):
pass
self.assertTrue(classes.adoptable(C()))
# So should longs.
self.assertTrue(classes.adoptable(42))
# But modules have a custom setter, so they aren't expected to be
# adoptable.
self.assertFalse(classes.adoptable(classes))
# Objects with existing instance attributes cannot be adopted.
c = C()
self.assertTrue(classes.adoptable(c))
c.x = 2
self.assertFalse(classes.adoptable(c))
# Type is special.
self.assertFalse(classes.adoptable(type))
# Types derived from unadoptable types aren't adoptable.
class D(type(classes)):
pass
d = D('x')
self.assertFalse(classes.adoptable(d))
def test_adopt(self):
class C(object):
pass
c = C()
classes.adopt(c)
c_classid = classes.classid(c)
self.assertNonzeroClass(c)
# Two objects with the same underlying type should have the same class.
d = C()
self.assertEqual(c_classid, classes.classid(d))
# But objects with different types should not.
class E(object):
pass
e = E()
classes.adopt(e)
self.assertNonzeroClass(e)
self.assertNotEqual(classes.classid(e), c_classid)
# We should be able to adopt things like floats.
f = 45.0
classes.adopt(f)
self.assertNonzeroClass(f)
def test_adopt_type(self):
class C(object):
pass
# New instances of C don't have a class.
self.assertZeroClass(C())
# But then I adopt C, and new instances now have a class.
classes.adopt(C())
self.assertNonzeroClass(C())
# And the class of multiple objects is still the same.
self.assertEqual(classes.classid(C()), classes.classid(C()))
# But instances of (newly) derived types do NOT have a class.
class D(C):
pass
self.assertZeroClass(D())
# But if I start adopting new types...
with adopt_new_types():
# Then new instances of new types *should* have a class.
class E(C):
pass
self.assertNonzeroClass(E())
def test_adopt_type_object(self):
class C(object):
def __init__(self):
pass
# New instances of C don't have a class.
self.assertZeroClass(C())
# But then I adopt C, and new instances now have a class.
classes.adopt(C())
self.assertNonzeroClass(C())
# Also, C now has a class (it's a type class).
self.assertNonzeroClass(C)
# The type class should have __call__, and __init__
self.assertEqual(classes.get_class_attributes(C)['__call__'], type.__call__)
self.assertEqual(classes.get_class_attributes(C)['__init__'], C.__init__)
def test_adopt_metatype(self):
# I turn on type tracking...
with adopt_new_types():
# And I define a metaclass...
class Meta(type):
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
return x
# And F uses that metaclass.
class F(metaclass=Meta):
pass
# Instances of F should have classes already.
f = F()
self.assertNonzeroClass(f)
def test_add_attribute_transitions_class(self):
class C(object):
pass
c = C()
classes.adopt(c)
original_class = classes.classid(c)
self.assertNonzeroClass(c)
# I add an attribute, and I get a new class.
c.x = 2
self.assertNonzeroClass(c)
self.assertNotEqual(original_class, classes.classid(c))
# I add an attribute, but I'm cheeky about it. I ask for the dict and write
# into that.
c.__dict__['x'] = 2
# This kills the class.
self.assertZeroClass(c)
def test_modify_attribute_does_not_transition(self):
class C(object):
pass
c = C()
classes.adopt(c)
original_class = classes.classid(c)
self.assertNonzeroClass(c)
# I add an attribute, and I get a new class.
c.x = 2
class_with_x = classes.classid(c)
self.assertNonzeroClass(c)
self.assertNotEqual(original_class, class_with_x)
# Add change c.x. I should have the same class.
c.x = 3
self.assertEqual(class_with_x, classes.classid(c))
# Now I remove it. I should get a new class that isn't zero.
del c.x
self.assertNonzeroClass(c)
self.assertNotEqual(original_class, classes.classid(c))
self.assertNotEqual(class_with_x, classes.classid(c))
def test_get_type_attributes(self):
class C(object):
def f(self):
pass
d = classes.get_type_attributes(C)
self.assertEqual(d['f'], C.f)
C.f = 22
d = classes.get_type_attributes(C)
self.assertEqual(d['f'], 22)
class D(C):
def f(self):
pass
d = classes.get_type_attributes(C)
self.assertEqual(d['f'], 22)
d = classes.get_type_attributes(D)
self.assertEqual(d['f'], D.f)
def test_get_type_attributes_metatype(self):
# I define a metaclass...
class Meta(type):
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
# All instances will have an `f` attribute.
x.f = 2
return x
# And F uses that metaclass.
class F(metaclass=Meta):
pass
d = classes.get_type_attributes(F)
self.assertEqual(d['f'], 2)
def test_type_modifications(self):
class C(object):
pass
c = C()
classes.adopt(c)
self.assertNonzeroClass(c)
original_class = classes.classid(c)
self.assertNotIn('x', classes.get_class_attributes(c))
C.x = 2
self.assertEqual(classes.classid(c), original_class)
self.assertIn('x', classes.get_class_attributes(c))
def test_shadowed_attr(self):
class C(object):
def f(self):
pass
c = C()
classes.adopt(c)
original_class_id = classes.classid(c)
c.f = 2
f_class_id = classes.classid(c)
self.assertNotEqual(original_class_id, f_class_id)
self.assertEqual(classes.get_class_attributes(c)['f'], 2)
# Setting the attribute to the same value shouldn't change anything.
c.f = 2
self.assertEqual(f_class_id, classes.classid(c))
self.assertEqual(classes.get_class_attributes(c)['f'], 2)
# But setting it to something else should erase any assumptions about what
# c.f could hold.
c.f = 3
self.assertEqual(f_class_id, classes.classid(c))
self.assertIsNone(classes.get_class_attributes(c)['f'])
# And deleting the instance attribute should re-expose the type attribute.
del c.f
self.assertEqual(classes.get_class_attributes(c)['f'], C.f)
def test_shadowed_data_descriptor(self):
class Descr(object):
def __get__(self, *args):
pass
def __set__(self, *args):
pass
descr = Descr()
class C(object):
pass
c = C()
classes.adopt(c)
c.x = 2
self.assertEqual(classes.get_class_attributes(c)['x'], 2)
original_class_id = classes.classid(c)
# Let's add a data descriptor to C. That shouldn't cause a transition, but
# the data descriptor should shadow `c.x`.
C.x = descr
self.assertEqual(original_class_id, classes.classid(c))
self.assertEqual(classes.get_class_attributes(c)['x'], descr)
# Now let's delete the data descriptor. We should get `c.x` back.
del C.x
self.assertEqual(original_class_id, classes.classid(c))
self.assertEqual(classes.get_class_attributes(c)['x'], 2)
def test_base_type_update(self):
class C(object):
pass
class D(C):
pass
d = D()
classes.adopt(d)
self.assertNotIn('x', classes.get_class_attributes(d))
C.x = 2
self.assertIn('x', classes.get_class_attributes(d))
def test_bases_update(self):
# Deliberately build a very deep hierarchy. Adopt the bottom type in the
# hierarchy and fiddle with the __bases__ member of D.
class C(object):
x = 2
class D(C):
pass
class E(D):
pass
class F(E):
pass
class G(object):
pass
f = F()
classes.adopt(f)
self.assertIn('x', classes.get_class_attributes(f))
D.__bases__ = (G,)
self.assertNotIn('x', classes.get_class_attributes(f))
def test_divergent_transition(self):
class C(object):
pass
c1 = C()
classes.adopt(c1)
c2 = C()
classes.adopt(c2)
c1.x = 42
c2.x = 23
# 'x' must be in the attributes list but must not have a known value,
# because the value differs between c1 and c2.
self.assertEqual(classes.classid(c1), classes.classid(c2))
self.assertIsNone(classes.get_class_attributes(c1)['x'])
self.assertIsNone(classes.get_class_attributes(c2)['x'])
def test_wrapped_setattr(self):
class D(type):
def __setattr__(cls, name, value):
super().__setattr__(name, value)
def __delattr__(cls, name):
super().__delattr__(name)
class E(object, metaclass=D):
pass
e = E()
try:
classes.adopt(e)
except:
pass
E.x = 2
del E.x
def test_data_descriptor_without_get(self):
class Descr(object):
def __set__(self, *args):
pass
descr = Descr()
class C(object):
pass
c = C()
classes.adopt(c)
c.x = 2
self.assertEqual(classes.get_class_attributes(c)['x'], 2)
original_class_id = classes.classid(c)
# Let's add a data descriptor to C. That shouldn't cause a transition, but
# the data descriptor should shadow `c.x`.
C.x = descr
self.assertEqual(original_class_id, classes.classid(c))
self.assertEqual(classes.get_class_attributes(c)['x'], descr)
# Now let's delete the data descriptor. We should get `c.x` back.
del C.x
self.assertEqual(original_class_id, classes.classid(c))
self.assertEqual(classes.get_class_attributes(c)['x'], 2)
def test_overwritten_getattr(self):
class C(object):
pass
c = C()
classes.adopt(c)
self.assertTrue(classes.class_is_valid(c))
C.__getattr__ = lambda *args: 42
self.assertFalse(classes.class_is_valid(c))
def test_overwritten_setattr(self):
class C(object):
pass
c = C()
classes.adopt(c)
self.assertTrue(classes.class_is_valid(c))
C.__setattr__ = lambda *args: 42
self.assertFalse(classes.class_is_valid(c))
def test_overwritten_delattr(self):
class C(object):
pass
c = C()
classes.adopt(c)
self.assertTrue(classes.class_is_valid(c))
C.__delattr__ = lambda *args: 42
self.assertFalse(classes.class_is_valid(c))
def test_ndarray(self):
import numpy
a = numpy.ndarray([])
classes.adopt(a)
self.assertNonzeroClass(a)
# Test distilled from programs using numpy; ndarray inherits from object
# but not as the first item in its MRO. The symptom is a TypeError with
# message "can't apply this __setattr__ to numpy.ndarray object".
self.assertRaises(AttributeError, lambda: a.__setattr__('x', 2))
def test_nested_wrapped_setattr(self):
class C(object):
def __setattr__(self, name, value):
pass
class D(C):
def xxxx(self, name, value):
return object.__setattr__(self, name, value)
d = D()
try:
classes.adopt(d)
except:
pass
d.xxxx('x', 3)
def test_modified_type(self):
class C(object):
def x(self):
return 2
classes.adopt(C())
C.y = C()
c = C()
self.assertNonzeroClass(c)
c.a = 2
self.assertNonzeroClass(c)
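# Summary of the hidden-class behaviour exercised above (as observed from
# these tests; a sketch rather than a specification):
#   * classes.adopt(obj) gives obj a non-zero classid, shared by instances of
#     the same type with the same instance-attribute layout.
#   * Adding or deleting an instance attribute transitions the object to a
#     different non-zero classid; rebinding an existing attribute does not.
#   * Writing through obj.__dict__ directly zeroes the class, and overriding
#     __getattr__/__setattr__/__delattr__ on the type invalidates it
#     (classes.class_is_valid returns False).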
if __name__ == '__main__':
absltest.main()
| s6-main | src/classes/python/classes_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for s6.strongjit.dis6."""
from absl.testing import absltest
from absl.testing import parameterized
from s6.strongjit import dis6
from pybind11_abseil import status
# Each test checks if the first line, the definition of the function, is
# correct.
class Dis6Test(parameterized.TestCase):
def test_dis6_function(self):
def foo():
pass
ir_string = dis6.dis6(foo)
self.assertNotEmpty(ir_string)
self.assertTrue(ir_string.startswith('function foo {'))
def test_dis6_lambda(self):
x = lambda a: a + 1
ir_string = dis6.dis6(x)
self.assertNotEmpty(ir_string)
self.assertTrue(ir_string.startswith('function <lambda> {'))
def test_dis6_bound_method(self):
class Foo:
def bar(self):
pass
foo = Foo()
ir_string = dis6.dis6(foo.bar)
self.assertNotEmpty(ir_string)
self.assertTrue(ir_string.startswith('function bar {'))
def test_dis6_unbound_method(self):
class Foo:
def bar():
pass
ir_string = dis6.dis6(Foo.bar)
self.assertNotEmpty(ir_string)
self.assertTrue(ir_string.startswith('function bar {'))
def test_dis6_callable(self):
class Foo:
def __call__():
pass
foo = Foo()
ir_string = dis6.dis6(foo.__call__)
self.assertNotEmpty(ir_string)
self.assertTrue(ir_string.startswith('function __call__ {'))
class A:
pass
@parameterized.parameters(1, 'test123', A, A())
def test_dis6_invalid_argument(self, obj):
if obj == self.A:
with self.assertRaisesRegex(status.StatusNotOk,
'Argument must be a function or method.'):
dis6.dis6(obj)
else:
with self.assertRaisesRegex(TypeError,
'incompatible function arguments.'):
dis6.dis6(obj)
if __name__ == '__main__':
absltest.main()
| s6-main | src/strongjit/dis6_test.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modified from unpack sequence benchmark in https://github.com/python/pyperformance.
Modifications include:
- Removing pyperf (causes seg fault due to S6 de-referencing frame pointer,
known bug).
- Run with and without S6 and compare.
- Move the `do_unpacking` for-loop out by one function call.
Microbenchmark for Python's sequence unpacking.
"""
import sys
import time
import s6
def do_unpacking(to_unpack):
# 400 unpackings
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
def bench_tuple_unpacking(loops):
x = tuple(range(10))
range_it = range(loops)
t0 = time.time()
for _ in range_it:
do_unpacking(x)
return time.time() - t0
def bench_list_unpacking(loops):
x = list(range(10))
range_it = range(loops)
t0 = time.time()
for _ in range_it:
do_unpacking(x)
return time.time() - t0
def _bench_all(loops):
dt1 = bench_tuple_unpacking(loops)
dt2 = bench_list_unpacking(loops)
return dt1 + dt2
def bench_all(loops):
return _bench_all(loops)
@s6.jit
def s6_bench_all(loops):
return bench_all(loops)
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
if __name__ == "__main__":
it = 4000
if len(sys.argv) >= 2:
it = int(sys.argv[1])
print(f'Starting unpack sequence (fast) benchmark, running for {it} iterations.')
py37_time = bench_all(it)
no_warmup_s6_time = s6_bench_all(it)
print(f'S6 time without warmup: {no_warmup_s6_time} sec.')
print(f'Default Python 3.7 time: {py37_time} sec.')
print(f'Speedup: {(float(py37_time)) / no_warmup_s6_time}')
| s6-main | src/benchmarks/unpack_sequence_fast.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modified from Raytrace benchmark in https://github.com/python/pyperformance.
Modifications include:
- Removing pyperf (causes seg fault due to S6 de-referencing frame pointer,
known bug).
- Run with and without S6 and compare.
This file contains definitions for a simple raytracer.
Copyright Callum and Tony Garnock-Jones, 2008.
This file may be freely redistributed under the MIT license,
http://www.opensource.org/licenses/mit-license.php
From http://www.lshift.net/blog/2008/10/29/toy-raytracer-in-python
"""
import array
import math
import s6
import time
import sys
DEFAULT_LOOPS = 100
DEFAULT_WIDTH = 100
DEFAULT_HEIGHT = 100
EPSILON = 0.00001
class Vector(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Vector(%s,%s,%s)' % (self.x, self.y, self.z)
def magnitude(self):
return math.sqrt(self.dot(self))
def __add__(self, other):
if other.isPoint():
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
else:
return Vector(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
other.mustBeVector()
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
def scale(self, factor):
return Vector(factor * self.x, factor * self.y, factor * self.z)
def dot(self, other):
other.mustBeVector()
return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
def cross(self, other):
other.mustBeVector()
return Vector(self.y * other.z - self.z * other.y,
self.z * other.x - self.x * other.z,
self.x * other.y - self.y * other.x)
def normalized(self):
return self.scale(1.0 / self.magnitude())
def negated(self):
return self.scale(-1)
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y) and (self.z == other.z)
def isVector(self):
return True
def isPoint(self):
return False
def mustBeVector(self):
return self
def mustBePoint(self):
        raise TypeError('Vectors are not points!')
def reflectThrough(self, normal):
d = normal.scale(self.dot(normal))
return self - d.scale(2)
Vector.ZERO = Vector(0, 0, 0)
Vector.RIGHT = Vector(1, 0, 0)
Vector.UP = Vector(0, 1, 0)
Vector.OUT = Vector(0, 0, 1)
assert Vector.RIGHT.reflectThrough(Vector.UP) == Vector.RIGHT
assert Vector(-1, -1, 0).reflectThrough(Vector.UP) == Vector(-1, 1, 0)
class Point(object):
def __init__(self, initx, inity, initz):
self.x = initx
self.y = inity
self.z = initz
def __str__(self):
return '(%s,%s,%s)' % (self.x, self.y, self.z)
def __repr__(self):
return 'Point(%s,%s,%s)' % (self.x, self.y, self.z)
def __add__(self, other):
other.mustBeVector()
return Point(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
if other.isPoint():
return Vector(self.x - other.x, self.y - other.y, self.z - other.z)
else:
return Point(self.x - other.x, self.y - other.y, self.z - other.z)
def isVector(self):
return False
def isPoint(self):
return True
def mustBeVector(self):
        raise TypeError('Points are not vectors!')
def mustBePoint(self):
return self
class Sphere(object):
def __init__(self, centre, radius):
centre.mustBePoint()
self.centre = centre
self.radius = radius
def __repr__(self):
return 'Sphere(%s,%s)' % (repr(self.centre), self.radius)
def intersectionTime(self, ray):
cp = self.centre - ray.point
v = cp.dot(ray.vector)
discriminant = (self.radius * self.radius) - (cp.dot(cp) - v * v)
if discriminant < 0:
return None
else:
return v - math.sqrt(discriminant)
def normalAt(self, p):
return (p - self.centre).normalized()
class Halfspace(object):
def __init__(self, point, normal):
self.point = point
self.normal = normal.normalized()
def __repr__(self):
return 'Halfspace(%s,%s)' % (repr(self.point), repr(self.normal))
def intersectionTime(self, ray):
v = ray.vector.dot(self.normal)
if v:
return 1 / -v
else:
return None
def normalAt(self, p):
return self.normal
class Ray(object):
def __init__(self, point, vector):
self.point = point
self.vector = vector.normalized()
def __repr__(self):
return 'Ray(%s,%s)' % (repr(self.point), repr(self.vector))
def pointAtTime(self, t):
return self.point + self.vector.scale(t)
Point.ZERO = Point(0, 0, 0)
class Canvas(object):
def __init__(self, width, height):
self.bytes = array.array('B', [0] * (width * height * 3))
for i in range(width * height):
self.bytes[i * 3 + 2] = 255
self.width = width
self.height = height
def plot(self, x, y, r, g, b):
i = ((self.height - y - 1) * self.width + x) * 3
self.bytes[i] = max(0, min(255, int(r * 255)))
self.bytes[i + 1] = max(0, min(255, int(g * 255)))
self.bytes[i + 2] = max(0, min(255, int(b * 255)))
def write_ppm(self, filename):
header = 'P6 %d %d 255\n' % (self.width, self.height)
with open(filename, "wb") as fp:
fp.write(header.encode('ascii'))
fp.write(self.bytes.tostring())
def firstIntersection(intersections):
result = None
for i in intersections:
candidateT = i[1]
if candidateT is not None and candidateT > -EPSILON:
if result is None or candidateT < result[1]:
result = i
return result
class Scene(object):
def __init__(self):
self.objects = []
self.lightPoints = []
self.position = Point(0, 1.8, 10)
self.lookingAt = Point.ZERO
self.fieldOfView = 45
self.recursionDepth = 0
def moveTo(self, p):
self.position = p
def lookAt(self, p):
self.lookingAt = p
def addObject(self, object, surface):
self.objects.append((object, surface))
def addLight(self, p):
self.lightPoints.append(p)
def render(self, canvas):
fovRadians = math.pi * (self.fieldOfView / 2.0) / 180.0
halfWidth = math.tan(fovRadians)
halfHeight = 0.75 * halfWidth
width = halfWidth * 2
height = halfHeight * 2
pixelWidth = width / (canvas.width - 1)
pixelHeight = height / (canvas.height - 1)
eye = Ray(self.position, self.lookingAt - self.position)
vpRight = eye.vector.cross(Vector.UP).normalized()
vpUp = vpRight.cross(eye.vector).normalized()
for y in range(canvas.height):
for x in range(canvas.width):
xcomp = vpRight.scale(x * pixelWidth - halfWidth)
ycomp = vpUp.scale(y * pixelHeight - halfHeight)
ray = Ray(eye.point, eye.vector + xcomp + ycomp)
colour = self.rayColour(ray)
canvas.plot(x, y, *colour)
def rayColour(self, ray):
if self.recursionDepth > 3:
return (0, 0, 0)
try:
self.recursionDepth = self.recursionDepth + 1
intersections = [(o, o.intersectionTime(ray), s)
for (o, s) in self.objects]
i = firstIntersection(intersections)
if i is None:
return (0, 0, 0) # the background colour
else:
(o, t, s) = i
p = ray.pointAtTime(t)
return s.colourAt(self, ray, p, o.normalAt(p))
finally:
self.recursionDepth = self.recursionDepth - 1
def _lightIsVisible(self, l, p):
for (o, s) in self.objects:
t = o.intersectionTime(Ray(p, l - p))
if t is not None and t > EPSILON:
return False
return True
def visibleLights(self, p):
result = []
for l in self.lightPoints:
if self._lightIsVisible(l, p):
result.append(l)
return result
def addColours(a, scale, b):
return (a[0] + scale * b[0],
a[1] + scale * b[1],
a[2] + scale * b[2])
class SimpleSurface(object):
def __init__(self, **kwargs):
self.baseColour = kwargs.get('baseColour', (1, 1, 1))
self.specularCoefficient = kwargs.get('specularCoefficient', 0.2)
self.lambertCoefficient = kwargs.get('lambertCoefficient', 0.6)
self.ambientCoefficient = 1.0 - self.specularCoefficient - self.lambertCoefficient
def baseColourAt(self, p):
return self.baseColour
def colourAt(self, scene, ray, p, normal):
b = self.baseColourAt(p)
c = (0, 0, 0)
if self.specularCoefficient > 0:
reflectedRay = Ray(p, ray.vector.reflectThrough(normal))
reflectedColour = scene.rayColour(reflectedRay)
c = addColours(c, self.specularCoefficient, reflectedColour)
if self.lambertCoefficient > 0:
lambertAmount = 0
for lightPoint in scene.visibleLights(p):
contribution = (lightPoint - p).normalized().dot(normal)
if contribution > 0:
lambertAmount = lambertAmount + contribution
lambertAmount = min(1, lambertAmount)
c = addColours(c, self.lambertCoefficient * lambertAmount, b)
if self.ambientCoefficient > 0:
c = addColours(c, self.ambientCoefficient, b)
return c
class CheckerboardSurface(SimpleSurface):
def __init__(self, **kwargs):
SimpleSurface.__init__(self, **kwargs)
self.otherColour = kwargs.get('otherColour', (0, 0, 0))
self.checkSize = kwargs.get('checkSize', 1)
def baseColourAt(self, p):
        v = p - Point.ZERO
        # Vector.scale returns a new vector, so reassign it to v.
        v = v.scale(1.0 / self.checkSize)
        if ((int(abs(v.x) + 0.5)
             + int(abs(v.y) + 0.5)
             + int(abs(v.z) + 0.5)) % 2):
            return self.otherColour
        else:
            return self.baseColour
def _bench_raytrace(loops, width, height, filename):
range_it = range(loops)
t0 = time.perf_counter()
for i in range_it:
canvas = Canvas(width, height)
s = Scene()
s.addLight(Point(30, 30, 10))
s.addLight(Point(-10, 100, 30))
s.lookAt(Point(0, 3, 0))
s.addObject(Sphere(Point(1, 3, -10), 2),
SimpleSurface(baseColour=(1, 1, 0)))
for y in range(6):
s.addObject(Sphere(Point(-3 - y * 0.4, 2.3, -5), 0.4),
SimpleSurface(baseColour=(y / 6.0, 1 - y / 6.0, 0.5)))
s.addObject(Halfspace(Point(0, 0, 0), Vector.UP),
CheckerboardSurface())
s.render(canvas)
dt = time.perf_counter() - t0
if filename:
canvas.write_ppm(filename)
return dt
def bench_raytrace(loops, width, height, filename):
return _bench_raytrace(loops, width, height, filename)
@s6.jit
def s6_bench_raytrace(loops, width, height, filename):
return _bench_raytrace(loops, width, height, filename)
def add_cmdline_args(cmd, args):
cmd.append("--width=%s" % args.width)
cmd.append("--height=%s" % args.height)
if args.filename:
cmd.extend(("--filename", args.filename))
if __name__ == "__main__":
loops = 15
if len(sys.argv) >= 2:
loops = int(sys.argv[1])
print(f'Starting ray trace benchmark, running for {loops} loops.')
py37_time = bench_raytrace(loops, DEFAULT_WIDTH, DEFAULT_HEIGHT, None)
no_warmup_s6_time = s6_bench_raytrace(loops, DEFAULT_WIDTH, DEFAULT_HEIGHT, None)
print(f'S6 time without warmup: {no_warmup_s6_time} sec.')
print(f'Default Python 3.7 time: {py37_time} sec.')
print(f'Speedup: {(float(py37_time)) / no_warmup_s6_time}')
| s6-main | src/benchmarks/raytrace.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modified from Richards benchmark in https://github.com/python/pyperformance.
Modifications include:
- Removing pyperf (causes seg fault due to S6 de-referencing frame pointer,
known bug).
- Run with and without S6 and compare.
Based on a Java version:
Based on original version written in BCPL by Dr Martin Richards
in 1981 at Cambridge University Computer Laboratory, England
and a C++ version derived from a Smalltalk version written by
L Peter Deutsch.
Java version: Copyright (C) 1995 Sun Microsystems, Inc.
Translation from C++, Mario Wolczko
Outer loop added by Alex Jacoby
"""
import sys
import time
import s6
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self, l, i, k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self, lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self, p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self, p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self, i, p, w, initialState, r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self, pkt, r):
raise NotImplementedError
def addPacket(self, p, old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg, self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self, i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self, pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt, self)
def findtcb(self, id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing:
trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, 0, None, s, r)
def fn(self, pkt, r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control // 2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self, i, p, w, s, r):
Task.__init__(self, i, p, w, s, r)
def fn(self, pkt, r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # range(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
def schedule():
t = taskWorkArea.taskList
while t is not None:
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing:
trace(chr(ord("0") + t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
return self._run(iterations)
@s6.jit
def s2_run(self, iterations):
return self._run(iterations)
def _run(self, iterations):
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq, 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState(
).waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
wkq = Packet(wkq, I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
wkq = Packet(wkq, I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState(
).waitingWithPacket(), HandlerTaskRec())
wkq = None
DeviceTask(I_DEVA, 4000, wkq,
TaskState().waiting(), DeviceTaskRec())
DeviceTask(I_DEVB, 5000, wkq,
TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
if __name__ == '__main__':
it = 50
if len(sys.argv) >= 2:
it = int(sys.argv[1])
print(f'Starting Richards benchmark, running for {it} iterations.')
richards = Richards()
py37_time = time.time()
richards.run(it)
py37_time = time.time() - py37_time
no_warmup_s6_time = time.time()
richards.s2_run(it)
no_warmup_s6_time = time.time() - no_warmup_s6_time
print(f'S6 time without warmup: {no_warmup_s6_time} sec.')
print(f'Default Python 3.7 time: {py37_time} sec.')
print(f'Speedup: {(float(py37_time)) / no_warmup_s6_time}')
| s6-main | src/benchmarks/richards.py |
# Copyright 2021 The s6 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modified from unpack sequence benchmark in https://github.com/python/pyperformance.
Modifications include:
- Removing pyperf (causes seg fault due to S6 de-referencing frame pointer,
known bug).
- Run with and without S6 and compare.
Microbenchmark for Python's sequence unpacking.
"""
import sys
import time
import s6
def do_unpacking(loops, to_unpack):
range_it = range(loops)
t0 = time.time()
for _ in range_it:
# 400 unpackings
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
a, b, c, d, e, f, g, h, i, j = to_unpack
return time.time() - t0
def bench_tuple_unpacking(loops):
x = tuple(range(10))
return do_unpacking(loops, x)
def bench_list_unpacking(loops):
x = list(range(10))
return do_unpacking(loops, x)
def _bench_all(loops):
dt1 = bench_tuple_unpacking(loops)
dt2 = bench_list_unpacking(loops)
return dt1 + dt2
def bench_all(loops):
return _bench_all(loops)
@s6.jit
def s6_bench_all(loops):
return bench_all(loops)
def add_cmdline_args(cmd, args):
if args.benchmark:
cmd.append(args.benchmark)
if __name__ == "__main__":
it = 4000
if len(sys.argv) >= 2:
it = int(sys.argv[1])
print(f'Starting unpack sequence (slow) benchmark, running for {it} iterations.')
py37_time = bench_all(it)
no_warmup_s6_time = s6_bench_all(it)
print(f'S6 time without warmup: {no_warmup_s6_time} sec.')
print(f'Default Python 3.7 time: {py37_time} sec.')
print(f'Speedup: {(float(py37_time)) / no_warmup_s6_time}')
| s6-main | src/benchmarks/unpack_sequence_slow.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'absl-py',
'attrs',
'chex',
'jax',
'jaxlib',
'jaxline',
'ml-collections',
'optax',
'numpy',
'pandas',
'scipy',
'tensorflow',
'typing_extensions',
]
setup(
name='eigengame',
version='1.0',
description=('Top-k Eigendecompositions for Large, Streaming Data.'),
url='https://github.com/deepmind/eigengame',
author='DeepMind',
author_email='[email protected]',
packages=find_packages(),
install_requires=REQUIRED_PACKAGES,
platforms=['any'],
license='Apache 2.0',
)
| eigengame-main | setup.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the distributed TPU implementation of EigenGame."""
import collections
import enum
import functools
import os
from typing import Callable, Dict, NamedTuple, Optional, Tuple, Union
from absl import logging
import chex
import jax
import jax.numpy as jnp
from jaxline import utils
import numpy as np
class ExperimentType(enum.Enum):
BIASED_CCA = 'biased_cca'
CCA = 'unbiased_cca'
PLS = 'unbiased_pls'
ICA = 'unbiased_ica'
PCA = 'pca'
MATRIX_INVERSE = 'matrix_inverse'
class SplitVector(NamedTuple):
"""Generalized Eigenvector object used for CCA, PLS, etc.
Concatenations and splits are some of the slowest things you can do on a TPU,
so we want to keep the portions of the eigenvectors responsible for each data
source separate.
"""
x: chex.ArrayTree
y: chex.ArrayTree
class AuxiliaryParams(NamedTuple):
r"""Container for auxiliary variables for $\gamma$-EigenGame."""
b_vector_product: chex.ArrayTree
b_inner_product_diag: chex.Array
EigenGameGradientFunction = Callable[..., Tuple[chex.ArrayTree,
AuxiliaryParams]]
EigenGameQuotientFunction = Callable[..., Tuple[chex.ArrayTree, chex.ArrayTree]]
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def init_aux_variables(
device_count: int,
init_vectors: chex.ArrayTree,
) -> AuxiliaryParams:
"""Initializes the auxiliary variables from the eigenvalues."""
leaves, _ = jax.tree_util.tree_flatten(init_vectors)
per_device_eigenvectors = leaves[0].shape[0]
total_eigenvectors = per_device_eigenvectors * device_count
return AuxiliaryParams(
b_vector_product=jax.tree_map(
lambda leaf: jnp.zeros((total_eigenvectors, *leaf.shape[1:])),
init_vectors,
),
b_inner_product_diag=jnp.zeros(total_eigenvectors),
)
class AuxiliaryMovingAverage:
"""Simple class which computes the moving average of the auxiliary variables.
This is used in the generalized eigenvalue problem, where the reciprocal of
an estimate may have a severe biasing effect for small batches.
"""
def __init__(self, max_len: int):
self._values = collections.deque(maxlen=max_len)
def get_moving_average(self) -> Optional[chex.ArrayTree]:
if not self._values:
return None
length = len(self._values)
values_sum = jax.tree_map(
lambda *x: sum(x),
self._values[0],
*list(self._values)[1:],
)
return jax.tree_map(lambda x: x / length, values_sum)
def add_value(self, new_value: chex.ArrayTree) -> None:
self._values.append(new_value)
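# Illustrative usage sketch, added for exposition and not part of the original
# module: the moving average is computed leaf-wise over at most `max_len` of
# the most recently added pytrees.
def _auxiliary_moving_average_example():
  """With max_len=2, the average of 1.0, 2.0, 3.0 is (2.0 + 3.0) / 2 = 2.5."""
  average = AuxiliaryMovingAverage(max_len=2)
  for value in (1.0, 2.0, 3.0):
    average.add_value({'a': jnp.asarray(value)})
  # Only the last two values are retained, so this returns {'a': 2.5}.
  return average.get_moving_average()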
def get_spherical_gradients(
gradient: chex.ArrayTree,
eigenvectors: chex.ArrayTree,
) -> chex.ArrayTree:
"""Project gradients to a perpendicular to each of the eigenvectors."""
tangential_component_scale = tree_einsum(
'l..., l...-> l',
gradient,
eigenvectors,
reduce_f=lambda x, y: x + y,
)
tangential_component = tree_einsum_broadcast(
'l..., l -> l... ',
eigenvectors,
tangential_component_scale,
)
return jax.tree_map(
lambda x, y: x - y,
gradient,
tangential_component,
)
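# Illustrative usage sketch, added for exposition and not part of the original
# module: the projection removes the gradient component along each (unit-norm)
# eigenvector, leaving only the tangential part.
def _get_spherical_gradients_example():
  """A gradient of (1, 1) at eigenvector (1, 0) keeps only the (0, 1) part."""
  eigenvectors = {'x': jnp.asarray([[1.0, 0.0]])}
  gradient = {'x': jnp.asarray([[1.0, 1.0]])}
  # Returns {'x': [[0., 1.]]}: the component along (1, 0) has been removed.
  return get_spherical_gradients(gradient, eigenvectors)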
def normalize_eigenvectors(eigenvectors: chex.ArrayTree) -> chex.ArrayTree:
"""Normalize all eigenvectors."""
squared_norm = jax.tree_util.tree_reduce(
lambda x, y: x + y,
jax.tree_map(
lambda x: jnp.einsum('k..., k... -> k', x, x),
eigenvectors,
))
return jax.tree_map(
lambda x: jnp.einsum('k..., k -> k...', x, 1 / jnp.sqrt(squared_norm)),
eigenvectors,
)
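# Illustrative usage sketch, added for exposition and not part of the original
# module: the norm is taken jointly across all leaves, so both halves of a
# SplitVector are rescaled by a single shared factor per eigenvector.
def _normalize_eigenvectors_example():
  """Two eigenvectors whose x and y halves are all-ones get scaled by 1/2."""
  vectors = SplitVector(x=jnp.ones((2, 2)), y=jnp.ones((2, 2)))
  # The joint squared norm of each eigenvector is 2 + 2 = 4, so every entry
  # becomes 0.5 after normalization.
  return normalize_eigenvectors(vectors)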
def initialize_eigenvectors(
eigenvector_count: int,
batch: chex.ArrayTree,
rng_key: chex.PRNGKey,
) -> chex.ArrayTree:
"""Initialize the eigenvectors on a unit sphere and shards it.
Args:
eigenvector_count: Total number of eigenvectors (i.e. k)
batch: A batch of the data we're trying to find the eigenvalues of. The
initialized vectors will take the shape and tree structure of this data.
Array tree with leaves of shape [b, ...].
rng_key: jax rng seed. For multihost, each host should have a different
seed in order to initialize correctly
Returns:
A pytree of initialized, normalized vectors in the same structure as the
input batch. Array tree with leaves of shape [num_devices, l, ...].
"""
device_count = jax.device_count()
local_device_count = jax.local_device_count()
if eigenvector_count % device_count != 0:
    raise ValueError(f'Number of devices ({device_count}) must divide the '
                     f'number of eigenvectors ({eigenvector_count}).')
per_device_count = eigenvector_count // device_count
leaves, treedef = jax.tree_flatten(batch)
shapes = [(per_device_count, *leaf.shape[1:]) for leaf in leaves]
eigenvectors = []
per_device_keys = jax.random.split(rng_key, local_device_count)
for per_device_key in per_device_keys:
# generate a different key for each leaf on each device
per_leaf_keys = jax.random.split(per_device_key, len(leaves))
# generate random number for each leaf
vector_leaves = [
jax.random.normal(key, shape)
for key, shape in zip(per_leaf_keys, shapes)
]
eigenvector_tree = jax.tree_unflatten(treedef, vector_leaves)
normalized_eigenvector = normalize_eigenvectors(eigenvector_tree)
eigenvectors.append(normalized_eigenvector)
return jax.device_put_sharded(eigenvectors, jax.local_devices())
def get_local_slice(
local_identity_slice: chex.Array,
input_vector_tree: chex.ArrayTree,
) -> chex.ArrayTree:
"""Get the local portion from all the eigenvectors.
  Multiplying by a matrix here selects the vectors that we care about locally.
This is significantly faster than using jnp.take.
Args:
local_identity_slice: A slice of the identity matrix denoting the vectors
which we care about locally
input_vector_tree: An array tree of data, with the first index querying all
the vectors.
Returns:
A pytree of the same structure as input_vector_tree, but with only the
relevant vectors specified in the identity.
"""
def get_slice(all_vectors):
return jnp.einsum(
'k..., lk -> l...',
all_vectors,
local_identity_slice,
)
return jax.tree_map(get_slice, input_vector_tree)
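# Illustrative usage sketch, added for exposition and not part of the original
# module: selecting rows with a slice of the identity matrix is equivalent to
# jnp.take along the leading axis, just cheaper on TPU.
def _get_local_slice_example():
  """Picks eigenvectors 2 and 3 out of 4 from every leaf of the tree."""
  identity_slice = jnp.eye(4)[2:4]  # Shape [2, 4], selects rows 2 and 3.
  vectors = {'x': jnp.arange(8.0).reshape(4, 2)}
  # Returns {'x': [[4., 5.], [6., 7.]]}.
  return get_local_slice(identity_slice, vectors)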
def per_vector_metric_log(
metric_name: str,
metric: chex.Array,
) -> Dict[str, float]:
"""Creates logs for each vector in a sortable way."""
# get the biggest index length
max_index_length = len(str(len(metric)))
def get_key(index: int) -> str:
"""Adds metrix prefix and pads the index so the key is sortable."""
padded_index = str(index).rjust(max_index_length, '0')
return metric_name + '_' + padded_index
return {get_key(i): value for i, value in enumerate(metric)}
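# Illustrative usage sketch, added for exposition and not part of the original
# module: the index is zero-padded to the width of the largest index, so the
# resulting keys sort correctly even with ten or more eigenvectors.
def _per_vector_metric_log_example():
  """Two values become {'eigenvalue_0': 0.5, 'eigenvalue_1': 0.25}."""
  return per_vector_metric_log('eigenvalue', jnp.asarray([0.5, 0.25]))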
def tree_einsum(
subscripts: str,
*operands: chex.ArrayTree,
reduce_f: Optional[Callable[[chex.Array, chex.Array], chex.Array]] = None
) -> Union[chex.ArrayTree, chex.Array]:
"""Applies an leaf wise einsum to a list of trees.
Args:
subscripts: subscript string denoting the einsum operation.
*operands: a list of pytrees with the same structure. The einsum will be
applied leafwise.
reduce_f: Function denoting a reduction. If not left empty, this calls a
tree reduce on the resulting tree after the einsum.
Returns:
A pytree with the same structure as the input operands if reduce_f.
Otherwise an array which is the result of the reduction.
"""
einsum_function = functools.partial(jnp.einsum, subscripts)
mapped_tree = jax.tree_map(einsum_function, *operands)
if reduce_f is None:
return mapped_tree
else:
return jax.tree_util.tree_reduce(reduce_f, mapped_tree)
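# Illustrative usage sketch, added for exposition and not part of the original
# module: the same einsum is mapped over matching leaves, and reduce_f can
# fold the per-leaf results into a single array.
def _tree_einsum_example():
  """Row-wise squared norms of a two-leaf pytree, per leaf and then summed."""
  tree = {'x': jnp.ones((4, 3)), 'y': jnp.ones((4, 5))}
  # Per leaf: {'x': [3., 3., 3., 3.], 'y': [5., 5., 5., 5.]}.
  per_leaf = tree_einsum('l..., l... -> l', tree, tree)
  # Summed across leaves: [8., 8., 8., 8.].
  summed = tree_einsum(
      'l..., l... -> l', tree, tree, reduce_f=lambda a, b: a + b)
  return per_leaf, summed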
def tree_einsum_broadcast(
subscripts: str,
tree: chex.ArrayTree,
*array_operands: chex.Array,
reduce_f: Optional[Callable[[chex.Array, chex.Array], chex.Array]] = None
) -> Union[chex.ArrayTree, chex.Array]:
"""Applies an einsum operation on a list of arrays to all leaves of a tree.
Args:
subscripts: subscript string denoting the einsum operation. The first
argument must denote the tree leaf, followed by the list of arrays.
tree: A pytree. The einsum will be applied with the leaves of this tree in
the first argument.
    *array_operands: A list of arrays. The einsum with these arrays will be
mapped to each leaf in the tree.
reduce_f: Function denoting a reduction. If not left empty, this calls a
tree reduce on the resulting tree after the einsum.
Returns:
    A pytree with the same structure as the input tree if reduce_f is None.
    Otherwise, an array which is the result of the reduction.
"""
einsum_function = lambda leaf: jnp.einsum(subscripts, leaf, *array_operands)
mapped_tree = jax.tree_map(einsum_function, tree)
if reduce_f is None:
return mapped_tree
else:
return jax.tree_util.tree_reduce(reduce_f, mapped_tree)
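# Illustrative usage sketch, added for exposition and not part of the original
# module: a plain array operand is broadcast against every leaf of the tree.
def _tree_einsum_broadcast_example():
  """Scales rows 0..3 of each leaf by 1, 2, 3 and 4 respectively."""
  tree = {'x': jnp.ones((4, 3)), 'y': jnp.ones((4, 5))}
  scales = jnp.asarray([1.0, 2.0, 3.0, 4.0])
  return tree_einsum_broadcast('l..., l -> l...', tree, scales)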
def get_first(xs):
"""Gets values from the first device."""
return jax.tree_util.tree_map(lambda x: x[0], xs)
class InMemoryCheckpointerPlusSaveEigVecs(utils.InMemoryCheckpointer):
"""A Checkpointer reliant on an in-memory global dictionary."""
def __init__(self, config, mode: str):
super().__init__(config, mode)
self._checkpoint_dir = config.checkpoint_dir
def save(self, ckpt_series: str) -> None:
"""Saves the checkpoint."""
super().save(ckpt_series)
series = utils.GLOBAL_CHECKPOINT_DICT[ckpt_series]
active_state = self.get_experiment_state(ckpt_series)
id_ = 0 if not series.history else series.history[-1].id + 1
filename = ckpt_series + '_' + str(id_)
filepath = os.path.join(self._checkpoint_dir, filename) + '.npy'
vecs = np.array(active_state.experiment_module.get_eigenvectors())
np.save(filepath, vecs)
logging.info('Saved eigenvectors to %s.', filepath)
| eigengame-main | eigengame/eg_utils.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boilerplate needed to do a jaxline experiment with Eigengame."""
import abc
import functools
from typing import Callable, Dict, Iterator, Optional, Tuple
import chex
from eigengame import eg_gradients
from eigengame import eg_objectives
from eigengame import eg_utils
import jax
import jax.numpy as jnp
from jaxline import experiment
from jaxline import utils
import ml_collections
import optax
def get_experiment_type(
experiment_type: eg_utils.ExperimentType
) -> Tuple[eg_utils.EigenGameGradientFunction,
eg_utils.EigenGameQuotientFunction,
int]:
"""Selects the a gradient function and default evaluation functions.
More experiment types may be added here as their respective matrix products
are implemented.
Args:
experiment_type: string value which selects the model type
Returns:
Returns two functions and an integer. The first function evaluates the
gradients and is used in AbstractEigenGameExperiment to update the
    eigenvectors for the given type of experiment. The second function is used
    to evaluate the numerator and denominator of the Rayleigh quotient, which
    serve as estimates for the generalized eigenvalues. The last value, the
integer, indicates how many independent minibatches the functions expect as
input.
"""
if experiment_type == eg_utils.ExperimentType.BIASED_CCA:
return (
eg_gradients.biased_cca_gradients,
eg_objectives.biased_cca_rayleigh_quotient_components,
1
)
elif experiment_type == eg_utils.ExperimentType.CCA:
return (
eg_gradients.unbiased_cca_gradients,
eg_objectives.unbiased_cca_rayleigh_quotient_components,
2
)
elif experiment_type == eg_utils.ExperimentType.PLS:
return (
eg_gradients.unbiased_pls_gradients,
eg_objectives.unbiased_pls_rayleigh_quotient_components,
2
)
elif experiment_type == eg_utils.ExperimentType.PCA:
return (
eg_gradients.pca_generalized_eigengame_gradients,
eg_objectives.pca_generalised_eigengame_rayleigh_quotient_components,
1
)
elif experiment_type == eg_utils.ExperimentType.ICA:
# Not yet tested
return (
eg_gradients.unbiased_ica_gradients,
eg_objectives.unbiased_ica_rayleigh_quotient_components,
3
)
elif experiment_type == eg_utils.ExperimentType.MATRIX_INVERSE:
return (
eg_gradients.matrix_inverse_gradients,
eg_objectives.matrix_inverse_rayleigh_quotient_components,
1
)
else:
raise ValueError('Please specify a valid experiment Type e.g. "cca"')
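# Illustrative usage sketch, added for exposition and not part of the original
# module: the returned integer tells the caller how many independent
# minibatches each update needs (one for PCA, two or three for the unbiased
# CCA/PLS/ICA variants).
def _get_experiment_type_example():
  """Fetches the gradient and Rayleigh-quotient functions for PCA."""
  gradient_fn, rayleigh_fn, num_samples = get_experiment_type(
      eg_utils.ExperimentType.PCA)
  # num_samples == 1 for PCA, so a single minibatch is consumed per step.
  return gradient_fn, rayleigh_fn, num_samples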
def decaying_schedule_with_warmup(
step: int,
warm_up_step: int,
end_step: int,
base_lr: float,
end_lr: float,
) -> float:
"""Learning rate schedule with warmup and harmonic decay.
  We use a warmup period for the eigenvectors while the auxiliary values and
  the mean estimates are learned. During this period, the learning rate
  increases linearly until it reaches the base learning rate at the end of the
  warmup. This is followed by a harmonically decaying learning rate which
  reaches end_lr at end_step.
Args:
step: global step of the schedule
warm_up_step: number of warmup steps
end_step: step at which end_lr is reached
base_lr: maximum learning rate. Reached when warmup finishes.
end_lr: learning rate at end_step
Returns:
    The learning rate at the current step.
"""
warmup_lr = step * base_lr / warm_up_step
# calculate shift and scale such that scale/(step+shift) satisfies
# schedule(warm_up_step) = base_lr and
# schedule(end_step) = end_lr
decay_shift = (warm_up_step * base_lr - end_step * end_lr) / (
end_lr - base_lr)
decay_scale = base_lr * (warm_up_step + decay_shift)
decay_lr = decay_scale / (step + decay_shift)
return jnp.where(step < warm_up_step, warmup_lr, decay_lr)
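# Illustrative usage sketch, added for exposition and not part of the original
# module: with warm_up_step=100, end_step=1000, base_lr=1e-3 and end_lr=1e-4,
# the rate rises linearly to 1e-3 at step 100 and decays harmonically to 1e-4
# at step 1000, following the formulas above.
def _example_schedule_values():
  """Returns the schedule at steps 0, 50, 100 and 1000: 0, 5e-4, 1e-3, 1e-4."""
  schedule = functools.partial(
      decaying_schedule_with_warmup,
      warm_up_step=100,
      end_step=1000,
      base_lr=1e-3,
      end_lr=1e-4,
  )
  return [schedule(step) for step in (0, 50, 100, 1000)]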
def create_checkpointer(
config: ml_collections.ConfigDict,
mode: str,
) -> utils.Checkpointer:
"""Creates an object to be used as a checkpointer."""
return eg_utils.InMemoryCheckpointerPlusSaveEigVecs(config, mode)
class AbstractEigenGameExperiment(experiment.AbstractExperiment):
"""Jaxline object for running Eigengame Experiments."""
NON_BROADCAST_CHECKPOINT_ATTRS = {
'_eigenvectors': 'eigenvectors',
'_auxiliary_variables': 'auxiliary_variables',
'_opt_state': 'opt_state',
'_aux_opt_state': 'aux_opt_state',
'_mean_estimate': 'mean_estimate',
'_mean_opt_state': 'mean_opt_state',
}
def __init__(self, mode: str, init_rng: chex.Array,
config: ml_collections.ConfigDict):
super().__init__(mode=mode, init_rng=init_rng)
# Get a different seed for each host
if jax.process_count() > 1:
init_rngs = jax.random.split(init_rng, jax.process_count())
init_rng = init_rngs[jax.process_index()]
self._eigenvector_count = config.eigenvector_count
self._epsilon = config.epsilon
self._maximize = config.maximize
self._track_mean = config.track_mean
self._net_activations = self.build_preprocess_function(
config.preprocess_config,)
self.data_config = config.dataset_config
self._data_generator = self.build_dataset(self.data_config)
(
self._gradient_function,
self._rayleigh_quotient_function,
self._num_samples,
) = get_experiment_type(config.experiment_type)
    # Initialize the eigenvectors and mean estimate from a batch of data
(
self._eigenvectors,
self._mean_estimate,
self._auxiliary_variables,
) = self._initialize_eigenvector_params(init_rng)
if self._track_mean:
self._mean_opt = optax.sgd(lambda step: 1 / (step + 1))
self._mean_opt_state = jax.pmap(self._mean_opt.init)(self._mean_estimate,)
else:
self._mean_opt = None
self._mean_opt_state = None
if mode == 'train':
# Setup the update function
self._update = self._build_update_function()
# Initialize the data generators and optimizers
self._optimizer = optax.adam(
functools.partial(decaying_schedule_with_warmup,
**config.optimizer_schedule_config),
**config.optimizer_config)
self._opt_state = jax.pmap(self._optimizer.init)(self._eigenvectors)
# Create optimizer for the auxiliary variables. Don't use ADAM for this
# at the same time as ADAM for the main optimiser! It may cause the
# experiment to become unstable.
self._aux_optimizer = optax.sgd(**config.aux_optimizer_config)
self._aux_opt_state = jax.pmap(self._aux_optimizer.init)(
self._auxiliary_variables,)
      # The mean estimate optimizer created above (learning rate 1/(step + 1))
      # effectively calculates the running mean up to the latest step.
self._eval_batches = None
else:
self._optimizer = None
self._opt_state = None
self._aux_optimizer = None
self._aux_opt_state = None
self._update = None
self._eval_batches = config.eval_batches
@abc.abstractmethod
def build_preprocess_function(
self,
preprocess_config: ml_collections.ConfigDict,
) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
"""Build a pmappable function which is called on all machines in parallel.
    This function will be pmapped inside the update function, and called
    immediately on the batch taken from build_dataset. A minimal illustrative
    subclass implementing this and build_dataset is sketched at the end of
    this file.
    Args:
      preprocess_config: config dict specified in the experiment configs.
        Contains parameters for this function.
    Returns:
      Pmappable function which takes in a local batch of data from
      build_dataset of shape [per_device_batch_size, ...] and an rng key.
      Returns a preprocessed batch of shape [per_device_batch_size, ...].
"""
@abc.abstractmethod
def build_dataset(
self,
dataset_config: ml_collections.ConfigDict,
) -> Iterator[chex.ArrayTree]:
"""Iterator which continuously returns the batches of the dataset.
Args:
dataset_config: config dict specified in the experiment configs. Contains
parameters for this function.
Returns:
      Iterator which will return batches of data which will be sharded
      across machines. This means we need pytrees of shape:
[num_local_devices, per_device_batch_size, ...]
"""
def _initialize_eigenvector_params(
self,
init_rng: chex.PRNGKey,
) -> chex.ArrayTree:
"""Initializes the eigenvalues, mean estimates and auxiliary variables."""
init_batch = next(self._data_generator)
local_init_data = eg_utils.get_first(init_batch)
model_rng, eigenvector_rng = jax.random.split(init_rng, 2)
    local_activation_batch = self._net_activations(local_init_data, model_rng)
    if self._num_samples > 1:
      local_activation_batch = local_activation_batch[0]
    initial_eigenvectors = eg_utils.initialize_eigenvectors(
        self._eigenvector_count,
        local_activation_batch,
        eigenvector_rng,
    )
    initial_mean_estimate = jax.device_put_replicated(
        jax.tree_map(
            lambda x: jnp.zeros(x.shape[1:]),
            local_activation_batch,
        ),
        jax.local_devices(),
    )
auxiliary_variables = eg_utils.init_aux_variables(
jax.device_count(),
initial_eigenvectors,
)
return initial_eigenvectors, initial_mean_estimate, auxiliary_variables
def _build_update_function(self):
"""pmaps and applies masks to the update functions."""
sliced_identity = eg_gradients.create_sharded_identity(
self._eigenvector_count)
mask = eg_gradients.create_sharded_mask(self._eigenvector_count)
return functools.partial(
jax.pmap(
self._update_eigenvectors,
axis_name='devices',
in_axes=0,
out_axes=0,
),
mask=mask,
sliced_identity=sliced_identity)
def _update_eigenvectors(
self,
local_eigenvectors: chex.ArrayTree,
opt_state: chex.ArrayTree,
auxiliary_variables: Optional[eg_utils.AuxiliaryParams],
aux_opt_state: Optional[eg_utils.AuxiliaryParams],
batch: chex.Array,
mean_estimate: Optional[chex.ArrayTree],
mean_opt_state: Optional[chex.ArrayTree],
rng: chex.PRNGKey,
mask: chex.Array,
sliced_identity: chex.Array,
) -> Tuple[chex.ArrayTree, chex.ArrayTree, eg_utils.AuxiliaryParams,
eg_utils.AuxiliaryParams, Optional[chex.ArrayTree],
Optional[chex.ArrayTree],]:
"""Calculates the new vectors, applies update and then renormalize."""
# Generate activations from the batch of data.
data = self._net_activations(batch, rng)
# Calculate the gradient and the new auxiliary variable values.
gradient, new_aux = self._gradient_function(
local_eigenvectors=local_eigenvectors,
sharded_data=data,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
mean_estimate=mean_estimate,
epsilon=self._epsilon,
maximize=self._maximize
)
# Update and normalize the eigenvectors variables.
update, new_opt_state = self._optimizer.update(
        # TODO(ccharlie): Implement __neg__ for this object when overhauling it.
jax.tree_map(lambda x: -x, gradient),
opt_state,
)
new_eigenvectors = optax.apply_updates(local_eigenvectors, update)
new_eigenvectors = eg_utils.normalize_eigenvectors(new_eigenvectors)
# Update the auxiliary as well. In this case we're minimising the
# squared error between the new target and the old.
auxiliary_error = jax.tree_map(
lambda x, y: x - y,
auxiliary_variables,
new_aux,
)
aux_update, new_aux_opt_state = self._aux_optimizer.update(
auxiliary_error,
aux_opt_state,
)
new_aux_value = optax.apply_updates(auxiliary_variables, aux_update)
if self._track_mean:
# The mean also needs to be estimated -- since we're looking at the
# covariances we need the data to be centered.
if self._num_samples == 1:
data_tuple = (data,)
else:
data_tuple = data
minibatch_mean = lambda x: jnp.mean(x, axis=0)
ind_batch_mean = lambda *x: sum(x) / len(x) # independent batches
# average over independent sample dim, minibatch dim, device dim
batch_mean_estimate = jax.lax.pmean(
jax.tree_util.tree_map(minibatch_mean,
jax.tree_util.tree_map(ind_batch_mean,
*data_tuple)),
axis_name='devices',
)
mean_error = jax.tree_map(
lambda x, y: x - y,
mean_estimate,
batch_mean_estimate,
)
mean_update, new_mean_opt_state = self._mean_opt.update(
mean_error,
mean_opt_state,
)
new_mean_estimate = optax.apply_updates(mean_estimate, mean_update)
else:
new_mean_opt_state = None
new_mean_estimate = None
return ( # pytype: disable=signature-mismatch # jax-ndarray
new_eigenvectors,
new_opt_state,
new_aux_value,
new_aux_opt_state,
new_mean_estimate,
new_mean_opt_state,
)
def get_eigenvectors(self) -> chex.ArrayTree:
"""Returns the current eigenvectors as jax array."""
return self._eigenvectors
def step( # pytype: disable=signature-mismatch # jax-ndarray
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Calls the update function on the eigen vectors and aux variables."""
batch = next(self._data_generator)
(
self._eigenvectors,
self._opt_state,
self._auxiliary_variables,
self._aux_opt_state,
self._mean_estimate,
self._mean_opt_state,
) = self._update(
self._eigenvectors,
self._opt_state,
self._auxiliary_variables,
self._aux_opt_state,
batch,
self._mean_estimate,
self._mean_opt_state,
rng,
)
return {}
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def _eval_eigenvalues(
self,
local_eigenvectors: chex.ArrayTree,
batch: chex.Array,
mean_estimate: Optional[chex.ArrayTree],
rng: chex.PRNGKey, # pytype: disable=signature-mismatch # jax-ndarray
) -> Tuple[chex.Array, chex.Array]:
"""pmaps the cosine similarity function."""
data = self._net_activations(batch, rng)
return self._rayleigh_quotient_function(
local_eigenvectors,
data,
mean_estimate,
self._epsilon,
self._maximize,
)
def evaluate( # pytype: disable=signature-mismatch # jax-ndarray
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Calculate the eigenvalues for each eigenvector."""
numerator, denominator = 0, 0
self._data_generator = self.build_dataset(self.data_config)
for _ in range(self._eval_batches):
batch = next(self._data_generator)
new_numerator, new_denominator = self._eval_eigenvalues(
self._eigenvectors, batch, self._mean_estimate, rng)
numerator += eg_utils.get_first(new_numerator)
denominator += eg_utils.get_first(new_denominator)
eigenvalues = numerator / denominator
return eg_utils.per_vector_metric_log('eigenvalue', eigenvalues) # pytype: disable=bad-return-type # numpy-scalars
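# Minimal subclass sketch (illustrative assumption, not part of the library):
# streams synthetic Gaussian data with an identity preprocess function, which
# is roughly what is needed to run e.g. the PCA experiment type end to end.
# The dataset_config fields used below (seed, per_device_batch_size, dim) are
# hypothetical names, not fields defined elsewhere in this codebase.
class _SyntheticDataExperiment(AbstractEigenGameExperiment):
  """Example concrete experiment; for illustration only."""
  def build_preprocess_function(
      self,
      preprocess_config: ml_collections.ConfigDict,
  ) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
    del preprocess_config  # The raw batch is already treated as the activation.
    return lambda batch, rng: batch  # rng unused; no preprocessing.
  def build_dataset(
      self,
      dataset_config: ml_collections.ConfigDict,
  ) -> Iterator[chex.ArrayTree]:
    rng = jax.random.PRNGKey(dataset_config.seed)
    shape = (
        jax.local_device_count(),
        dataset_config.per_device_batch_size,
        dataset_config.dim,
    )
    while True:
      rng, key = jax.random.split(rng)
      yield jax.random.normal(key, shape)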
| eigengame-main | eigengame/eg_experiment.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Different Eigengame gradients and their associated helpers.
einsum legend:
...: the massive data dimension over which we're running CCA/ICA/whatever
other method based on GEVP we're doing. This data may not be contiguious, and
may consist of arbitrary shapes in a pytree. Hence we use ellipses to denote
it here.
l: local eigenvector index -- the number of eigenvectors per machine.
k: global eigenvector index -- number of eigenvectors over all machines.
b: batch dimension, batch size per machine.
"""
from typing import Optional, Tuple
import chex
from eigengame import eg_gradients
from eigengame import eg_utils
import jax
import jax.numpy as jnp
import scipy
def cosine_similarity(
eigenvectors: chex.ArrayTree,
target_vectors: chex.ArrayTree,
) -> chex.Array:
"""Calculate the cosine similarity on each machine then replicate to all."""
normed_targets = eg_utils.normalize_eigenvectors(target_vectors)
similarity = eg_utils.tree_einsum(
'l..., l... -> l',
normed_targets,
eigenvectors,
reduce_f=lambda x, y: x + y,
)
return jax.lax.all_gather(
similarity,
axis_name='devices',
axis=0,
tiled=True,
)
def unbiased_cca_rayleigh_quotient_components(
local_eigenvectors: eg_utils.SplitVector,
sharded_data: Tuple[eg_utils.SplitVector, eg_utils.SplitVector],
mean_estimate: Optional[eg_utils.SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the generalized eigenvalues.
Calculates the numerator (vTAv) and denominator (vTBv) separately so we can
loop over the entire dataset then divide. The calculation is distributed
across machines.
Args:
local_eigenvectors: eg_utils.SplitVector holding the generalized
eigenvectors sharded across machines. Array Tree with leaves of shape
[l, ...] denoting v_l.
    sharded_data: Tuple of two independent eg_utils.SplitVector batches from
      our two data sources. Array trees with leaves of shape [b, ...].
mean_estimate: eg_utils.SplitVector containing an estimate of the mean of
the input data if it is not centered by default. This is used to calculate
the covariances. Array tree with leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False). Due to the symmetry of the CCA
problem, setting to True or False should not change the performance.
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
data_vector_x = []
data_vector_y = []
for sample in sharded_data:
if mean_estimate is not None:
sample = jax.tree_map(lambda x, y: x - y, sample, mean_estimate)
data_vector = eg_utils.tree_einsum(
'k..., b... -> kb',
all_eigenvectors,
sample,
)
data_vector_x.append(
jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.x, # pytype: disable=attribute-error # numpy-scalars
))
data_vector_y.append(
jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.y, # pytype: disable=attribute-error # numpy-scalars
))
sharded_numerator = 2 * jnp.einsum(
'kb, kb -> k',
data_vector_x[0],
data_vector_y[0],
) # <v,Av>
sharded_denominator = jnp.einsum(
'kb, kb -> k',
data_vector_x[1],
data_vector_x[1],
) + jnp.einsum(
'kb, kb -> k',
data_vector_y[1],
data_vector_y[1],
) # <v,Bv>
numerator = jax.lax.pmean(sharded_numerator, axis_name='devices')
denominator = jax.lax.pmean(sharded_denominator, axis_name='devices')
per_machine_batch_size = data_vector_x[0].shape[1]
numerator /= per_machine_batch_size
denominator /= per_machine_batch_size
if not maximize:
numerator = -numerator
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return (
numerator,
denominator,
) # vAv and v(B + epsilonI)v
def biased_cca_rayleigh_quotient_components(
local_eigenvectors: eg_utils.SplitVector,
sharded_data: eg_utils.SplitVector,
mean_estimate: Optional[eg_utils.SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the cca generalized eigenvalues.
Calculates the numerator (vTAv) and denominator (vTBv) separately so we can
loop over the entire dataset then divide. The calculation is distributed
across machines.
Args:
local_eigenvectors: eg_utils.SplitVector holding the generalized
eigenvectors sharded across machines. Array Tree with leaves of shape
[l, ...] denoting v_l.
sharded_data: eg_utils.SplitVector holding data from our two data sources.
Array Tree with leaves of shape [b, ...].
mean_estimate: eg_utils.SplitVector containing an estimate of the mean of
the input data if it is not centered by default. This is used to calculate
the covariances. Array tree with leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False). Due to the symmetry of the CCA
problem, setting to True or False should not change the performance.
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(
lambda x, y: x - y,
sharded_data,
mean_estimate,
)
data_vector = eg_utils.tree_einsum(
'k..., b... -> kb',
all_eigenvectors,
sharded_data,
)
data_vector_x = jax.tree_util.tree_reduce(lambda x, y: x + y, data_vector.x) # pytype: disable=attribute-error # numpy-scalars
data_vector_y = jax.tree_util.tree_reduce(lambda x, y: x + y, data_vector.y) # pytype: disable=attribute-error # numpy-scalars
sharded_numerator = 2 * jnp.einsum(
'kb, kb -> k',
data_vector_x,
data_vector_y,
) # <v,Av>
sharded_denominator = jnp.einsum(
'kb, kb -> k',
data_vector_x,
data_vector_x,
) + jnp.einsum(
'kb, kb -> k',
data_vector_y,
data_vector_y,
) # <v,Bv>
numerator = jax.lax.pmean(sharded_numerator, axis_name='devices')
denominator = jax.lax.pmean(sharded_denominator, axis_name='devices')
per_machine_batch_size = data_vector_x.shape[1]
numerator /= per_machine_batch_size
denominator /= per_machine_batch_size
if not maximize:
numerator = -numerator
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return (
numerator,
denominator,
) # vAv and v(B + epsilonI)v
def unbiased_pls_rayleigh_quotient_components(
local_eigenvectors: eg_utils.SplitVector,
sharded_data: Tuple[eg_utils.SplitVector, eg_utils.SplitVector],
mean_estimate: Optional[eg_utils.SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the generalized eigenvalues.
Calculates the numerator (vTAv) and denominator (vTBv) separately so we can
loop over the entire dataset then divide. The calculation is distributed
across machines.
Args:
local_eigenvectors: eg_utils.SplitVector holding the generalized
eigenvectors sharded across machines. Array Tree with leaves of shape
[l, ...] denoting v_l.
    sharded_data: Tuple of two independent eg_utils.SplitVector batches from
      our two data sources. Array trees with leaves of shape [b, ...].
mean_estimate: eg_utils.SplitVector containing an estimate of the mean of
the input data if it is not centered by default. This is used to calculate
the covariances. Array tree with leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
    maximize: unused. Solving Av = lambda * Bv is the only sensible approach
      for PLS. We do not foresee a use case for (-Av) = lambda * Bv. Please
      contact authors if you have a need for it.
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
del maximize
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
data_vector_x = []
data_vector_y = []
for sample in sharded_data:
if mean_estimate is not None:
sample = jax.tree_map(lambda x, y: x - y, sample, mean_estimate)
data_vector = eg_utils.tree_einsum(
'k..., b... -> kb',
all_eigenvectors,
sample,
)
data_vector_x.append(
jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.x, # pytype: disable=attribute-error # numpy-scalars
))
data_vector_y.append(
jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.y, # pytype: disable=attribute-error # numpy-scalars
))
sharded_numerator = 2 * jnp.einsum(
'kb, kb -> k',
data_vector_x[0],
data_vector_y[0],
) # <v,Av>
sharded_denominator = jnp.einsum(
'kb, kb -> k',
data_vector_x[1],
data_vector_x[1],
) + jnp.einsum(
'k..., k... -> k',
all_eigenvectors.y,
all_eigenvectors.y,
) # <v,Bv>
numerator = jax.lax.pmean(sharded_numerator, axis_name='devices')
denominator = jax.lax.pmean(sharded_denominator, axis_name='devices')
per_machine_batch_size = data_vector_x[0].shape[1]
numerator /= per_machine_batch_size
denominator /= per_machine_batch_size
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return (
numerator,
denominator,
) # vAv and v(B + epsilonI)v
def unbiased_ica_rayleigh_quotient_components(
local_eigenvectors: chex.ArrayTree,
sharded_data: Tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree,],
mean_estimate: Optional[chex.ArrayTree] = None,
epsilon: Optional[float] = None,
maximize: bool = True
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the ica generalized eigenvalues.
Calculates the numerator (vTAv) and denominator (vTBv) separately so we can
loop over the entire dataset then divide. The calculation is distributed
across machines.
Args:
    local_eigenvectors: Array tree holding the eigenvector estimates sharded
      across machines, with leaves of shape [l, ...] denoting v_l.
    sharded_data: Tuple of three array trees holding independent batches of
      data from the single data source, with leaves of shape [b, ...].
    mean_estimate: Array tree containing an estimate of the mean of the input
      data if it is not centered by default. This is used to calculate the
      covariances. Leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False).
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = tuple(
jax.tree_map(lambda x, y: x - y, sample, mean_estimate)
for sample in sharded_data)
(
kurtosis,
independent_covariance
) = eg_gradients.unbiased_ica_matrix_products(
all_eigenvectors,
sharded_data,
)
numerator = eg_utils.tree_einsum(
'k..., k... -> k',
kurtosis,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
denominator = eg_utils.tree_einsum(
'k..., k... -> k',
independent_covariance,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
if not maximize:
numerator = -numerator # pytype: disable=unsupported-operands # numpy-scalars
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return numerator, denominator
def pca_generalised_eigengame_rayleigh_quotient_components(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
mean_estimate: Optional[eg_utils.SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the pca generalized eigenvalues.
  Calculates the numerator (vTAv) and returns a vector of ones for the
  denominator (since vTv = 1 after normalization) so we can loop over the
  entire dataset then divide. The calculation is distributed across machines.
  Args:
    local_eigenvectors: Array tree holding the eigenvector estimates sharded
      across machines, with leaves of shape [l, ...] denoting v_l.
    sharded_data: Array tree holding a batch of data, with leaves of shape
      [b, ...].
    mean_estimate: Array tree containing an estimate of the mean of the input
      data if it is not centered by default. This is used to calculate the
      covariances. Leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
    maximize: unused. Solving Av = lambda * v is the only sensible approach
      for PCA. We do not foresee a use case for (-Av) = lambda * v. Please
      contact authors if you have a need for it.
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
del maximize
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(lambda x, y: x - y, sharded_data, mean_estimate)
data_vector_product = eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
local_numerator = jnp.einsum(
'bk, bk -> k',
data_vector_product,
data_vector_product,
)
per_machine_batch_size, total_eigenvector_count = data_vector_product.shape # pytype: disable=attribute-error # numpy-scalars
numerator = jax.lax.pmean(local_numerator, axis_name='devices')
numerator = numerator / per_machine_batch_size
denominator = jnp.ones(total_eigenvector_count)
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return numerator, denominator
def matrix_inverse_rayleigh_quotient_components(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
mean_estimate: Optional[eg_utils.SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.Array, chex.Array]:
"""Use the Rayleigh quotient to estimate the inv generalized eigenvalues.
  Calculates the denominator (vTBv) and returns a vector of ones for the
  numerator (A = I here and the eigenvectors are normalized) so we can loop
  over the entire dataset then divide. The calculation is distributed across
  machines.
  Args:
    local_eigenvectors: Array tree holding the eigenvector estimates sharded
      across machines, with leaves of shape [l, ...] denoting v_l.
    sharded_data: Array tree holding a batch of data, with leaves of shape
      [b, ...].
    mean_estimate: Array tree containing an estimate of the mean of the input
      data if it is not centered by default. This is used to calculate the
      covariances. Leaves of shape [...].
epsilon: Add an optional isotropic term for the variance matrix to make it
positive definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
    maximize: unused. Solving Iv = lambda * Bv is the only sensible approach
      for inverting B. We do not foresee a use case for (-Iv) = lambda * Bv.
      Please contact authors if you have a need for it.
Returns:
Tuple of two arrays of shape [k] denoting the numerator and denominator of
the rayleigh quotient. These can be summed and then divided outside of this
function in order to estimate the eigenvalues.
"""
del maximize
all_eigenvectors = jax.lax.all_gather(
eg_utils.normalize_eigenvectors(local_eigenvectors),
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(lambda x, y: x - y, sharded_data, mean_estimate)
data_vector_product = eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
local_denominator = jnp.einsum(
'bk, bk -> k',
data_vector_product,
data_vector_product,
)
per_machine_batch_size, total_eigenvector_count = data_vector_product.shape # pytype: disable=attribute-error # numpy-scalars
denominator = jax.lax.pmean(local_denominator, axis_name='devices')
denominator = denominator / per_machine_batch_size
if epsilon is not None:
# We can just add epsilon since we normalized the eigenvectors.
denominator += epsilon
return jnp.ones(total_eigenvector_count), denominator
def calculate_eigenvalues(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
) -> chex.Array:
"""Calculates (vX)TXv, which gives the estimate of the eigenvalues.
We do this in a distributed fashion by first calculating Xv and then
concatenating it across machines, resulting in a larger effective batch size.
Args:
local_eigenvectors: Array Tree holding the generalized eigenvectors sharded
across machines. Array Tree with leaves of shape [l, ...] denoting v_l.
sharded_data: Array Tree holding data from our two data sources. Array Tree
with leaves of shape [b, ...].
Returns:
Duplicated copies of all the eigenvalues on all devices, Shape of:
[total_eigenvector_count]
"""
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
# Calculate Xv for all v. Unlike the gradient, we can generate the eigenvalues
# with just this.
sharded_data_vector = eg_utils.tree_einsum(
'b..., k... -> kb',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
full_data_vector = jax.lax.all_gather(
sharded_data_vector,
axis_name='devices',
axis=1,
tiled=True,
)
return jnp.einsum('kb,kb->k', full_data_vector, full_data_vector)
def subspace_error(
approx_eigenvectors: chex.Array,
true_eigenvectors: chex.Array,
matrix_b: Optional[chex.Array] = None,
) -> float:
"""Compute subspace error between approximate solution and ground truth.
Given the top-k ground truth eigenvectors W* and approximations W to the EVP,
subspace error can be computed as:
1 - 1 / k * trace(W* pinv(W*) W pinv(W)).
Where:
W* = top-k eigenvectors of B^{-1/2} A B^{-1/2}
W = B^{1/2} V.
Let v be a solution to the GEVP, Av = lambda' Bv
Then w = B^{1/2} v is a solution to the normalized EVP,
B^{-1/2} A B^{-1/2} w = lambda w, with eigenvalue lambda = lambda'.
Leveraging this equivalence, we can measure subspace error of the GEVP
solution by first mapping it to the normalized case and computing subspace
error there.
Args:
approx_eigenvectors: Array of shape (k, d) approximate top-k solution to Av
= lambda Bv. This function assumes that the eigenvectors are flattened
into a single dimension.
true_eigenvectors: Array of shape (k, d) exact top-k solution to Av = lambda
Bv.
matrix_b: Array of shape (d, d) of the matrix B in Av = lambda Bv. Default
assumes B = I (i.e. simple eigenvalue problem instead of general)
Returns:
float, subspace error > 0
"""
k = approx_eigenvectors.shape[0]
if matrix_b is not None:
matrix_b_sqrt = scipy.linalg.sqrtm(matrix_b)
# Transform into a space where the general eigenvectors are orthogonal
# in the general eigenvalue problem
transformed_approx_eigenvectors = jnp.einsum(
'kD,DE->kE',
approx_eigenvectors,
matrix_b_sqrt,
)
transformed_true_eigenvectors = jnp.einsum(
'kD,DE->kE',
true_eigenvectors,
matrix_b_sqrt,
)
else:
# Keep it as is in simple eigenvector case.
transformed_approx_eigenvectors = approx_eigenvectors
transformed_true_eigenvectors = true_eigenvectors
# Normalize all the vectors
normalized_approx_eigenvectors = eg_utils.normalize_eigenvectors(
transformed_approx_eigenvectors,)
normalized_true_eigenvectors = eg_utils.normalize_eigenvectors(
transformed_true_eigenvectors,)
# Calculate the Penrose inverses.
approx_eigenvector_pinv = jnp.linalg.pinv(normalized_approx_eigenvectors)
true_eigenvectors_pinv = jnp.linalg.pinv(normalized_true_eigenvectors)
  # A pinv(A) creates a projection onto the space spanned by A. Therefore if
  # the row vectors of B span a similar space, the projection preserves them.
approximate_projector = jnp.einsum(
'kD,Ek->DE',
normalized_approx_eigenvectors,
approx_eigenvector_pinv,
)
true_projector = jnp.einsum(
'kD,Ek->DE',
normalized_true_eigenvectors,
true_eigenvectors_pinv,
)
# The trace will be shrunk if the projection removes components.
  # Alternatively, this can be interpreted as the Frobenius inner product of
  # the matrices representing the two projection maps.
subspace_similarity = jnp.einsum(
'KD, KD',
approximate_projector,
true_projector,
)
# normalize and subtract from 1 so value > 0
return 1 - (subspace_similarity / k)
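# Illustrative sketch (not part of the library): subspace_error is invariant
# to an invertible re-mixing of the top-k solution, so a re-mixed exact basis
# from scipy should give an error near zero, while random vectors generally
# will not. All names and sizes below are assumptions for illustration.
def _example_subspace_error(k: int = 2, dim: int = 5) -> Tuple[chex.Array,
                                                               chex.Array]:
  """Returns subspace errors for a re-mixed exact basis and random vectors."""
  key_a, key_b, key_mix, key_rand = jax.random.split(jax.random.PRNGKey(0), 4)
  sqrt_a = jax.random.normal(key_a, (dim, dim))
  sqrt_b = jax.random.normal(key_b, (dim, dim))
  matrix_a = sqrt_a @ sqrt_a.T
  matrix_b = sqrt_b @ sqrt_b.T + dim * jnp.eye(dim)  # keep B well conditioned
  # scipy solves Av = lambda Bv exactly; eigenvalues come in ascending order,
  # so the top-k eigenvectors are the last k columns.
  _, exact = scipy.linalg.eigh(matrix_a, matrix_b)
  top_k = jnp.asarray(exact[:, -k:]).T  # rows hold the top-k eigenvectors
  remixed = jax.random.normal(key_mix, (k, k)) @ top_k  # spans the same space
  random_vectors = jax.random.normal(key_rand, (k, dim))
  return (
      subspace_error(remixed, top_k, matrix_b=matrix_b),  # expected ~0
      subspace_error(random_vectors, top_k, matrix_b=matrix_b),  # > 0
  )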
| eigengame-main | eigengame/eg_objectives.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Different Eigengame gradients and their associated helpers.
Einsum legend:
...: the massive data dimension of the eigenvectors on which we run
PCA/CCA/etc. The input data may not be contiguous, and may consist of
multiple shapes in a pytree (e.g. images, activations of multiple layers of a
neural net). Hence we use ellipses to denote it.
l: local eigenvector index -- the number of eigenvectors per machine.
k: global eigenvector index -- number of eigenvectors over all machines.
b: batch dimension, batch size per machine.
"""
from typing import Tuple, Optional
import chex
from eigengame import eg_utils
import jax
import jax.numpy as jnp
import numpy as np
SplitVector = eg_utils.SplitVector
def pca_unloaded_gradients(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
mask: chex.Array,
sliced_identity: chex.Array,
) -> chex.ArrayTree:
"""Calculates the gradients for each of the eigenvectors for EG unloaded.
Calculates the gradients of eigengame unloaded (see Algorithm 1. of
https://arxiv.org/pdf/2102.04152.pdf)
This is done in a distributed manner. Each TPU is responsible for updating
the whole of a subset of eigenvectors, and get a different batch of the data.
The training data is applied and then aggregated to increase the effective
batch size.
Args:
local_eigenvectors: eigenvectors sharded across machines in
a ShardedDeviceArray. ArrayTree with leaves of shape:
[eigenvectors_per_device, ...]
sharded_data: Training data sharded across machines. ArrayTree with leaves
of shape: [batch_size_per_device, ...]
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
Returns:
Gradients for the local eigenvectors. ArrayTree with leaves of shape:
[eigenvectors_per_device, ...] on each device.
"""
# Collect the matrices from all the other machines.
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
# Calculate X^TXv/b for all v. This and v are all you need to construct the
# gradient. This is done on each machine with a different batch.
data_vector_product = eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
# divide by the batch size
data_vector_product /= data_vector_product.shape[0] # pytype: disable=attribute-error # numpy-scalars
sharded_gram_vector = eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data,
data_vector_product,
)
# Aggregate the gram matrix products across machines.
gram_vector = jax.lax.pmean(
sharded_gram_vector,
axis_name='devices',
)
# Calculate <Xv_i, Xv_j> for all vectors on each machine.
scale = eg_utils.tree_einsum(
'l..., k... -> lk',
local_eigenvectors,
gram_vector,
reduce_f=lambda x, y: x + y,
)
  # The mask is 1 below the diagonal.
penalty = eg_utils.tree_einsum_broadcast(
'k..., lk, lk -> l...',
all_eigenvectors,
scale,
mask,
)
reward = eg_utils.get_local_slice(sliced_identity, gram_vector) # XtXv_i
return jax.tree_map(lambda x, y: x - y, reward, penalty)
def create_sharded_mask(eigenvector_count: int) -> chex.ArraySharded:
"""Defines a mask of 1s under the diagonal and shards it."""
mask = np.ones((eigenvector_count, eigenvector_count))
r, c = np.triu_indices(eigenvector_count)
mask[..., r, c] = 0
start_index = jax.process_index() * eigenvector_count // jax.process_count()
end_index = (jax.process_index()+1) * eigenvector_count // jax.process_count()
mask = mask[start_index:end_index]
mask = mask.reshape((
jax.local_device_count(),
eigenvector_count // jax.device_count(),
eigenvector_count,
))
return jax.device_put_sharded(list(mask), jax.local_devices())
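# Illustrative note (assuming a single host): for eigenvector_count=4 the
# dense mask built above is strictly lower triangular,
#   [[0, 0, 0, 0],
#    [1, 0, 0, 0],
#    [1, 1, 0, 0],
#    [1, 1, 1, 0]]
# so eigenvector i is only penalised against its parents j < i. The host's
# slice is then reshaped so that each local device holds
# eigenvector_count // jax.device_count() of these rows, with a leading
# local-device axis added by device_put_sharded.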
def create_sharded_identity(eigenvector_count: int) -> chex.ArraySharded:
"""Create identity matrix which is then split across devices."""
identity = np.eye(eigenvector_count)
start_index = jax.process_index() * eigenvector_count // jax.process_count()
end_index = (jax.process_index() +
1) * eigenvector_count // jax.process_count()
identity = identity[start_index:end_index]
identity = identity.reshape((
jax.local_device_count(),
eigenvector_count // jax.device_count(),
eigenvector_count,
))
return jax.device_put_sharded(list(identity), jax.local_devices())
def _generalized_eg_matrix_inner_products(
local_eigenvectors: chex.ArrayTree,
all_eigenvectors: chex.ArrayTree,
b_vector_product: chex.ArrayTree,
a_vector_product: chex.ArrayTree,
aux_b_vector_product: chex.ArrayTree,
) -> Tuple[chex.Array, chex.Array, chex.Array, chex.Array,]:
"""Compute various inner product quantities used in the gradient.
In particular, the loss requires the various forms of inner product
<v_i, Av_j> in order to function. This function calculates all of them to keep
the gradient function less cluttered.
Args:
local_eigenvectors: ArrayTree with local copies of generalised eigen
vectors with leaves of shape [l, ...] denoting v_l.
all_eigenvectors: ArrayTree with all generalised eigen vectors with leaves
of shape [k, ...] denoting v_k
b_vector_product: ArrayTree with all B matrix eigenvector products with
leaves of shape [k, ...] denoting Bv_k
a_vector_product: ArrayTree with all A matrix eigenvector products with
leaves of shape [k, ...] denoting Av_k
aux_b_vector_product: ArrayTree with all B matrix eigenvector products from
the axuiliary variable with leaves of shape [k, ...] denoting Bv_k
Returns:
Tuple of arrays containing:
local_b_inner_product: <v_l, Bv_k>
local_a_inner_product: <v_l, Av_k>
b_inner_product_diag: <v_k, Bv_k>
      a_inner_product_diag: <v_k, Av_k>
"""
# Evaluate the various forms of the inner products used in the gradient
local_aux_b_inner_product = eg_utils.tree_einsum(
'l... , k... -> lk',
local_eigenvectors,
aux_b_vector_product,
reduce_f=lambda x, y: x + y,
) # v_l B v_k, used in the penalty term
local_a_inner_product = eg_utils.tree_einsum(
'l..., k... -> lk',
local_eigenvectors,
a_vector_product,
reduce_f=lambda x, y: x + y,
) # v_l A v_k, used in the penalty term
b_inner_product_diag = eg_utils.tree_einsum(
'k..., k... -> k',
all_eigenvectors,
b_vector_product,
reduce_f=lambda x, y: x + y,
) # v_k B v_k, used in the penalty and reward terms
a_inner_product_diag = eg_utils.tree_einsum(
'k..., k... -> k',
all_eigenvectors,
a_vector_product,
reduce_f=lambda x, y: x + y,
) # v_k A v_k, used in the reward term
return (
local_aux_b_inner_product,
local_a_inner_product,
b_inner_product_diag,
a_inner_product_diag,
)
def _generalized_eg_gradient_reward(
local_b_inner_product_diag: chex.Array,
local_a_inner_product_diag: chex.Array,
local_b_vector_product: chex.ArrayTree,
local_a_vector_product: chex.ArrayTree,
) -> chex.ArrayTree:
"""Evaluates the reward term for the eigengame gradient for local vectors.
This attempts to maximise the rayleigh quotient for each eigenvector, and
by itself would find the eigenvector with the largest generalized eigenvalue.
The output corresponds to the equation for all l:
<v_l,Bv_l>Av_l - <v_l,Av_l>Bv_l
Args:
local_b_inner_product_diag: Array of shape [l] corresponding to <v_l,Bv_l>.
local_a_inner_product_diag: Array of shape [l] corresponding to <v_l,Av_l>.
local_b_vector_product: ArrayTree with local eigen vectors products with
B with leaves of shape [l, ...] denoting Bv_l.
local_a_vector_product: ArrayTree with local eigen vectors products with
A with leaves of shape [l, ...] denoting Av_l.
Returns:
The reward gradient for the eigenvectors living on the current machine.
Array tree with leaves of shape [l, ...]
"""
# Evaluates <v_l,Bv_l>Av_l
scaled_a_vectors = eg_utils.tree_einsum_broadcast(
'l..., l -> l...',
local_a_vector_product,
local_b_inner_product_diag,
)
# Evaluates <v_l,Av_l>Bv_l
scaled_b_vectors = eg_utils.tree_einsum_broadcast(
'l..., l -> l...',
local_b_vector_product,
local_a_inner_product_diag,
)
return jax.tree_map(
lambda x, y: x - y,
scaled_a_vectors,
scaled_b_vectors,
)
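# Single-leaf sketch (illustrative, not part of the library): with flat
# eigenvectors of shape [l, d] and symmetric dense matrices A and B, the
# reward above reduces to the plain formula <v_l,Bv_l>Av_l - <v_l,Av_l>Bv_l.
def _example_reward_dense(
    eigenvectors: chex.Array,  # [l, d], rows are v_l
    matrix_a: chex.Array,  # [d, d], symmetric
    matrix_b: chex.Array,  # [d, d], symmetric
) -> chex.Array:
  """Direct dense computation of the reward term, for comparison only."""
  av = eigenvectors @ matrix_a  # rows Av_l (A symmetric)
  bv = eigenvectors @ matrix_b  # rows Bv_l (B symmetric)
  vbv = jnp.einsum('ld, ld -> l', eigenvectors, bv)  # <v_l, Bv_l>
  vav = jnp.einsum('ld, ld -> l', eigenvectors, av)  # <v_l, Av_l>
  return vbv[:, None] * av - vav[:, None] * bv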
def _generalized_eg_gradient_penalty(
local_b_inner_product: chex.Array,
local_a_inner_product: chex.Array,
local_b_inner_product_diag: chex.Array,
b_inner_product_diag: chex.Array,
local_b_vector_product: chex.ArrayTree,
b_vector_product: chex.ArrayTree,
mask: chex.Array,
b_diag_min: float = 1e-6) -> chex.ArrayTree:
r"""Evaluates the penalty term for the eigengame gradient for local vectors.
This attempts to force each eigenvalue to be B orthogonal (i.e. <v,Bw> = 0) to
all its parents. Combining this with the reward terms means each vector
learns to maximise the eigenvalue whilst staying orthogonal, giving us the top
k generalized eigenvectors.
The output corresponds to the equation for all l on the local machine:
\\sum_{k<l}(<v_l,Bv_l>Bv_k - <v_l,Bv_k>Bv_l) <v_l,Av_k>/<v_k,Bv_k>
Note: If the gradient returned must be unbiased, then any estimated quantities
  in the formula below must be independent and unbiased (e.g., the numerator of
  the first term (<v_l,Av_k>/<v_k,Bv_k>)<v_l,Bv_l>Bv_k must use independent,
  unbiased estimates for each element of this product, otherwise the estimates
are correlated and will bias the computation). Furthermore, any terms in the
denominator must be deterministic (i.e., <v_k,Bv_k> must be computed without
using sample estimates which can be accomplished by introducing an auxiliary
learning variable).
Args:
local_b_inner_product: Array of shape [l,k] denoting <v_l, Bv_k>
local_a_inner_product: Array of shape [l,k] denoting <v_l, Av_k>
local_b_inner_product_diag: Array of shape [l] denoting <v_l, Bv_l>
b_inner_product_diag: Array of shape [k] denoting <v_k, Bv_k>. Insert an
      auxiliary variable here in order to debias the reciprocal.
local_b_vector_product: ArrayTree with local eigen vectors products with
B with leaves of shape [l, ...] denoting Bv_l.
b_vector_product: ArrayTree with all eigen vectors products with
B with leaves of shape [k, ...] denoting Bv_k.
mask: Slice of a k x k matrix which is 1's under the diagonals and 0's
everywhere else. This is used to denote the parents of each vector. Array
of shape [l, k].
b_diag_min: Minimum value for the b_inner_product_diag. This value is
divided, so we use this to ensure we don't get a division by zero.
Returns:
The penalty gradient for the eigenvectors living on the current machine.
"""
# Calculate <v_l,Av_k>/<v_k,Bv_k> with mask
scale = jnp.einsum(
'lk, lk, k -> lk',
mask,
local_a_inner_product,
1 / jnp.maximum(b_inner_product_diag, b_diag_min),
)
# Calculate scale * <v_l,Bv_l>Bv_k term
global_term = eg_utils.tree_einsum_broadcast(
'k..., lk, l -> l...',
b_vector_product,
scale,
local_b_inner_product_diag,
)
# Calculate scale * <v_l,Bv_k>Bv_l term
local_term = eg_utils.tree_einsum_broadcast(
'l..., lk, lk -> l...',
local_b_vector_product,
scale,
local_b_inner_product,
)
return jax.tree_map(lambda x, y: x - y, global_term, local_term)
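# Single-leaf sketch (illustrative, not part of the library): the same penalty
# computed densely for flat eigenvectors of shape [k, d] and symmetric A, B,
# using exact inner products in place of the auxiliary variable estimates.
def _example_penalty_dense(
    eigenvectors: chex.Array,  # [k, d], rows are v_k
    matrix_a: chex.Array,  # [d, d], symmetric
    matrix_b: chex.Array,  # [d, d], symmetric
) -> chex.Array:
  """Direct dense computation of the penalty term, for comparison only."""
  av = eigenvectors @ matrix_a  # rows Av_k
  bv = eigenvectors @ matrix_b  # rows Bv_k
  vbv = jnp.einsum('ld, kd -> lk', eigenvectors, bv)  # <v_l, Bv_k>
  vav = jnp.einsum('ld, kd -> lk', eigenvectors, av)  # <v_l, Av_k>
  count = eigenvectors.shape[0]
  mask = jnp.tril(jnp.ones((count, count)), k=-1)  # 1s strictly below diagonal
  scale = mask * vav / jnp.diag(vbv)[None, :]  # <v_l,Av_k> / <v_k,Bv_k>
  global_term = jnp.einsum('lk, l, kd -> ld', scale, jnp.diag(vbv), bv)
  local_term = jnp.einsum('lk, lk, ld -> ld', scale, vbv, bv)
  return global_term - local_term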
def generalized_eigengame_gradients(
*,
local_eigenvectors: chex.ArrayTree,
all_eigenvectors: chex.ArrayTree,
a_vector_product: chex.ArrayTree,
b_vector_product: chex.ArrayTree,
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.ArrayTree, eg_utils.AuxiliaryParams,]:
"""Solves for Av = lambda Bv using eigengame in a data parallel manner.
Algorithm pseudocode can be found in Algorithm 1 of overleaf doc:
https://ol.deepmind.host/read/kxdfdtfbsdxc
  For moderately sized models this is fine, but for really large models (a
  moderate number of eigenvectors, each with >1m parameters) the memory
  overhead might be strained in the parallel case.
Args:
local_eigenvectors: ArrayTree with local eigen vectors with leaves
of shape [l, ...] denoting v_l.
all_eigenvectors: ArrayTree with all eigen vectors with leaves
of shape [k, ...] denoting v_k.
a_vector_product: ArrayTree with all eigen vectors products with
A with leaves of shape [k, ...] denoting Av_k.
b_vector_product: ArrayTree with all eigen vectors products with
B with leaves of shape [k, ...] denoting Bv_k.
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
epsilon: Add an isotropic term to the B matrix to make it
positive semi-definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False).
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variable estimates.
"""
if not maximize:
a_vector_product = -a_vector_product # pytype: disable=unsupported-operands # numpy-scalars
# Add a small epsilon to the b vector product to make it positive definite.
# This can help with convergence.
if epsilon is not None:
b_vector_product = jax.tree_map(
lambda x, y: x + epsilon * y,
b_vector_product,
all_eigenvectors,
)
# Calculate the various matrix inner products.
(
# Get <v_l ,Bv_k> and <v_l, Av_k>.
local_aux_b_inner_product,
local_a_inner_product,
# Get <v_k ,Bv_k> and <v_k, Av_k>.
b_inner_product_diag,
a_inner_product_diag,
) = _generalized_eg_matrix_inner_products(
local_eigenvectors,
all_eigenvectors,
b_vector_product,
a_vector_product,
auxiliary_variables.b_vector_product,
)
# Get the local slices of these quantities. Going from k rows to l rows.
(
local_b_vector_product,
local_a_vector_product,
local_b_inner_product_diag,
local_a_inner_product_diag,
) = eg_utils.get_local_slice(
sliced_identity,
(
b_vector_product,
a_vector_product,
b_inner_product_diag,
a_inner_product_diag,
),
)
# Calculate the reward
reward = _generalized_eg_gradient_reward(
local_b_inner_product_diag,
local_a_inner_product_diag,
local_b_vector_product,
local_a_vector_product,
)
# Calculate the penalty using the associated auxiliary variables.
penalty = _generalized_eg_gradient_penalty(
local_aux_b_inner_product,
local_a_inner_product,
local_b_inner_product_diag,
auxiliary_variables.b_inner_product_diag,
local_b_vector_product,
auxiliary_variables.b_vector_product,
mask,
)
gradient = jax.tree_map(lambda x, y: x - y, reward, penalty)
# Propagate the existing auxiliary variables
new_auxiliary_variable = eg_utils.AuxiliaryParams(
b_vector_product=b_vector_product,
b_inner_product_diag=b_inner_product_diag,
)
return gradient, new_auxiliary_variable
def _biased_cca_matrix_products(
eigenvectors: SplitVector,
sharded_data: SplitVector,
) -> Tuple[SplitVector, SplitVector]:
"""Calculate and aggregate the equivalent gram matrices for CCA.
CCA of data sources with dim n, m is equivalent to solving the generalized
eigenvalue problem Av = lambda Bv, where v is dim n + m, A has the
covariances between the data sources and B the covariances within the data
sources. This function unpacks v then calculates Av and Bv from the two data
sources before aggregating them across machines.
The majority of the CCA specific logic lives here. The remainder of the
algorithm should work with any other generalized eigenvalue problem.
Args:
    eigenvectors: SplitVector holding the generalized eigenvector estimates
      for the two data sources. Array tree with leaves of shape [k, ...]
      denoting v_k.
    sharded_data: SplitVector holding a batch of data from our two data
      sources. Array tree with leaves of shape [b, ...].
Returns:
Tuple of two SplitVectors, each containing array trees with leaves of shape
[k, ...] containing the gram matrix product of covariances between the data
sources (Av) and within the data (Bv) respectively.
"""
data_vector = eg_utils.tree_einsum(
'k...,b...-> bk',
eigenvectors,
sharded_data,
)
data_vector_x = jax.tree_util.tree_reduce(lambda x, y: x + y, data_vector.x) # pytype: disable=attribute-error # numpy-scalars
data_vector_y = jax.tree_util.tree_reduce(lambda x, y: x + y, data_vector.y) # pytype: disable=attribute-error # numpy-scalars
# divide by the batch size
data_vector_x /= data_vector_x.shape[0]
data_vector_y /= data_vector_y.shape[0]
def _gram_product(
data: chex.ArrayTree,
data_vector: chex.Array,
) -> chex.ArrayTree:
return eg_utils.tree_einsum_broadcast('b..., bk -> k...', data, data_vector)
sharded_variance_vector = SplitVector(
x=_gram_product(sharded_data.x, data_vector_x),
y=_gram_product(sharded_data.y, data_vector_y))
sharded_interaction_vector = SplitVector(
x=_gram_product(sharded_data.x, data_vector_y),
y=_gram_product(sharded_data.y, data_vector_x))
# Aggregate this across all machines.
variance_vector_product = jax.lax.pmean(
sharded_variance_vector, axis_name='devices')
interaction_vector_product = jax.lax.pmean(
sharded_interaction_vector, axis_name='devices')
return (
variance_vector_product,
interaction_vector_product,
) # Bv and Av
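# Dense single-machine sketch (illustrative, not part of the library): for
# flat centered data X of shape [b, n] and Y of shape [b, m], the CCA
# generalized eigenvalue problem uses the block matrices
#   A = [[0, Cxy], [Cyx, 0]]  and  B = [[Cxx, 0], [0, Cyy]],
# which is what the interaction and variance products above compute
# implicitly, without ever materializing A or B.
def _example_cca_block_matrices(
    data_x: chex.Array,  # [b, n], centered
    data_y: chex.Array,  # [b, m], centered
) -> Tuple[chex.Array, chex.Array]:
  """Returns dense (A, B), each of shape [n + m, n + m]."""
  batch_size = data_x.shape[0]
  cxx = data_x.T @ data_x / batch_size
  cyy = data_y.T @ data_y / batch_size
  cxy = data_x.T @ data_y / batch_size
  matrix_a = jnp.block([[jnp.zeros_like(cxx), cxy],
                        [cxy.T, jnp.zeros_like(cyy)]])
  matrix_b = jnp.block([[cxx, jnp.zeros_like(cxy)],
                        [jnp.zeros_like(cxy.T), cyy]])
  return matrix_a, matrix_b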
def biased_cca_gradients(
*,
local_eigenvectors: SplitVector,
sharded_data: SplitVector,
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
mean_estimate: Optional[SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[SplitVector, eg_utils.AuxiliaryParams,]:
"""Evaluates CCA gradients for two data sources with local data parallelism.
Algorithm pseudocode can be found in Algorithm 1 of overleaf doc:
https://ol.deepmind.host/read/kxdfdtfbsdxc
Args:
local_eigenvectors: SplitVector with local eigen vectors products. Array
tree with leaves of shape [l, ...] denoting v_l.
sharded_data: SplitVector containing a batch of data from our two data
sources. Array tree with leaves of shape [b, ...].
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
mean_estimate: SplitVector containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the variance matrix to make it
positive semi-definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False). Due to the symmetry of the CCA
problem, setting to True or False should not change the performance.
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(lambda x, y: x - y, sharded_data, mean_estimate)
# Evaluate the matrix products for all eigenvectors on all machines using
# different batches and aggregate them to get Bv_k and Av_k.
variance_vector_product, interaction_vector_product = (
_biased_cca_matrix_products(all_eigenvectors, sharded_data)
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=interaction_vector_product,
b_vector_product=variance_vector_product,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=epsilon,
maximize=maximize,
)
def _unbiased_cca_matrix_products(
eigenvectors: SplitVector,
sharded_data: Tuple[SplitVector, SplitVector],
) -> Tuple[SplitVector, SplitVector]:
"""Calculate and aggregate the equivalent gram matrices for CCA.
CCA of data sources with dim n, m is equivalent to solving the generalized
eigenvalue problem Av = lambda Bv, where v is dim n + m, A has the
covariances between the data sources and B the covariances within the data
sources. This function unpacks v then calculates Av and Bv from the two data
sources before aggregating them across machines.
The majority of the CCA specific logic lives here. The remainder of the
algorithm should work with any other generalized eigenvalue problem.
Args:
    eigenvectors: SplitVector holding the generalized eigenvector estimates
      for the two data sources. Array tree with leaves of shape [k, ...]
      denoting v_k.
    sharded_data: Tuple of two independent SplitVector batches from our two
      data sources. Array trees with leaves of shape [b, ...].
Returns:
Tuple of two SplitVectors, each containing array trees with leaves of shape
[k, ...] containing the gram matrix product of covariances between the data
sources (Av) and within the data (Bv) respectively.
"""
data_vector_x = []
data_vector_y = []
for sample in sharded_data:
data_vector = eg_utils.tree_einsum(
'k...,b...-> bk',
eigenvectors,
sample,
)
data_vector_x_sample = jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.x, # pytype: disable=attribute-error # numpy-scalars
)
data_vector_y_sample = jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.y, # pytype: disable=attribute-error # numpy-scalars
)
# divide by the batch size
data_vector_x.append(data_vector_x_sample / data_vector_x_sample.shape[0])
data_vector_y.append(data_vector_y_sample / data_vector_y_sample.shape[0])
def _gram_product(
data: chex.ArrayTree,
data_vector: chex.Array,
) -> chex.ArrayTree:
return eg_utils.tree_einsum_broadcast('b..., bk -> k...', data, data_vector)
sharded_variance_vector = SplitVector(
x=_gram_product(sharded_data[0].x, data_vector_x[0]),
y=_gram_product(sharded_data[0].y, data_vector_y[0]))
sharded_interaction_vector = SplitVector(
x=_gram_product(sharded_data[1].x, data_vector_y[1]),
y=_gram_product(sharded_data[1].y, data_vector_x[1]))
# Aggregate this across all machines.
variance_vector_product = jax.lax.pmean(
sharded_variance_vector, axis_name='devices')
interaction_vector_product = jax.lax.pmean(
sharded_interaction_vector, axis_name='devices')
return (
variance_vector_product,
interaction_vector_product,
) # Bv and Av
def unbiased_cca_gradients(
*,
local_eigenvectors: SplitVector,
sharded_data: Tuple[SplitVector, SplitVector],
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
mean_estimate: Optional[SplitVector] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[SplitVector, eg_utils.AuxiliaryParams,]:
"""Evaluates unbiased CCA gradients with data parallelism.
Algorithm pseudocode can be found in Algorithm 1 of overleaf doc:
https://ol.deepmind.host/read/kxdfdtfbsdxc
In this case, we take in two independent batches of data, one to calculate Av
and one to calculate Bv. Using two different samples results in an unbiased
gradient, but there is a bias-variance tradeoff in the performance.
Args:
local_eigenvectors: SplitVector with local eigen vectors products. Array
tree with leaves of shape [l, ...] denoting v_l.
sharded_data: Tuple containing two independent batches of data from our
two data sources. Tuple of SplitVectors with leaves of shape [b, ...].
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
mean_estimate: SplitVector containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the variance matrix to make it
positive semi-definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False). Due to the symmetry of the CCA
problem, setting to True or False should not change the performance.
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = (
jax.tree_map(lambda x, y: x - y, sharded_data[0], mean_estimate),
jax.tree_map(lambda x, y: x - y, sharded_data[1], mean_estimate),
)
# Evaluate the matrix products for all eigenvectors on all machines using
# different batches and aggregate them to get Bv_k and Av_k.
(
variance_vector_product,
interaction_vector_product,
) = _unbiased_cca_matrix_products(
all_eigenvectors,
sharded_data,
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=interaction_vector_product,
b_vector_product=variance_vector_product,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=epsilon,
maximize=maximize,
)
def _unbiased_pls_matrix_products(
eigenvectors: SplitVector,
sharded_data: Tuple[SplitVector, SplitVector],
) -> Tuple[SplitVector, SplitVector]:
"""Calculate and aggregate the equivalent gram matrices for PLS.
  PLS of a data source with dim n and a response (label) of dim m is equivalent
  to solving the generalized eigenvalue problem Av = lambda Bv, where v has
  dim n + m, A holds the covariances between the data source and the response,
  and B holds the covariance within the data source and the identity for the
  response. This function unpacks v, then calculates Av and Bv from the data
  source and response before aggregating them across machines.
  The majority of the PLS-specific logic lives here. The remainder of the
  algorithm should work with any other generalized eigenvalue problem.
  PLS is implemented as a special case of CCA, therefore some eg_utils
  components are reused, e.g., SplitVector.
Args:
    eigenvectors: SplitVector holding the generalized eigenvector estimates for
      the two data sources.
      Array tree with leaves of shape [k, ...] denoting v_k.
    sharded_data: Tuple of two SplitVectors, each holding an independent batch
      of data from our two data sources.
      Array trees with leaves of shape [b, ...].
Returns:
Tuple of two SplitVectors, each containing array trees with leaves of shape
[k, ...] containing the gram matrix product of covariances between the data
sources (Av) and within the data (Bv) respectively.
"""
data_vector_x = []
data_vector_y = []
for sample in sharded_data:
data_vector = eg_utils.tree_einsum(
'k...,b...-> bk',
eigenvectors,
sample,
)
data_vector_x_sample = jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.x, # pytype: disable=attribute-error # numpy-scalars
)
data_vector_y_sample = jax.tree_util.tree_reduce(
lambda x, y: x + y,
data_vector.y, # pytype: disable=attribute-error # numpy-scalars
)
# divide by the batch size
data_vector_x.append(data_vector_x_sample / data_vector_x_sample.shape[0])
data_vector_y.append(data_vector_y_sample / data_vector_y_sample.shape[0])
def _gram_product(
data: chex.ArrayTree,
data_vector: chex.Array,
) -> chex.ArrayTree:
return eg_utils.tree_einsum_broadcast('b..., bk -> k...', data, data_vector)
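  # For PLS, B = [[C_xx, 0], [0, I]] and A = [[0, C_xy], [C_yx, 0]], so below:
  #   Bv has x-block C_xx v_x (estimated from batch 0) and y-block simply v_y,
  #   Av has x-block C_xy v_y and y-block C_yx v_x (both estimated from batch 1).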
sharded_variance_vector = SplitVector(
x=_gram_product(sharded_data[0].x, data_vector_x[0]),
      y=eigenvectors.y)  # difference from CCA: B is the identity on the y block
sharded_interaction_vector = SplitVector(
x=_gram_product(sharded_data[1].x, data_vector_y[1]),
y=_gram_product(sharded_data[1].y, data_vector_x[1]))
# Aggregate this across all machines.
variance_vector_product = jax.lax.pmean(
sharded_variance_vector, axis_name='devices')
interaction_vector_product = jax.lax.pmean(
sharded_interaction_vector, axis_name='devices')
return (
variance_vector_product,
interaction_vector_product,
) # Bv and Av
def unbiased_pls_gradients(
*,
local_eigenvectors: SplitVector,
sharded_data: Tuple[SplitVector, SplitVector],
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
mean_estimate: Optional[SplitVector] = None,
epsilon: Optional[float] = None,
    maximize: bool = True,
) -> Tuple[SplitVector, eg_utils.AuxiliaryParams,]:
"""Evaluates unbiased PLS gradients with data parallelism.
Algorithm pseudocode can be found in Algorithm 1 of overleaf doc:
https://ol.deepmind.host/read/kxdfdtfbsdxc
In this case, we take in two independent batches of data, one to calculate Av
and one to calculate Bv. Using two different samples results in an unbiased
gradient, but there is a bias-variance tradeoff in the performance.
Args:
    local_eigenvectors: SplitVector with local eigenvector estimates. Array
tree with leaves of shape [l, ...] denoting v_l.
sharded_data: Tuple containing two independent batches of data from our
two data sources. Tuple of SplitVectors with leaves of shape [b, ...].
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
mean_estimate: SplitVector containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the variance matrix to make it
positive semi-definite. In this case, we're solving for: Av =
lambda*(B+epsilon*I)v.
maximize: unused- Solving Av = lambda * Bv is the only sensible approach for
PLS. We do not foresee a use case for (-Av) = lambda * Bv. Please contact
authors if you have a need for it.
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
del maximize
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = (
jax.tree_map(lambda x, y: x - y, sharded_data[0], mean_estimate),
jax.tree_map(lambda x, y: x - y, sharded_data[1], mean_estimate),
)
# Evaluate the matrix products for all eigenvectors on all machines using
# different batches and aggregate them to get Bv_k and Av_k.
(
variance_vector_product,
interaction_vector_product,
) = _unbiased_pls_matrix_products(
all_eigenvectors,
sharded_data,
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=interaction_vector_product,
b_vector_product=variance_vector_product,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=epsilon,
)
def pca_generalized_eigengame_gradients(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
mean_estimate: Optional[chex.ArrayTree] = None,
epsilon: Optional[float] = None,
maximize: bool = True,
) -> Tuple[chex.ArrayTree, eg_utils.AuxiliaryParams,]:
"""Evaluates PCA gradients for two data sources with local data parallelism.
Implements PCA. In this case, we simply set the B matrix as I, which means
the problem is solving for Av=lambda v and we're back to the classic eigen
value problem.
This is not as lightweight as eigengame unloaded due to additional terms in
the calculation and handling of the auxiliary variables, and is mostly here to
demonstrate the flexibility of the generalized eigenvalue method. However,
adaptive optimisers may have an easier time with this since the gradients
for the generalized eigengame are naturally tangential to the unit sphere.
einsum legend:
l: local eigenvector index -- the number of eigenvectors per machine.
k: global eigenvector index -- number of eigenvectors over all machines.
Args:
    local_eigenvectors: Array tree with local eigenvector estimates, with
      leaves of shape [l, ...] denoting v_l.
    sharded_data: Array tree containing a batch of input data, with leaves of
      shape [b, ...].
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
    mean_estimate: Array tree containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the A matrix to make it
positive semi-definite. In this case, we're solving for: (A+epsilon*I)v =
lambda*v.
maximize: unused- Solving Av = lambda * v is the only sensible approach for
PCA. We do not foresee a use case for (-Av) = lambda * v. Please contact
authors if you have a need for it.
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
del maximize
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(lambda x, y: x - y, sharded_data, mean_estimate)
# Calculate X^TXv/b for all v. This and v are all you need to construct the
# gradient. This is done on each machine with a different batch.
data_vector_product = eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
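  # data_vector_product has shape [b, k]: the inner product of every sample in
  # the per-device batch with each of the k eigenvectors.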
# divide by the batch size
data_vector_product /= data_vector_product.shape[0] # pytype: disable=attribute-error # numpy-scalars
sharded_gram_vector = eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data,
data_vector_product,
)
gram_vector = jax.lax.pmean(
sharded_gram_vector,
axis_name='devices',
)
# Add a small epsilon to the gram vector (Av) to make it positive definite.
if epsilon is not None:
gram_vector = jax.tree_map(
lambda x, y: x + epsilon * y,
gram_vector,
all_eigenvectors,
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=gram_vector,
b_vector_product=all_eigenvectors, # Just return V here.
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=None,
)
def matrix_inverse_gradients(
local_eigenvectors: chex.ArrayTree,
sharded_data: chex.ArrayTree,
auxiliary_variables: eg_utils.AuxiliaryParams,
mask: chex.Array,
sliced_identity: chex.Array,
mean_estimate: Optional[chex.ArrayTree] = None,
epsilon: Optional[float] = None,
    maximize: bool = True,
) -> Tuple[chex.ArrayTree, eg_utils.AuxiliaryParams,]:
"""Finds the biggest matrix inverse components of B in a data parallel manner.
In this case, we set A = I, and the problem becomes v = lambda Bv. Assuming
only positive eigenvalues, The inverse of B may then be approximated as:
B^{-1} = sum_{i=1}^k lambda_i v_i v_i^T
with lambda_i>lambda_j for i<j meaning the approximation contains the most
significant terms.
einsum legend:
l: local eigenvector index -- the number of eigenvectors per machine.
k: global eigenvector index -- number of eigenvectors over all machines.
Args:
    local_eigenvectors: Array tree with local eigenvector estimates, with
      leaves of shape [l, ...] denoting v_l.
    sharded_data: Array tree containing a batch of input data, with leaves of
      shape [b, ...].
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
    mean_estimate: Array tree containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the variance matrix to make it
positive semi-definite. In this case, we're solving for: v =
lambda*(B+epsilon*I)v.
maximize: unused- Solving Iv = lambda * Bv is the only sensible approach for
inverting B. We do not foresee a use case for (-Iv) = lambda * Bv. Please
contact authors if you have a need for it.
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
del maximize
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = jax.tree_map(lambda x, y: x - y, sharded_data, mean_estimate)
# Calculate X^TXv/b for all v. This and v are all you need to construct the
# gradient. This is done on each machine with a different batch.
data_vector_product = eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data,
all_eigenvectors,
reduce_f=lambda x, y: x + y,
)
# divide by the batch size
data_vector_product /= data_vector_product.shape[0] # pytype: disable=attribute-error # numpy-scalars
sharded_gram_vector = eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data,
data_vector_product,
)
gram_vector = jax.lax.pmean(
sharded_gram_vector,
axis_name='devices',
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=all_eigenvectors,
b_vector_product=gram_vector,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=epsilon,
)
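# Illustrative note (not part of the training path): once the eigenpairs
# (lambda_i, v_i) of Iv = lambda Bv have been learnt, the rank-k approximation
# from the docstring above can be assembled in dense NumPy terms as
#   b_inverse = sum(lambda_i * np.outer(v_i, v_i) for i in range(k))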
def unbiased_ica_matrix_products(
eigenvectors: chex.ArrayTree,
sharded_data: Tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]
) -> Tuple[chex.ArrayTree, chex.ArrayTree]:
"""Evaluates the matrix vector products required for ICA.
  In this case, we need the products of the following matrices with the
  eigenvectors:
  A = E(<x_i, x_i>x_i x_i^T) - tr(B)B - 2 B^2
  (computed with samples (0), (1,0), (1,0) for each term respectively)
  B = E(x_i x_i^T)
  (computed with sample (2))
  In order to have unbiased gradients when we sample, we need independent
  samples for each random variable that is multiplied together, since
  E(x)E(y) != E(xy) in general unless x and y are independent.
  So tr(B)B - 2 B^2 will need two data batches, and B by itself will need a
  third batch.
This is further complicated by the fact that in order for some of the products
of averages to be computed (e.g. the tr(B)B term), we will need multiple
pmeans.
Args:
    eigenvectors: Array tree holding all the eigenvector estimates. Array tree
      with leaves of shape [k, ...] denoting v_k.
    sharded_data: Triple of array trees containing independently sampled
      batches of input data, each an array tree with leaves of shape [b, ...].
Returns:
    A tuple of two array trees with leaves of shape [k, ...], the first
    denoting Av and the second denoting Bv.
"""
# Compute <x_i, x_i> term for the first term of A
data_norm = eg_utils.tree_einsum(
'b..., b...->b',
sharded_data[0],
sharded_data[0],
reduce_f=lambda x, y: x + y,
)
per_machine_batch_size = data_norm.shape[0] # pytype: disable=attribute-error # numpy-scalars
data_vector_products = []
for sample in sharded_data:
# Calculate Xv for each sample and divide by the per machine batch size
vector_products = eg_utils.tree_einsum(
'b..., k... -> bk',
sample,
eigenvectors,
reduce_f=lambda x, y: x + y,
)
    # divide by the per-machine batch size
data_vector_products.append(vector_products / per_machine_batch_size)
# estimate of E(<x_i, x_i>x_i x_i^T)v. For this term, we need to use
# the same data for everything since it's a single expectation.
fourth_power_term = eg_utils.tree_einsum_broadcast(
'b..., bk, b -> k...',
sharded_data[0],
data_vector_products[0],
data_norm,
)
  # Estimate of the trace of B = X^T X / n. We need to pmean immediately here
  # since this needs to be multiplied by B, which will also be pmeaned later.
average_data_norm = eg_utils.tree_einsum(
'b..., b...->',
sharded_data[1],
sharded_data[1],
reduce_f=lambda x, y: x + y,
) / per_machine_batch_size
average_data_norm = jax.lax.pmean(
average_data_norm,
axis_name='devices',
)
# An estimate of Bv = XTXv using a sample independent from Tr(B).
covariance_product = eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data[0],
data_vector_products[0],
)
mean_covariance_product = jax.lax.pmean(
covariance_product,
axis_name='devices',
)
# B^2v. This is computed with batches 0, 1
covariance_squared_product = eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data[1],
eg_utils.tree_einsum(
'b..., k... -> bk',
sharded_data[1],
mean_covariance_product,
reduce_f=lambda x, y: x + y,
) / per_machine_batch_size,
)
# Calculate a portion of the A matrix and then pmean
# this is (E(<x_i,x_i>x_ix_iT) - 2 B^2)v
kurtosis = jax.lax.pmean(
jax.tree_map(
lambda x, y: x - 2 * y,
fourth_power_term,
covariance_squared_product,
),
axis_name='devices',
)
# We don't include the Tr(B)B term since that is already pmeaned.
kurtosis = jax.tree_map(
lambda x, y: x - average_data_norm * y,
kurtosis,
mean_covariance_product,
)
  # Finally, calculate another covariance product using another independent sample.
independent_covariance_product = jax.lax.pmean(
eg_utils.tree_einsum_broadcast(
'b..., bk -> k...',
sharded_data[2],
data_vector_products[2],
),
axis_name='devices',
)
return kurtosis, independent_covariance_product
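# For intuition (illustrative only): with a single dense batch x of shape [b, d]
# the biased, single-sample estimates of the two matrices above are
#   b_mat = x.T @ x / b
#   a_mat = ((x * (x ** 2).sum(-1, keepdims=True)).T @ x / b
#            - np.trace(b_mat) * b_mat - 2 * b_mat @ b_mat)
# The three independent batches used above replace the single x so that the
# expectations which get multiplied together remain unbiased.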
def unbiased_ica_gradients(
*,
local_eigenvectors: chex.ArrayTree,
sharded_data: Tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree],
sliced_identity: chex.Array,
mask: chex.Array,
auxiliary_variables: Optional[eg_utils.AuxiliaryParams],
mean_estimate: Optional[chex.ArrayTree] = None,
epsilon: Optional[float] = None,
maximize: Optional[bool] = True,
) -> Tuple[chex.ArrayTree, eg_utils.AuxiliaryParams,]:
"""Evaluates the gradients required to compute ICA on the dataset.
For ICA, we are attempting to separate out multiple components of a data
source. We can reduce the problem into a generalized eigenvalue problem
using the following matrices:
A = E(<x_i, x_i>x_i x_i^T) - tr(B)B - 2 B^2
B = E(x_i x_i^T)
However, the twist here is that we need three separate data samples in
  order to avoid bias -- one for the first term of A, one for the remaining
  terms of A, and one for B.
Args:
    local_eigenvectors: Array tree with local eigenvector estimates, with
      leaves of shape [l, ...] denoting v_l.
    sharded_data: Triple of array trees containing independently sampled
      batches of input data, each an array tree with leaves of shape [b, ...].
sliced_identity: Sharded copy of the identity matrix used to calculate the
reward. Shape of: [eigenvectors_per_device, total_eigenvector_count]
mask: Mask with 1s below the diagonal and then sharded across devices, used
to calculate the penalty. Shape of: [eigenvectors_per_device,
total_eigenvector_count]
auxiliary_variables: AuxiliaryParams object which holds all the variables
which we want to update separately in order to avoid bias.
    mean_estimate: Array tree containing an estimate of the mean of the input
data if it is not centered by default. This is used to calculate the
covariances. Array tree with leaves of shape [...].
epsilon: Add an isotropic term to the variance matrix to make it
positive semi-definite. In this case, we're solving for: v =
lambda*(B+epsilon*I)v.
maximize: Whether to search for top-k eigenvectors of Av = lambda*Bv (True)
or the top-k of (-A)v = lambda*Bv (False)
Returns:
returns the gradients for the eigenvectors and a new entry to update
auxiliary variance estimates.
"""
# First, collect all the eigenvectors v_k
all_eigenvectors = jax.lax.all_gather(
local_eigenvectors,
axis_name='devices',
axis=0,
tiled=True,
)
if mean_estimate is not None:
sharded_data = tuple(
jax.tree_map(lambda x, y: x - y, sample, mean_estimate)
for sample in sharded_data
)
kurtosis, independent_covariance = unbiased_ica_matrix_products(
all_eigenvectors,
sharded_data,
)
return generalized_eigengame_gradients(
local_eigenvectors=local_eigenvectors,
all_eigenvectors=all_eigenvectors,
a_vector_product=kurtosis,
b_vector_product=independent_covariance,
auxiliary_variables=auxiliary_variables,
mask=mask,
sliced_identity=sliced_identity,
epsilon=epsilon,
maximize=maximize,
)
| eigengame-main | eigengame/eg_gradients.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file holding the default training configuration for EigenGame."""
from jaxline import base_config
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates and populates the fields of the ConfigDict."""
config = base_config.get_base_config()
config.interval_type = 'steps'
config.training_steps = 10_000_000
config.save_checkpoint_interval = 10_000
config.log_train_data_interval = 100
config.experiment_kwargs = ml_collections.ConfigDict(
dict(
config=dict(
eigenvector_count=128,
eval_batches=128,
epsilon=1e-4,
maximize=True,
track_mean=True,
optimizer_schedule_config=dict(
warm_up_step=10_000,
end_step=1_000_000,
base_lr=2e-4,
end_lr=1e-6),
optimizer_config=dict(
b1=0.9,
b2=0.999,
eps=1e-8,
),
aux_optimizer_config=dict(
learning_rate=1e-3,
),
dataset_config=dict(),
preprocess_config=dict(),
)))
config.train_checkpoint_all_hosts = True
return config
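# The example configs under eigengame/examples/*/config.py build on this base
# by calling get_config() and then overriding individual fields, e.g.:
#   config = get_config()
#   config.experiment_kwargs.config.eigenvector_count = 16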
| eigengame-main | eigengame/eg_base_config.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/examples/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file holding the training configuaration for the trivial eigengame."""
from eigengame import eg_base_config
from eigengame import eg_utils
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates and populates the fields of the ConfigDict."""
config = eg_base_config.get_config()
config.interval_type = 'steps'
config.training_steps = 100_000
config.save_checkpoint_interval = 100
config.log_train_data_interval = 10
config.experiment_kwargs.config.epsilon = 0.
config.experiment_kwargs.config.maximize = False
config.experiment_kwargs.config.experiment_type = eg_utils.ExperimentType.PLS
config.experiment_kwargs.config.track_mean = False
config.experiment_kwargs.config.eigenvector_count = 8
config.experiment_kwargs.config.optimizer_schedule_config.warm_up_step = 10000
config.experiment_kwargs.config.optimizer_schedule_config.end_step = 100_000
config.experiment_kwargs.config.optimizer_schedule_config.base_lr = 25e-5
config.experiment_kwargs.config.optimizer_schedule_config.end_lr = 25e-6
config.experiment_kwargs.config.aux_optimizer_config.learning_rate = 25e-4
config.experiment_kwargs.config.dataset_config = dict(
global_batch_size=512,
repeat=True,
seed=0,
n_samples=512,
n_latents=4,
eigenvector_count=config.experiment_kwargs.config.eigenvector_count,
)
config.experiment_kwargs.config.optimizer_schedule_config.end_step = int(1e8)
config.checkpoint_dir = '/tmp/eigengame_pls_test/'
return config
| eigengame-main | eigengame/examples/synthetic_dataset_pls/config.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates a correlated dataset of (x, y) pairs for PLS."""
from typing import Dict, Iterator, Tuple
import chex
from eigengame import eg_utils
import jax
import numpy as np
import scipy.linalg
import tensorflow as tf
SplitVector = eg_utils.SplitVector
def get_data_generator(
batch_size: int,
repeat: bool,
shuffle_buffer: int = 1024,
data_seed: int = 0,
shuffle_seed: int = 12345,
n_samples: int = 500,
n_latents: int = 8,
**kwargs
) -> Iterator[tf.Tensor]:
"""Pulls data and creates a generator using TensorFlow Datasets."""
del kwargs
x, y = generate_dataset(n_samples=n_samples,
n_latents=n_latents,
seed=data_seed)
data_set = tf.data.Dataset.from_tensor_slices((x, y))
data_set = data_set.shuffle(shuffle_buffer, shuffle_seed)
data_set = data_set.prefetch(tf.data.AUTOTUNE)
data_set = data_set.batch(batch_size, drop_remainder=True)
data_set = data_set.batch(jax.local_device_count(), drop_remainder=True)
if repeat:
data_set = data_set.repeat()
return iter(data_set)
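# Each element yielded by the iterator above is an (x, y) pair of tf.Tensors
# with shape [jax.local_device_count(), batch_size, 2 * n_latents].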
def preprocess_sample(
    sample: Tuple[tf.Tensor, tf.Tensor]) -> Dict[str, chex.Array]:
"""Convert samples to chex Arrays."""
x, y = sample
output = {}
output['x'] = x.numpy()
output['y'] = y.numpy()
return output # pytype: disable=bad-return-type # numpy-scalars
def get_preprocessed_generator(
batch_size: int,
repeat: bool,
shuffle_buffer: int = 1024,
seed: int = 0,
n_samples: int = 500,
n_latents: int = 8,
**kwargs
) -> Iterator[Tuple[Dict[str, chex.Array],
                    Dict[str, chex.Array]]]:
"""Returns a generator which has been preprocessed."""
del kwargs
num_minibatches = 2 # need 2 independent minibatches for PLS
rnd = np.random.RandomState(seed=seed)
shuffle_seeds = [rnd.randint(12345, 999999) for _ in range(num_minibatches)]
data_generators = []
for shuffle_seed in shuffle_seeds:
dg = get_data_generator(batch_size,
repeat,
shuffle_buffer,
seed,
shuffle_seed,
n_samples,
n_latents)
data_generators.append(dg)
for batches in zip(*data_generators):
yield tuple([preprocess_sample(batch) for batch in batches])
def generate_dataset(n_samples: int = 500,
n_latents: int = 8,
seed: int = 0) -> Tuple[chex.Array, chex.Array]:
"""Generates the dataset."""
# Synthetic example modified from sklearn example:
# https://scikit-learn.org/stable/auto_examples/cross_decomposition/plot_compare_cross_decomposition.html
rnd = np.random.RandomState(seed=seed)
latents = [rnd.normal(size=n_samples) for _ in range(n_latents)]
latents = np.array(latents).T
latents = np.repeat(latents, repeats=2, axis=1)
n_noise = 2 * n_latents * n_samples
shape_noise = (n_samples, 2 * n_latents)
x = latents + rnd.normal(size=n_noise).reshape(shape_noise)
y = latents + rnd.normal(size=n_noise).reshape(shape_noise)
return x, y
def generate_ground_truths(
n_samples: int = 500,
n_latents: int = 8,
seed: int = 0) -> Tuple[chex.Array, chex.Array, chex.Array, chex.Array]:
"""Generates the covariance matrix along with the true eigenvalue/vectors."""
x, y = generate_dataset(n_samples=n_samples, n_latents=n_latents, seed=seed)
n_samples, x_dim = x.shape
y_dim = y.shape[1]
dim = x_dim + y_dim
covariance_xx = np.dot(x.T, x) / n_samples
covariance_xy = np.dot(x.T, y) / n_samples
a_matrix = np.zeros((dim, dim))
a_matrix[:x_dim, x_dim:] = covariance_xy
a_matrix[x_dim:, :x_dim] = covariance_xy.T
b_matrix = np.eye(dim)
b_matrix[:x_dim, :x_dim] = covariance_xx
# Solve for Av = lambda v to get the ground_truths
true_eigenvalues, true_eigenvectors = scipy.linalg.eigh(
a_matrix,
b_matrix
)
# Order the eigenvalues and vectors from biggest to smallest
idxs = np.argsort(true_eigenvalues)[::-1]
# You need to transpose this, since eigh returns eigenvectors on columns!
true_eigenvectors = true_eigenvectors[:, idxs].T
true_eigenvalues = true_eigenvalues[idxs]
return (
a_matrix,
b_matrix,
true_eigenvalues,
true_eigenvectors,
)
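# Sanity-check hint (illustrative only): every returned pair should satisfy the
# generalized eigenvalue equation, i.e. for each row i,
#   a_matrix @ true_eigenvectors[i] ~= true_eigenvalues[i] * b_matrix @ true_eigenvectors[i]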
def get_sharded_ground_truths(
total_eigenvector_count: int,
n_samples: int = 500,
n_latents: int = 8,
seed: int = 0
) -> Tuple[chex.ArraySharded, chex.ArraySharded, chex.ArraySharded,
SplitVector]:
"""Shards the ground truths to different machines."""
(
a_matrix,
b_matrix,
true_eigenvalues,
true_eigenvectors,
) = generate_ground_truths(n_samples=n_samples,
n_latents=n_latents,
seed=seed)
shard_shape = (
jax.local_device_count(),
total_eigenvector_count // jax.local_device_count(),
)
dim = a_matrix.shape[0]
# We need to shard the eigenvalues and eigenvectors to the corresponding
# machines responsible for them.
true_eigenvalues = true_eigenvalues[:total_eigenvector_count].reshape(
shard_shape,)
true_eigenvectors = true_eigenvectors[:total_eigenvector_count].reshape(
shard_shape + (dim,),)
x_dim = n_latents * 2
x_eigenvector, y_eigenvector = np.split(
true_eigenvectors,
(x_dim,),
axis=-1,
)
# The true eigenvectors also need to be converted to SplitVector.
true_generalized_eigenvectors = SplitVector(
x=jax.device_put_sharded(
list(x_eigenvector),
jax.local_devices(),
),
y=jax.device_put_sharded(
list(y_eigenvector),
jax.local_devices(),
))
true_eigenvalues = jax.device_put_sharded(
list(true_eigenvalues),
jax.local_devices(),
)
return (a_matrix, b_matrix, true_eigenvalues, true_generalized_eigenvectors)
| eigengame-main | eigengame/examples/synthetic_dataset_pls/data_pipeline.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/examples/synthetic_dataset_pls/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of eigengame PLS on a trivial dataset."""
import functools
from typing import Callable, Dict, Iterator, Tuple
from absl import app
from absl import flags
import chex
from eigengame import eg_experiment
from eigengame import eg_objectives
from eigengame import eg_utils
from eigengame.examples.synthetic_dataset_pls import data_pipeline
import jax
from jaxline import platform
import ml_collections
FLAGS = flags.FLAGS
class Experiment(eg_experiment.AbstractEigenGameExperiment):
"""Run PLS on low dimensional synthetic data."""
NON_BROADCAST_CHECKPOINT_ATTRS = {
'_a_matrix': 'a_matrix',
'_b_matrix': 'b_matrix',
'_target_eigenvalues': 'target_eigenvalues',
'_target_eigenvectors': 'target_eigenvectors',
**eg_experiment.AbstractEigenGameExperiment.NON_BROADCAST_CHECKPOINT_ATTRS
}
def build_dataset(
self,
dataset_config: ml_collections.ConfigDict,
) -> Iterator[Tuple[chex.ArrayTree, chex.ArrayTree]]:
"""Initialize ground truths and returns iterator of samples."""
# Initialize the ground truths
(
self._a_matrix,
self._b_matrix,
self._target_eigenvalues,
self._target_eigenvectors,
) = data_pipeline.get_sharded_ground_truths(
dataset_config.eigenvector_count,
dataset_config.n_samples,
dataset_config.n_latents,
dataset_config.seed
)
global_batch_size = dataset_config.global_batch_size
per_device_batch_size = global_batch_size // jax.local_device_count()
def data_iterator(
    ) -> Iterator[Tuple[Dict[str, chex.Array],
                        Dict[str, chex.Array]]]:
"""Function to create the iterator which samples from the dataset."""
sample_device_batch = data_pipeline.get_preprocessed_generator(
batch_size=per_device_batch_size,
repeat=dataset_config.repeat,
seed=dataset_config.seed,
n_samples=dataset_config.n_samples,
n_latents=dataset_config.n_latents,
)
while True:
batches = next(sample_device_batch)
# shard batches and repackage
sharded_batches = []
for batch in batches:
x = batch['x']
y = batch['y']
x_sharded = jax.device_put_sharded(list(x), jax.local_devices())
y_sharded = jax.device_put_sharded(list(y), jax.local_devices())
sharded_batch = {'x': x_sharded, 'y': y_sharded}
sharded_batches.append(sharded_batch)
        yield tuple(sharded_batches)
      # We need a separate function call here, since otherwise the
      # initialization of the ground truths would be executed the first time
      # next() is called instead of when build_dataset is called.
return data_iterator()
def build_preprocess_function(
self,
preprocess_config: ml_collections.ConfigDict,
) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
"""Simple preprocessing."""
def trivial_preprocess(
batch: chex.ArrayTree,
_: chex.PRNGKey,
) -> Tuple[eg_utils.SplitVector, eg_utils.SplitVector]:
split_batch = [eg_utils.SplitVector(x=b['x'], y=b['y']) for b in batch]
return tuple(split_batch)
return trivial_preprocess
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def _eval_similarity(
self,
eigenvectors: chex.Array,
target_vectors: chex.Array,
  ) -> chex.Array:
"""pmaps the cosine similarity function."""
cosine_similarity = eg_objectives.cosine_similarity(
eigenvectors,
target_vectors,
)
return cosine_similarity # pytype: disable=bad-return-type # numpy-scalars
def evaluate(
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Override the evaluate function to return cosine similarity."""
replicated_cosine_similarity = self._eval_similarity(
self._eigenvectors, self._target_eigenvectors)
cosine_similarities = eg_utils.get_first(replicated_cosine_similarity)
return eg_utils.per_vector_metric_log( # pytype: disable=bad-return-type # numpy-scalars
'cosine_similarity',
cosine_similarities,
)
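# A typical launch command, assuming the package layout implied by the file
# paths in this repository:
#   python -m eigengame.examples.synthetic_dataset_pls.experiment \
#     --config=eigengame/examples/synthetic_dataset_pls/config.py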
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(
platform.main, Experiment,
checkpointer_factory=eg_experiment.create_checkpointer))
| eigengame-main | eigengame/examples/synthetic_dataset_pls/experiment.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file holding the training configuaration for the trivial eigengame."""
from eigengame import eg_base_config
from eigengame import eg_utils
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates and populates the fields of the ConfigDict."""
config = eg_base_config.get_config()
config.interval_type = 'steps'
config.training_steps = 10_000_000
config.save_checkpoint_interval = 10_000
config.log_train_data_interval = 10
config.experiment_kwargs.config.epsilon = 0.
config.experiment_kwargs.config.experiment_type = eg_utils.ExperimentType.CCA
config.experiment_kwargs.config.track_mean = False
config.experiment_kwargs.config.eigenvector_count = 16
config.experiment_kwargs.config.dataset_config = dict(
eigenvector_count=config.experiment_kwargs.config.eigenvector_count,
y_size=16,
x_size=16,
seed=42,
global_batch_size=256,
)
config.experiment_kwargs.config.optimizer_schedule_config.end_step = int(1e8)
config.checkpoint_dir = '/tmp/eigengame_cca_test/'
return config
| eigengame-main | eigengame/examples/synthetic_dataset_cca/config.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates a trivial dataset of pair of correlated normals for the CCA."""
from typing import Tuple, Union
import chex
from eigengame import eg_utils
import jax
import jax.numpy as jnp
import numpy as np
import scipy.linalg
SplitVector = eg_utils.SplitVector
def generate_ground_truths(
key: chex.PRNGKey,
x_size: int,
y_size: int,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
"""Generates the covariance matrix along with the true eigenvalue/vectors."""
total_len = x_size + y_size
  # Create a matrix with 1's on the upper-left and bottom-right blocks.
variance_mask = np.zeros(shape=(total_len, total_len))
# This usually wouldn't work on TPU, but we only run this once before training
variance_mask[:x_size, :x_size] = 1
variance_mask[x_size:, x_size:] = 1
# Create a random gram matrix for our covariance.
random_matrix = jax.random.normal(key, shape=(total_len, total_len))
covariance_matrix = np.einsum(
'dn, dm -> nm',
random_matrix,
random_matrix,
) / np.sqrt(total_len)
variance_matrix = variance_mask * covariance_matrix # B
interaction_matrix = (1 - variance_mask) * covariance_matrix # A
# Solve for Av = lambda Bv to get the ground_truths
true_eigenvalues, true_eigenvectors = scipy.linalg.eigh(
interaction_matrix,
variance_matrix,
)
# Order the eigenvalues and vectors from biggest to smallest
idxs = np.argsort(true_eigenvalues)[::-1]
# You need to transpose this, since eigh returns eigenvectors on columns!
true_eigenvectors = true_eigenvectors[:, idxs].T
true_eigenvalues = true_eigenvalues[idxs]
return (
covariance_matrix,
true_eigenvalues,
true_eigenvectors,
)
def generate_correlated_data(
key: chex.PRNGKey,
x_size: int,
y_size: int,
covariance: chex.Array,
batch_size: int,
) -> SplitVector:
"""Returns a pair of correlated data given a covariance matrix."""
dimension = x_size + y_size
merged_data = jax.random.multivariate_normal(
key=key,
mean=jnp.zeros(dimension),
cov=covariance,
shape=(batch_size,),
)
# In general splits are really slow on TPU and should be avoided. However,
# since this is a trivial small synthetic example, it doesn't matter too much.
x_data, y_data = jnp.split(
merged_data,
(x_size,),
axis=-1,
)
return SplitVector(x=x_data, y=y_data)
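# Minimal usage sketch (illustrative; the sizes below are arbitrary):
#   key = jax.random.PRNGKey(0)
#   cov, _, _ = generate_ground_truths(key, x_size=16, y_size=16)
#   batch = generate_correlated_data(key, 16, 16, cov, batch_size=256)
#   # batch.x has shape [256, 16] and batch.y has shape [256, 16].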
def get_sharded_ground_truths(
key: chex.PRNGKey,
total_eigenvector_count: int,
x_size: int,
y_size: int,
) -> Tuple[chex.ArraySharded, chex.ArraySharded, SplitVector]:
"""Shards the ground truths to different machines."""
(
covariance_matrix,
true_eigenvalues,
true_eigenvectors,
) = generate_ground_truths(
key,
x_size,
y_size,
)
shard_shape = (
jax.local_device_count(),
total_eigenvector_count // jax.local_device_count(),
)
# We need to shard the eigenvalues and eigenvectors to the corresponding
# machines responsible for them.
true_eigenvalues = true_eigenvalues[:total_eigenvector_count].reshape(
shard_shape,)
true_eigenvectors = true_eigenvectors[:total_eigenvector_count].reshape(
shard_shape + (x_size + y_size,),)
x_eigenvector, y_eigenvector = np.split(
true_eigenvectors,
(x_size,),
axis=-1,
)
# The true eigenvectors also need to be converted to SplitVector.
true_generalized_eigenvectors = SplitVector(
x=jax.device_put_sharded(
list(x_eigenvector),
jax.local_devices(),
),
y=jax.device_put_sharded(
list(y_eigenvector),
jax.local_devices(),
))
true_eigenvalues = jax.device_put_sharded(
list(true_eigenvalues),
jax.local_devices(),
)
return covariance_matrix, true_eigenvalues, true_generalized_eigenvectors
def linear_correlation_data(
rng: chex.PRNGKey,
data_dimension: int,
batch_size: int,
diagonal_variance: Union[float, chex.Array] = 1
) -> SplitVector:
"""Generate high dimensional correlated data with known ground truths.
  We generate two independent samples, X_1 and X_2, with a diagonal variance.
  Our correlated data is then:
  X = X_1
  Y = aX_1 + bX_2
  where a^2 + b^2 = 1. This means the correlation is equal to a. This can be
  sampled, and we can get the ground truths without dealing with a massive
  covariance matrix. a is a linear range, so we get a range of spectra.
The ground truth eigenvectors can be generated by the
get_linearly_correlated_eigenvectors function.
Args:
rng: RNG key for jax to generate samples.
data_dimension: int denoting the data dimension for the x and y data
batch_size: Number of samples we generate.
    diagonal_variance: Defines the variances of X and Y. This is applied
identically to the two data sources.
Returns:
    SplitVector containing correlated data with a linear spectrum.
"""
keys = jax.random.split(rng, 2)
correlation = np.linspace(1, 0, num=data_dimension, endpoint=True)
noise = np.sqrt(1 - correlation**2)
x = jax.random.normal(
keys[1], shape=(
batch_size,
data_dimension,
)) * diagonal_variance
y = jax.random.normal(
keys[0], shape=(
batch_size,
data_dimension,
)) * diagonal_variance
return SplitVector(
x=x,
y=noise * y + correlation * x,
)
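# By construction the canonical correlations of (X, Y) above are exactly the
# entries of `correlation` (a linear ramp from 1 down to 0), and the
# corresponding directions are axis-aligned, which is what the helper below
# returns.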
def get_linearly_correlated_eigenvectors(
data_dimension: int,
eigenvector_count: int,
) -> chex.Array:
"""Ground truth Eigenvalues for linearly correlated data."""
ground_truth = jnp.reshape(
jnp.eye(eigenvector_count, data_dimension),
(
jax.device_count(),
eigenvector_count // jax.device_count(),
data_dimension,
),
)
normalized_vector = eg_utils.normalize_eigenvectors(
SplitVector(
x=ground_truth,
y=ground_truth,
))
return jax.device_put_sharded(
normalized_vector,
jax.devices(),
)
| eigengame-main | eigengame/examples/synthetic_dataset_cca/data_pipeline.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/examples/synthetic_dataset_cca/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of eigengame CCA on a trivial dataset."""
import functools
from typing import Callable, Dict, Iterator, Tuple
from absl import app
from absl import flags
import chex
from eigengame import eg_experiment
from eigengame import eg_objectives
from eigengame import eg_utils
from eigengame.examples.synthetic_dataset_cca import data_pipeline
import jax
import jax.numpy as jnp
from jaxline import platform
import ml_collections
FLAGS = flags.FLAGS
class Experiment(eg_experiment.AbstractEigenGameExperiment):
"""Run CCA on low dimensional synthetic data."""
NON_BROADCAST_CHECKPOINT_ATTRS = {
'_covariance': 'covariance',
'_target_eigenvalues': 'target_eigenvalues',
'_target_eigenvectors': 'target_eigenvectors',
**eg_experiment.AbstractEigenGameExperiment.NON_BROADCAST_CHECKPOINT_ATTRS
}
def build_dataset(
self,
dataset_config: ml_collections.ConfigDict,
) -> Iterator[chex.ArrayTree]:
"""Initialize ground truths and returns iterator of samples."""
# Initialize the ground truths
key = jax.random.PRNGKey(dataset_config.seed)
if jax.host_count() > 1:
      # In the case of multihost training, we want each host to get a different
# sample.
key = jax.random.split(key, jax.host_count())[jax.host_id()]
(
self._covariance,
self._target_eigenvalues,
self._target_eigenvectors,
) = data_pipeline.get_sharded_ground_truths(
key,
dataset_config.eigenvector_count,
dataset_config.x_size,
dataset_config.y_size,
)
global_batch_size = dataset_config.global_batch_size
per_device_batch_size = global_batch_size // jax.device_count()
def data_iterator(key: chex.PRNGKey):
"""Function to create the iterator which samples from the distribution."""
sample_from_key = jax.pmap(
functools.partial(
data_pipeline.generate_correlated_data,
x_size=dataset_config.x_size,
y_size=dataset_config.y_size,
covariance=self._covariance,
batch_size=per_device_batch_size,
),)
while True:
num_keys = jax.local_device_count() + 1
key, *sharded_keys = jax.random.split(key, num_keys)
batch = sample_from_key(jnp.asarray(sharded_keys))
# some experiment types (e.g., CCA) require multiple i.i.d. samples
        # to construct unbiased gradient estimates. See third_party/py/eigengame/
        # eg_experiment.py/get_experiment_type for self._num_samples info.
if self._num_samples <= 1: # num_samples determined by experiment_type
yield batch
else:
batches = [batch]
          for _ in range(self._num_samples - 1):
key, *sharded_keys = jax.random.split(key, num_keys)
batch = sample_from_key(jnp.asarray(sharded_keys))
batches.append(batch)
yield tuple(batches)
    # We need a separate function call here, since otherwise the
    # initialization of the ground truths would be executed the first time
    # next() is called instead of when build_dataset is called.
return data_iterator(key)
def build_preprocess_function(
self,
preprocess_config: ml_collections.ConfigDict,
) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
"""No need to do any preprocessing."""
return lambda batch, _: batch
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def _eval_similarity(
self,
eigenvectors: chex.Array,
target_vectors: chex.Array,
  ) -> chex.Array:
"""pmaps the cosine similarity function."""
cosine_similarity = eg_objectives.cosine_similarity(
eigenvectors,
target_vectors,
)
return cosine_similarity # pytype: disable=bad-return-type # numpy-scalars
def evaluate(
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Override the evaluate function to return cosine similarity."""
replicated_cosine_similarity = self._eval_similarity(
self._eigenvectors, self._target_eigenvectors)
cosine_similarities = eg_utils.get_first(replicated_cosine_similarity)
return eg_utils.per_vector_metric_log( # pytype: disable=bad-return-type # numpy-scalars
'cosine_similarity',
cosine_similarities,
)
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(
platform.main, Experiment,
checkpointer_factory=eg_experiment.create_checkpointer))
| eigengame-main | eigengame/examples/synthetic_dataset_cca/experiment.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file holding the training configuaration for the trivial eigengame."""
from eigengame import eg_base_config
from eigengame import eg_utils
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates and populates the fields of the ConfigDict."""
config = eg_base_config.get_config()
config.interval_type = 'steps'
config.training_steps = 100_000
config.save_checkpoint_interval = 100
config.log_train_data_interval = 10
config.experiment_kwargs.config.epsilon = 1e-8
config.experiment_kwargs.config.maximize = False
config.experiment_kwargs.config.experiment_type = eg_utils.ExperimentType.ICA
config.experiment_kwargs.config.track_mean = False
config.experiment_kwargs.config.eigenvector_count = 8
config.experiment_kwargs.config.optimizer_schedule_config.warm_up_step = 10000
config.experiment_kwargs.config.optimizer_schedule_config.end_step = 100_000
config.experiment_kwargs.config.optimizer_schedule_config.base_lr = 25e-4
config.experiment_kwargs.config.optimizer_schedule_config.end_lr = 25e-5
config.experiment_kwargs.config.aux_optimizer_config.learning_rate = 25e-2
config.experiment_kwargs.config.dataset_config = dict(
global_batch_size=512,
repeat=True,
seed=0,
n_samples=2000,
eigenvector_count=config.experiment_kwargs.config.eigenvector_count,
)
config.experiment_kwargs.config.optimizer_schedule_config.end_step = int(1e8)
config.checkpoint_dir = '/tmp/eigengame_ica_test/'
return config
| eigengame-main | eigengame/examples/synthetic_dataset_ica/config.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates a dataset of mixed periodic signals for ICA."""
from typing import Iterator, Tuple
import chex
import jax
import numpy as np
import scipy
from scipy import signal
import tensorflow as tf
def get_data_generator(
batch_size: int,
repeat: bool,
shuffle_buffer: int = 1024,
data_seed: int = 0,
shuffle_seed: int = 12345,
n_samples: int = 2000,
**kwargs
) -> Iterator[tf.Tensor]:
"""Pulls data and creates a generator using TensorFlow Datasets."""
del kwargs
mixed_sources = generate_dataset(n_samples=n_samples, seed=data_seed)
data_set = tf.data.Dataset.from_tensor_slices(mixed_sources)
data_set = data_set.shuffle(shuffle_buffer, shuffle_seed)
data_set = data_set.prefetch(tf.data.AUTOTUNE)
data_set = data_set.batch(batch_size, drop_remainder=True)
data_set = data_set.batch(jax.local_device_count(), drop_remainder=True)
if repeat:
data_set = data_set.repeat()
return iter(data_set)
def preprocess_sample(
sample: tf.Tensor) -> chex.Array:
"""Convert samples to chex Arrays."""
return sample.numpy()
def get_preprocessed_generator(
batch_size: int,
repeat: bool,
shuffle_buffer: int = 1024,
seed: int = 0,
n_samples: int = 2000,
**kwargs
) -> Iterator[Tuple[chex.Array, chex.Array, chex.Array]]:
"""Returns a generator which has been preprocessed."""
del kwargs
num_minibatches = 3 # need 3 independent minibatches for ICA
rnd = np.random.RandomState(seed=seed)
shuffle_seeds = [rnd.randint(12345, 999999) for _ in range(num_minibatches)]
data_generators = []
for shuffle_seed in shuffle_seeds:
dg = get_data_generator(batch_size,
repeat,
shuffle_buffer,
seed,
shuffle_seed,
n_samples)
data_generators.append(dg)
for batches in zip(*data_generators):
yield tuple([preprocess_sample(batch) for batch in batches])
def generate_dataset(n_samples: int = 2000, seed: int = 0) -> chex.Array:
"""Generates the dataset."""
# Synthetic example modified from sklearn example:
# https://scikit-learn.org/stable/auto_examples/decomposition/plot_ica_blind_source_separation.html
  # Generate three structured signals (plus five filler sinusoids), add
  # Gaussian noise, standardize, and then linearly mix them.
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
  # Dummy signals so that k % num_devices == 0 for the TPU test.
s4 = np.cos(time) # Signal 4: sinusoidal signal
s5 = np.cos(4 * time) # Signal 5: sinusoidal signal
s6 = np.cos(8 * time) # Signal 6: sinusoidal signal
s7 = np.cos(16 * time) # Signal 7: sinusoidal signal
s8 = np.cos(32 * time) # Signal 8: sinusoidal signal
# Stack signals on columns
sources = np.c_[s1, s2, s3, s4, s5, s6, s7, s8]
# Add noise
rnd = np.random.RandomState(seed=seed)
sources += 0.2 * rnd.normal(size=sources.shape)
sources /= sources.std(axis=0) # Standardize data
# Mix data
mix = np.zeros((8, 8))
mix[3:, 3:] = np.eye(5)
mix_main = np.array([[1.0, 1.0, 1.0],
[0.5, 2.0, 1.0],
[1.5, 1.0, 2.0]]) # Mixing matrix
mix[:3, :3] = mix_main
mixed_sources = np.dot(sources, mix.T) # Generate observations
return mixed_sources
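# The returned array has shape (n_samples, 8): three structured sources plus
# five sinusoidal fillers, standardized and then linearly mixed by `mix`.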
def generate_ground_truths(
n_samples: int = 2000,
seed: int = 0) -> Tuple[chex.Array, chex.Array, chex.Array, chex.Array]:
"""Generates the covariance matrix along with the true eigenvalue/vectors."""
mixed_sources = generate_dataset(n_samples=n_samples, seed=seed)
n_samples = mixed_sources.shape[0]
covariance = np.dot(mixed_sources.T, mixed_sources) / n_samples
kurtosis = sum([np.outer(xi, xi) * np.inner(xi, xi) for xi in mixed_sources])
kurtosis /= n_samples
kurtosis -= np.trace(covariance) * covariance + 2 * covariance.dot(covariance)
# Solve for Av = lambda v to get the ground_truths
true_eigenvalues, true_eigenvectors = scipy.linalg.eigh(
kurtosis,
covariance
)
# Order the eigenvalues and vectors from smallest to biggest
idxs = np.argsort(true_eigenvalues)
# You need to transpose this, since eigh returns eigenvectors on columns!
true_eigenvectors = true_eigenvectors[:, idxs].T
true_eigenvalues = true_eigenvalues[idxs]
return (
kurtosis,
covariance,
true_eigenvalues,
true_eigenvectors,
)
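# Sanity-check hint (illustrative only): each returned pair should satisfy
#   kurtosis @ true_eigenvectors[i] ~= true_eigenvalues[i] * covariance @ true_eigenvectors[i]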
def get_sharded_ground_truths(
total_eigenvector_count: int,
n_samples: int = 2000,
seed: int = 0
) -> Tuple[chex.ArraySharded, chex.ArraySharded, chex.ArraySharded,
chex.ArraySharded]:
"""Shards the ground truths to different machines."""
(
kurtosis_matrix,
covariance_matrix,
true_eigenvalues,
true_eigenvectors,
) = generate_ground_truths(n_samples=n_samples, seed=seed)
shard_shape = (
jax.device_count(),
total_eigenvector_count // jax.device_count(),
)
dim = kurtosis_matrix.shape[0]
# We need to shard the eigenvalues and eigenvectors to the corresponding
# machines responsible for them.
true_eigenvalues = true_eigenvalues[:total_eigenvector_count].reshape(
shard_shape,)
true_eigenvectors = true_eigenvectors[:total_eigenvector_count].reshape(
shard_shape + (dim,),)
  # Shard the true eigenvectors across devices.
true_generalized_eigenvectors = jax.device_put_sharded(
list(true_eigenvectors),
jax.devices(),
)
true_eigenvalues = jax.device_put_sharded(
list(true_eigenvalues),
jax.devices(),
)
return (kurtosis_matrix, covariance_matrix, true_eigenvalues, # pytype: disable=bad-return-type # numpy-scalars
true_generalized_eigenvectors)
| eigengame-main | eigengame/examples/synthetic_dataset_ica/data_pipeline.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| eigengame-main | eigengame/examples/synthetic_dataset_ica/__init__.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of eigengame ICA on a trivial dataset."""
import functools
from typing import Callable, Dict, Iterator, Tuple
from absl import app
from absl import flags
import chex
from eigengame import eg_experiment
from eigengame import eg_objectives
from eigengame import eg_utils
from eigengame.examples.synthetic_dataset_ica import data_pipeline
import jax
from jaxline import platform
import ml_collections
FLAGS = flags.FLAGS
class Experiment(eg_experiment.AbstractEigenGameExperiment):
"""Run ICA on low dimensional synthetic data."""
NON_BROADCAST_CHECKPOINT_ATTRS = {
'_kurtosis': 'kurtosis',
'_covariance': 'covariance',
'_target_eigenvalues': 'target_eigenvalues',
'_target_eigenvectors': 'target_eigenvectors',
**eg_experiment.AbstractEigenGameExperiment.NON_BROADCAST_CHECKPOINT_ATTRS
}
def build_dataset(
self,
dataset_config: ml_collections.ConfigDict,
) -> Iterator[Tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]]:
"""Initialize ground truths and returns iterator of samples."""
# Initialize the ground truths
(
self._kurtosis,
self._covariance,
self._target_eigenvalues,
self._target_eigenvectors,
) = data_pipeline.get_sharded_ground_truths(
dataset_config.eigenvector_count,
)
global_batch_size = dataset_config.global_batch_size
per_device_batch_size = global_batch_size // jax.local_device_count()
def data_iterator(
) -> Iterator[Tuple[chex.ArrayTree, chex.ArrayTree, chex.ArrayTree]]:
"""Function to create the iterator which samples from the dataset."""
sample_device_batch = data_pipeline.get_preprocessed_generator(
batch_size=per_device_batch_size,
repeat=dataset_config.repeat,
seed=dataset_config.seed,
n_samples=dataset_config.n_samples,
)
while True:
batches = next(sample_device_batch)
batches = [
jax.device_put_sharded(list(batch), jax.local_devices())
for batch in batches
]
yield tuple(batches)
    # We need a separate function call here since, otherwise, the
    # initialization of the ground truths would only be executed the first
    # time next() is called instead of when build_dataset is called.
return data_iterator()
def build_preprocess_function(
self,
preprocess_config: ml_collections.ConfigDict,
) -> Callable[[chex.ArrayTree, chex.PRNGKey], chex.ArrayTree]:
"""No need to do any preprocessing."""
return lambda batch, _: batch
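  # The pmapped helper below runs on every local device and compares that
  # device's shard of learned eigenvectors against its shard of ground-truth
  # eigenvectors; `static_broadcasted_argnums=0` keeps `self` out of tracing.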
@functools.partial(
jax.pmap,
in_axes=0,
out_axes=0,
axis_name='devices',
static_broadcasted_argnums=0,
)
def _eval_similarity(
self,
eigenvectors: chex.Array,
target_vectors: chex.Array,
) -> Tuple[chex.Array, chex.Array]:
"""pmaps the cosine similarity function."""
cosine_similarity = eg_objectives.cosine_similarity(
eigenvectors,
target_vectors,
)
return cosine_similarity # pytype: disable=bad-return-type # numpy-scalars
def evaluate(
self,
global_step: int,
rng: chex.Array,
**unused_kwargs,
) -> Dict[str, chex.Array]:
"""Updates the evaluate function to also return cosine similarity."""
log_dict = super().evaluate(global_step, rng, **unused_kwargs)
replicated_cosine_similarity = self._eval_similarity(
self._eigenvectors, self._target_eigenvectors)
cosine_similarities = eg_utils.get_first(replicated_cosine_similarity)
log_dict.update(eg_utils.per_vector_metric_log(
'cosine_similarity', cosine_similarities,
))
return log_dict # pytype: disable=bad-return-type # numpy-scalars
if __name__ == '__main__':
flags.mark_flag_as_required('config')
app.run(functools.partial(
platform.main, Experiment,
checkpointer_factory=eg_experiment.create_checkpointer))
| eigengame-main | eigengame/examples/synthetic_dataset_ica/experiment.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Config file holding the training configuaration for the trivial eigengame."""
from eigengame import eg_base_config
from eigengame import eg_utils
import ml_collections
def get_config() -> ml_collections.ConfigDict:
"""Creates and populates the fields of the ConfigDict."""
config = eg_base_config.get_config()
config.interval_type = 'steps'
config.training_steps = 1_000_000
config.save_checkpoint_interval = 1_000
config.log_train_data_interval = 10
config.experiment_kwargs.config.epsilon = 0.
config.experiment_kwargs.config.experiment_type = eg_utils.ExperimentType.PCA
config.experiment_kwargs.config.track_mean = False
config.experiment_kwargs.config.eigenvector_count = 256
config.experiment_kwargs.config.dataset_config = dict(
eigenvector_count=config.experiment_kwargs.config.eigenvector_count,
dim=1000,
seed=42,
global_batch_size=4096,
)
config.experiment_kwargs.config.optimizer_schedule_config.end_step = int(1e8)
config.checkpoint_dir = '/tmp/eigengame_pca_test/'
return config
| eigengame-main | eigengame/examples/synthetic_dataset_pca/config.py |
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates a trivial dataset of pair of correlated normals for the PCA."""
from typing import Tuple
import chex
import jax
import jax.numpy as jnp
import numpy as np
import scipy.linalg
def generate_ground_truths(
key: chex.PRNGKey,
dim: int,
) -> Tuple[chex.Array, chex.Array, chex.Array]:
"""Generates the covariance matrix along with the true eigenvalue/vectors."""
# Create a random gram matrix for our covariance.
random_matrix = jax.random.normal(key, shape=(dim, dim))
covariance_matrix = np.einsum(
'dn, dm -> nm',
random_matrix,
random_matrix,
) / np.sqrt(dim)
# Solve for Av = lambda v to get the ground_truths
true_eigenvalues, true_eigenvectors = scipy.linalg.eigh(
covariance_matrix,
)
# Order the eigenvalues and vectors from biggest to smallest
idxs = np.argsort(true_eigenvalues)[::-1]
  # Transpose, since eigh returns eigenvectors as columns.
true_eigenvectors = true_eigenvectors[:, idxs].T
true_eigenvalues = true_eigenvalues[idxs]
return (
covariance_matrix,
true_eigenvalues,
true_eigenvectors,
)
def generate_data(
key: chex.PRNGKey,
dim: int,
covariance: chex.Array,
batch_size: int,
) -> chex.Array:
"""Returns a vector of data given a covariance matrix."""
data = jax.random.multivariate_normal(
key=key,
mean=jnp.zeros(dim),
cov=covariance,
shape=(batch_size,),
)
return data
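# Illustrative sketch of drawing one batch (the names and sizes here are
# assumptions mirroring the example config, not part of the module):
#   key = jax.random.PRNGKey(42)
#   cov, _, _ = generate_ground_truths(key, dim=1000)
#   batch = generate_data(key, dim=1000, covariance=cov, batch_size=4096)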
def get_sharded_ground_truths(
key: chex.PRNGKey,
total_eigenvector_count: int,
dim: int,
) -> Tuple[chex.ArraySharded, chex.ArraySharded, chex.ArraySharded]:
"""Shards the ground truths to different machines."""
(
covariance_matrix,
true_eigenvalues,
true_eigenvectors,
) = generate_ground_truths(
key,
dim,
)
shard_shape = (
jax.local_device_count(),
total_eigenvector_count // jax.local_device_count(),
)
# We need to shard the eigenvalues and eigenvectors to the corresponding
# machines responsible for them.
true_eigenvalues = true_eigenvalues[:total_eigenvector_count].reshape(
shard_shape,)
true_eigenvectors = true_eigenvectors[:total_eigenvector_count].reshape(
shard_shape + (dim,),)
  # Place each shard of the eigenvalues and eigenvectors on its device.
true_generalized_eigenvectors = jax.device_put_sharded(
list(true_eigenvectors),
jax.local_devices(),
)
true_eigenvalues = jax.device_put_sharded(
list(true_eigenvalues),
jax.local_devices(),
)
return covariance_matrix, true_eigenvalues, true_generalized_eigenvectors # pytype: disable=bad-return-type # numpy-scalars
| eigengame-main | eigengame/examples/synthetic_dataset_pca/data_pipeline.py |