"""Training algorithm track submission functions for LibriSpeech."""
from typing import Dict, Iterator, List, Tuple
import numpy as np
import torch
import torch.distributed.nn as dist_nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
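# Illustrative sketch of the schedule above (the hyperparameter values here are
# assumptions for the example, not the tuned submission settings): with
# base_lr=1e-3, warmup_steps=1000 and training_steps=10_000,
#   step 500    -> 0.5e-3   (linear warmup, halfway to base_lr)
#   step 1_000  -> ~0.98e-3 (cosine decay starts from ~base_lr)
#   step 10_000 -> ~0.0     (cosine fully decayed)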
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
optimizer = torch.optim.AdamW(
params=model_params.parameters(),
lr=0.0,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay)
return {'optimizer': optimizer}
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del model_state
del loss_type
optimizer = optimizer_state['optimizer']
optimizer.zero_grad()
current_model = current_param_container
(logits, logits_padding), _ = workload.model_fn(
current_model,
batch,
None,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logits_padding))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
for g in optimizer.param_groups:
g['lr'] = get_learning_rate(global_step, hyperparameters)
if hasattr(hyperparameters, 'grad_clip'):
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=hyperparameters.grad_clip)
optimizer.step()
return optimizer_state, current_param_container, None
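# Note on the DDP branch above: summing the loss and the number of valid
# examples across workers before dividing reproduces the global mean loss (and
# hence the same gradients) that a single-process run would compute, no matter
# how the valid examples are distributed across devices.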
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
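"""Training algorithm track submission functions for WMT."""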
from typing import Dict, Iterator, List, Tuple
import numpy as np
import torch
import torch.distributed.nn as dist_nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def get_batch_size(workload_name):
batch_sizes = {'wmt': 128}
return batch_sizes[workload_name]
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): int -> float, the step-dependent learning rate.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= np.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= np.sqrt(np.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= np.sqrt(warmup_steps)
ret /= np.sqrt(np.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = np.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= np.maximum(0.0, 0.5 * (1.0 + np.cos(np.pi * (progress % 1.0))))
else:
raise ValueError(f'Unknown factor {name}.')
return ret
return step_fn
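# Example usage (a minimal sketch; the factor string and values are the
# defaults above, chosen here only for illustration):
#   schedule = create_learning_rate_scheduler(
#       factors='constant * linear_warmup * rsqrt_decay',
#       base_learning_rate=0.5,
#       warmup_steps=1000)
#   schedule(0)     # 0.0                       (still warming up)
#   schedule(1000)  # 0.5 / sqrt(1000) ~ 0.016  (warmup done, rsqrt decay)
#   schedule(4000)  # 0.5 / sqrt(4000) ~ 0.008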
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
optimizer_state = {
'optimizer':
torch.optim.Adam(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(1.0 - hyperparameters.one_minus_beta_1, 0.98),
eps=hyperparameters.epsilon),
}
optimizer_state['scheduler'] = create_learning_rate_scheduler(
base_learning_rate=hyperparameters.learning_rate)
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del hyperparameters
del loss_type
del eval_results
current_model = current_param_container
current_param_container.train()
optimizer = optimizer_state['optimizer']
optimizer.zero_grad()
logits, _ = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch['weights']
loss_dict = workload.loss_fn(targets, logits, weights, label_smoothing=0.1)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
lr = optimizer_state['scheduler'](global_step).item()
for g in optimizer.param_groups:
g['lr'] = lr
optimizer.step()
return (optimizer_state, current_param_container, None)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for WMT."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
batch_sizes = {'wmt': 128}
return batch_sizes[workload_name]
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): int -> float, the step-dependent learning rate.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError(f'Unknown factor {name}.')
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=hyperparameters.learning_rate, warmup_steps=1000)
opt_init_fn, opt_update_fn = optax.adam(
b1=1.0 - hyperparameters.one_minus_beta_1,
b2=0.98,
eps=hyperparameters.epsilon,
learning_rate=learning_rate_fn)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
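# Note: like the other JAX submissions in this collection, init_optimizer_state
# returns a (replicated optimizer state, opt_update_fn) tuple; update_params
# unpacks it again before calling the pmapped train step.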
@functools.partial(
jax.pmap,
in_axes=(None, None, 0, 0, 0, 0, None),
axis_name='batch',
static_broadcasted_argnums=(0, 1, 6))
def pmapped_train_step(workload,
opt_update_fn,
optimizer_state,
current_param_container,
batch,
dropout_rng,
hyperparameters):
"""Perform a single training step."""
del hyperparameters
def _loss_fn(params):
"""Loss function used for training."""
logits, _ = workload.model_fn(
params,
batch,
model_state=None,
mode=spec.ForwardPassMode.TRAIN,
rng=dropout_rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch['weights']
loss_dict = workload.loss_fn(targets, logits, weights, label_smoothing=0.1)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, n_valid_examples
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, n_valid_examples), grad = grad_fn(current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = jax.lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
updates, new_optimizer_state = opt_update_fn(
grad, optimizer_state, current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del global_step
del model_state
del loss_type
optimizer_state, opt_update_fn = optimizer_state
dropout_rngs = jax.random.split(rng, jax.local_device_count())
new_optimizer_state, updated_params = pmapped_train_step(
workload,
opt_update_fn,
optimizer_state,
current_param_container,
batch,
dropout_rngs,
hyperparameters)
return (new_optimizer_state, opt_update_fn), updated_params, None
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for ImageNet."""
from typing import Dict, Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 1024
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
batch_size = get_batch_size('imagenet_vit')
base_lr = hyperparameters.learning_rate * batch_size / 1024.
optimizer_state = {
'optimizer':
torch.optim.Adam(
model_params.parameters(),
lr=base_lr,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=hyperparameters.epsilon),
}
steps_per_epoch = workload.num_train_examples // batch_size
scheduler1 = LinearLR(
optimizer_state['optimizer'],
start_factor=1e-10,
end_factor=1.,
total_iters=hyperparameters.warmup_epochs * steps_per_epoch)
cosine_epochs = max(
hyperparameters.num_epochs - hyperparameters.warmup_epochs, 1)
scheduler2 = CosineAnnealingLR(
optimizer_state['optimizer'], T_max=cosine_epochs * steps_per_epoch)
optimizer_state['scheduler'] = SequentialLR(
optimizer_state['optimizer'],
schedulers=[scheduler1, scheduler2],
milestones=[hyperparameters.warmup_epochs * steps_per_epoch])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del loss_type
del eval_results
del global_step
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=logits_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for ImageNet."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 1024
def create_learning_rate_fn(hparams: spec.Hyperparameters,
steps_per_epoch: int):
"""Create learning rate schedule."""
base_learning_rate = hparams.learning_rate * \
get_batch_size('imagenet_vit') / 1024.
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=base_learning_rate,
transition_steps=hparams.warmup_epochs * steps_per_epoch)
cosine_epochs = max(hparams.num_epochs - hparams.warmup_epochs, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=base_learning_rate,
decay_steps=cosine_epochs * steps_per_epoch)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn],
boundaries=[hparams.warmup_epochs * steps_per_epoch])
return schedule_fn
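# Sketch of the resulting schedule (values are illustrative only): with
# learning_rate=1e-3, warmup_epochs=5 and num_epochs=100 at the global batch
# size of 1024, base_learning_rate is 1e-3; the rate rises linearly from 0 over
# the first 5 * steps_per_epoch steps, then optax.join_schedules hands off to a
# single cosine decay that reaches 0 after the remaining 95 epochs.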
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
steps_per_epoch = num_train_examples // get_batch_size('imagenet_vit')
learning_rate_fn = create_learning_rate_fn(hyperparameters, steps_per_epoch)
opt_init_fn, opt_update_fn = optax.adam(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
learning_rate=learning_rate_fn)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng):
def _loss_fn(params):
"""loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1)
weight_penalty = hyperparameters.l2 * 0.5 * weight_l2
loss = loss + weight_penalty
return loss, (new_model_state, logits)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
aux, grad = grad_fn(current_param_container)
grad = lax.pmean(grad, axis_name='batch')
new_model_state, _ = aux[1]
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
del global_step
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
new_model_state, new_optimizer_state, new_params = pmapped_train_step(
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, hyperparameters, batch, per_device_rngs)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay,
learning_rate=0.0)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
"""Computes the squared l2 norm of the given parameters.
This function will only filter for parameters with
rank >= l2_decay_rank_threshold. So if this threshold is set to 2, then all
1d (and lower) parameter arrays, including all bias and batch norm params,
will be ignored in this computation.
Args:
params: Pytree containing parameters.
l2_decay_rank_threshold: The calculation will only include parameters with
param.ndim >= l2_decay_rank_threshold. Set to 2 to ignore all bias and
batch_norm params in the model.
Returns:
weight_l2: the squared l2 norm of all params matching the threshold.
"""
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(
jnp.sum(x**2)
for x in weight_penalty_params
if x.ndim >= l2_decay_rank_threshold)
return weight_l2
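# Worked example (shapes are illustrative only): for
#   params = {'kernel': jnp.ones((3, 4)), 'bias': jnp.ones((4,))}
# l2_regularization(params, l2_decay_rank_threshold=2) returns 12.0, since only
# the rank-2 kernel passes the threshold and the rank-1 bias is ignored.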
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng,
lr):
optimizer_state.hyperparams['learning_rate'] = lr
def _loss_fn(params):
"""loss function used for training."""
(logits, logit_paddings), new_model_state = workload.model_fn(
params,
batch,
model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_clip = hyperparameters.grad_clip
grad_norm = jnp.sqrt(l2_regularization(grad, 0))
scaled_grad = jax.tree_map(
lambda x: x / (grad_norm + _GRAD_CLIP_EPS) * grad_clip, grad)
grad = jax.lax.cond(grad_norm > grad_clip,
lambda _: scaled_grad,
lambda _: grad,
None)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
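# The clipping block in pmapped_train_step rescales the whole gradient pytree by
# grad_clip / (grad_norm + _GRAD_CLIP_EPS) whenever the global l2 norm exceeds
# hyperparameters.grad_clip, i.e. gradient clipping by global norm (equivalent,
# up to the epsilon term, to optax.clip_by_global_norm).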
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del loss_type
lr = get_learning_rate(global_step, hyperparameters)
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
per_device_rngs,
lr)
new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs
if global_step <= 1000 or global_step % 100 == 0:
logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
global_step,
loss.mean(),
grad_norm.mean(),
lr)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'train_step_ctc_loss': loss.mean(),
'grad_norm': grad_norm.mean(),
'learning_rate': lr,
},
global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for LibriSpeech."""
from typing import Dict, Iterator, List, Tuple
import numpy as np
import torch
import torch.distributed.nn as dist_nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
ctc_loss = torch.nn.CTCLoss(blank=0, reduction='none')
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
optimizer = torch.optim.AdamW(
params=model_params.parameters(),
lr=0.0,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay)
return {'optimizer': optimizer}
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del model_state
del loss_type
optimizer = optimizer_state['optimizer']
optimizer.zero_grad()
current_model = current_param_container
(logits, logits_padding), _ = workload.model_fn(
current_model,
batch,
None,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logits_padding))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
for g in optimizer.param_groups:
g['lr'] = get_learning_rate(global_step, hyperparameters)
if hasattr(hyperparameters, 'grad_clip'):
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=hyperparameters.grad_clip)
optimizer.step()
return optimizer_state, current_param_container, None
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
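"""Training algorithm track submission functions for OGBG."""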
from typing import Dict, Iterator, List, Tuple
import torch
import torch.distributed.nn as dist_nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'ogbg': 32768}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an Adam optimizer."""
del workload
del model_state
del rng
optimizer_state = {
'optimizer':
torch.optim.Adam(
model_params.parameters(), lr=hyperparameters.learning_rate),
}
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del hyperparameters
del loss_type
del eval_results
del global_step
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits, batch['weights'])
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
optimizer_state['optimizer'].step()
return optimizer_state, current_param_container, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
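"""Training algorithm track submission functions for OGBG."""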
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'ogbg': 2048}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an Adam optimizer."""
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optax.adam(
learning_rate=hyperparameters.learning_rate)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng):
del hyperparameters
def _loss_fn(params):
logits_batch, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
mask_batch = batch['weights']
loss_dict = workload.loss_fn(batch['targets'], logits_batch, mask_batch)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
updates, new_optimizer_state = opt_update_fn(
grad, optimizer_state, current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
del global_step
optimizer_state, opt_update_fn = optimizer_state
pmapped_train_step = jax.pmap(
train_step,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0),
static_broadcasted_argnums=(0, 1))
dropout_rngs = jax.random.split(rng, jax.local_device_count())
new_model_state, new_optimizer_state, new_params = pmapped_train_step(
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, hyperparameters, batch, dropout_rngs)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for ImageNet."""
from typing import Dict, Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 1024
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
batch_size = get_batch_size('imagenet_resnet')
base_lr = hyperparameters.learning_rate * batch_size / 256.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=base_lr,
momentum=hyperparameters.momentum,
weight_decay=hyperparameters.l2,
nesterov=True),
}
steps_per_epoch = workload.num_train_examples // batch_size
scheduler1 = LinearLR(
optimizer_state['optimizer'],
start_factor=1e-10,
end_factor=1.,
total_iters=hyperparameters.warmup_epochs * steps_per_epoch)
cosine_epochs = max(
hyperparameters.num_epochs - hyperparameters.warmup_epochs, 1)
scheduler2 = CosineAnnealingLR(
optimizer_state['optimizer'], T_max=cosine_epochs * steps_per_epoch)
optimizer_state['scheduler'] = SequentialLR(
optimizer_state['optimizer'],
schedulers=[scheduler1, scheduler2],
milestones=[hyperparameters.warmup_epochs * steps_per_epoch])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del hyperparameters
del loss_type
del eval_results
del global_step
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=logits_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for ImageNet."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 1024
def create_learning_rate_fn(hparams: spec.Hyperparameters,
steps_per_epoch: int):
"""Create learning rate schedule."""
base_learning_rate = hparams.learning_rate * \
get_batch_size('imagenet_resnet') / 256.
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=base_learning_rate,
transition_steps=hparams.warmup_epochs * steps_per_epoch)
cosine_epochs = max(hparams.num_epochs - hparams.warmup_epochs, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=base_learning_rate,
decay_steps=cosine_epochs * steps_per_epoch)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn],
boundaries=[hparams.warmup_epochs * steps_per_epoch])
return schedule_fn
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
steps_per_epoch = num_train_examples // get_batch_size('imagenet_resnet')
learning_rate_fn = create_learning_rate_fn(hyperparameters, steps_per_epoch)
opt_init_fn, opt_update_fn = optax.sgd(
nesterov=True,
momentum=hyperparameters.momentum,
learning_rate=learning_rate_fn)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng):
def _loss_fn(params):
"""loss function used for training."""
variables = {'params': params, **model_state}
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
weight_penalty_params = jax.tree_util.tree_leaves(variables['params'])
weight_l2 = sum(jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1)
weight_penalty = hyperparameters.l2 * 0.5 * weight_l2
loss = loss + weight_penalty
return loss, (new_model_state, logits)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
aux, grad = grad_fn(current_param_container)
grad = lax.pmean(grad, axis_name='batch')
new_model_state, _ = aux[1]
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
del global_step
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
new_model_state, new_optimizer_state, new_params = pmapped_train_step(
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, hyperparameters, batch, per_device_rngs)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
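"""Training algorithm track submission functions for Criteo1TB DLRM-Small."""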
from typing import Dict, Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
batch_sizes = {'criteo1tb': 524_288}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del rng
del model_state
base_lr = hyperparameters.learning_rate
optimizer_state = {
'optimizer':
torch.optim.AdamW(
model_params.parameters(),
lr=base_lr,
weight_decay=hyperparameters.weight_decay,
betas=(hyperparameters.beta1, 0.999)),
}
scheduler1 = LinearLR(
optimizer_state['optimizer'],
start_factor=1e-12,
end_factor=1.,
total_iters=hyperparameters.warmup_steps)
scheduler2 = CosineAnnealingLR(
optimizer_state['optimizer'],
T_max=(workload.step_hint - hyperparameters.warmup_steps),
)
optimizer_state['scheduler'] = SequentialLR(
optimizer_state['optimizer'],
schedulers=[scheduler1, scheduler2],
milestones=[hyperparameters.warmup_steps])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
dropout_rate=None,
aux_dropout_rate=None,
update_batch_norm=False)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=logits_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for Criteo1TB DLRM-Small."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 524_288 // 2
def create_learning_rate_fn(workload: spec.Workload,
hparams: spec.Hyperparameters):
"""Create learning rate schedule."""
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hparams.learning_rate,
transition_steps=hparams.warmup_steps)
cosine_fn = optax.cosine_decay_schedule(
init_value=hparams.learning_rate,
decay_steps=(workload.step_hint - hparams.warmup_steps))
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[hparams.warmup_steps])
return schedule_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
learning_rate_fn = create_learning_rate_fn(workload, hyperparameters)
opt_init_fn, opt_update_fn = optax.adamw(
learning_rate=learning_rate_fn,
b1=hyperparameters.beta1,
weight_decay=hyperparameters.weight_decay)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng):
def _loss_fn(params):
"""loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=False)
loss_dict = workload.loss_fn(batch['targets'], logits)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
return loss, new_model_state
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(loss, new_model_state), grad = grad_fn(current_param_container)
(loss, grad) = lax.pmean((loss, grad), axis_name='batch')
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
new_model_state, new_optimizer_state, new_params, loss, grad_norm = pmapped_train_step(
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, batch, per_device_rngs)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for FastMRI."""
from typing import Dict, Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import StepLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'fastmri': 8}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
base_lr = hyperparameters.learning_rate * get_batch_size('fastmri')
optimizer_state = {
'optimizer':
torch.optim.RMSprop(
model_params.parameters(),
lr=base_lr,
weight_decay=hyperparameters.l2),
}
optimizer_state['scheduler'] = StepLR(
optimizer_state['optimizer'],
step_size=hyperparameters.lr_step_size,
gamma=hyperparameters.lr_gamma)
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
outputs_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=outputs_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
steps_per_epoch = workload.num_train_examples // get_batch_size('fastmri')
if (global_step + 1) % steps_per_epoch == 0:
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
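# A minimal, hedged usage sketch (not part of the submission API): it assumes
# toy values for step_size/gamma and only illustrates that a StepLR schedule
# like the one above is stepped once per epoch rather than once per batch.
if __name__ == '__main__':
  _param = torch.nn.Parameter(torch.zeros(1))
  _opt = torch.optim.RMSprop([_param], lr=0.1)
  _sched = StepLR(_opt, step_size=2, gamma=0.1)
  for _epoch in range(5):
    _opt.step()  # One (empty) optimizer step per "epoch" for illustration.
    _sched.step()
    print(f'epoch {_epoch}: lr = {_opt.param_groups[0]["lr"]}')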
|
"""Training algorithm track submission functions for FastMRI in Jax."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 64
def create_learning_rate_fn(hparams: spec.Hyperparameters,
steps_per_epoch: int):
"""Create learning rate schedule."""
max_num_train_steps = 500 * steps_per_epoch
decay_epoch_period = hparams.lr_step_size * steps_per_epoch
decay_events = range(decay_epoch_period,
max_num_train_steps,
decay_epoch_period)
schedule_fn = optax.piecewise_constant_schedule(
init_value=hparams.learning_rate,
boundaries_and_scales={t: hparams.lr_gamma for t in decay_events})
return schedule_fn
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
  steps_per_epoch = num_train_examples // get_batch_size('fastmri')
learning_rate_fn = create_learning_rate_fn(hyperparameters, steps_per_epoch)
opt_init_fn, opt_update_fn = optax.rmsprop(
learning_rate=learning_rate_fn,
decay=0.99)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, None, 0, 0),
static_broadcasted_argnums=(0, 1, 4))
def pmapped_train_step(workload,
opt_update_fn,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng):
def _loss_fn(params):
"""loss function used for training."""
logits, _ = workload.model_fn(
params,
batch,
model_state=None,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1)
weight_penalty = hyperparameters.l2 * 0.5 * weight_l2
loss = loss + weight_penalty
return loss
grad_fn = jax.grad(_loss_fn)
grad = grad_fn(current_param_container)
grad = lax.pmean(grad, axis_name='batch')
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del model_state
del loss_type
del eval_results
del global_step
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
new_optimizer_state, new_params = pmapped_train_step(
workload, opt_update_fn, optimizer_state,
current_param_container, hyperparameters, batch, per_device_rngs)
return (new_optimizer_state, opt_update_fn), new_params, None
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
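# A minimal, hedged sketch (illustration only): evaluate the piecewise-constant
# schedule built by create_learning_rate_fn. The hyperparameter values and the
# steps_per_epoch below are made up for this example.
if __name__ == '__main__':
  from types import SimpleNamespace
  _hparams = SimpleNamespace(learning_rate=1e-3, lr_step_size=2, lr_gamma=0.5)
  _schedule_fn = create_learning_rate_fn(_hparams, steps_per_epoch=10)
  for _step in (0, 25, 45):
    print(_step, float(_schedule_fn(_step)))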
|
"""Training algorithm track submission functions for CIFAR10."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'cifar': 128}
return batch_sizes[workload_name]
def cosine_decay(lr, step, total_steps):
ratio = jnp.maximum(0., step / total_steps)
mult = 0.5 * (1. + jnp.cos(jnp.pi * ratio))
return mult * lr
def create_learning_rate_fn(hparams: spec.Hyperparameters,
steps_per_epoch: int):
"""Create learning rate schedule."""
base_learning_rate = hparams.learning_rate * get_batch_size('cifar') / 128.
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=base_learning_rate,
transition_steps=hparams.warmup_epochs * steps_per_epoch)
cosine_epochs = max(hparams.num_epochs - hparams.warmup_epochs, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=base_learning_rate,
decay_steps=cosine_epochs * steps_per_epoch)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn],
boundaries=[hparams.warmup_epochs * steps_per_epoch])
return schedule_fn
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
steps_per_epoch = num_train_examples // get_batch_size('cifar')
learning_rate_fn = create_learning_rate_fn(hyperparameters, steps_per_epoch)
opt_init_fn, opt_update_fn = optax.sgd(
nesterov=True,
momentum=hyperparameters.momentum,
learning_rate=learning_rate_fn)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng):
def _loss_fn(params):
"""loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(jnp.sum(x**2) for x in weight_penalty_params if x.ndim > 1)
weight_penalty = hyperparameters.l2 * 0.5 * weight_l2
loss = loss + weight_penalty
return loss, new_model_state
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(_, new_model_state), grad = grad_fn(current_param_container)
grad = lax.pmean(grad, axis_name='batch')
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del global_step
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
new_optimizer_state, new_params, new_model_state = pmapped_train_step(
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, hyperparameters, batch, per_device_rngs)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
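# A minimal, hedged sketch (illustration only): evaluate the cosine_decay
# helper at a few steps to show the schedule shape. The base learning rate of
# 0.1 and the 500-step horizon are made-up toy values.
if __name__ == '__main__':
  for _step in (0, 250, 500):
    print(_step, float(cosine_decay(0.1, _step, total_steps=500)))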
|
"""Training algorithm track submission functions for CIFAR10."""
from typing import Dict, Iterator, List, Tuple
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'cifar': 128}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del workload
del model_state
del rng
base_lr = hyperparameters.learning_rate * get_batch_size('cifar') / 128.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=base_lr,
momentum=hyperparameters.momentum,
weight_decay=hyperparameters.l2),
}
scheduler1 = LinearLR(
optimizer_state['optimizer'],
start_factor=1e-5,
end_factor=1.,
total_iters=hyperparameters.warmup_epochs)
cosine_epochs = max(
hyperparameters.num_epochs - hyperparameters.warmup_epochs, 1)
scheduler2 = CosineAnnealingLR(
optimizer_state['optimizer'], T_max=cosine_epochs)
optimizer_state['scheduler'] = SequentialLR(
optimizer_state['optimizer'],
schedulers=[scheduler1, scheduler2],
milestones=[hyperparameters.warmup_epochs])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del hyperparameters
del loss_type
del eval_results
current_model = current_param_container
current_param_container.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=logits_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
steps_per_epoch = workload.num_train_examples // get_batch_size('cifar')
if (global_step + 1) % steps_per_epoch == 0:
optimizer_state['scheduler'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Template submission module.
See https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md#allowed-submissions
and https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md#disallowed-submissions
for guidelines.
"""
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule.
Returns:
optimizer state
optimizer_update_fn
"""
pass
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""
Returns:
(new_optimizer_state, update_fn)
new_params
new_model_state
"""
pass
def get_batch_size(workload_name):
"""
Returns batch size for each workload.
Valid workload_name values are in
["wmt",
"ogbg",
"criteo1tb",
"fastmri",
"imagenet_resnet",
"imagenet_vit",
"librispeech_deepspeech",
"librispeech_conformer"]
Returns:
batch_size
"""
pass
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
Tip:
If you would just like the next batch from the input queue return next(input_queue).
Returns:
batch: next batch of input data
"""
pass
|
import json
import os
import re
from absl import logging
import pandas as pd
TRIAL_LINE_REGEX = r'(.*) --- Tuning run (\d+)/(\d+) ---'
METRICS_LINE_REGEX = r'(.*) Metrics: ({.*})'
TRIAL_DIR_REGEX = r'trial_(\d+)'
MEASUREMENTS_FILENAME = 'eval_measurements.csv'
#### File IO helper functions ###
def get_logfile_paths(logdir):
"""Gets all files ending in .log in logdir
"""
filenames = os.listdir(logdir)
logfile_paths = []
for f in filenames:
if f.endswith(".log"):
f = os.path.join(logdir, f)
logfile_paths.append(f)
return logfile_paths
### Logfile reading helper functions ###
def decode_metrics_line(line):
"""Convert metrics line to dict.
Args:
line: str
Returns:
dict_of_lists: dict where keys are metric names and vals
are lists of values.
e.g. {'loss':[5.1, 3.2, 1.0],
'step':[100, 200, 300]}
"""
eval_results = []
dict_str = re.match(METRICS_LINE_REGEX, line).group(2)
dict_str = dict_str.replace("'", "\"")
dict_str = dict_str.replace("(", "")
dict_str = dict_str.replace(")", "")
dict_str = dict_str.replace("DeviceArray", "")
dict_str = dict_str.replace(", dtype=float32", "")
dict_str = dict_str.replace("nan", "0")
metrics_dict = json.loads(dict_str)
for item in metrics_dict['eval_results']:
if isinstance(item, dict):
eval_results.append(item)
keys = eval_results[0].keys()
dict_of_lists = {}
for key in keys:
dict_of_lists[key] = []
for eval_results_dict in eval_results:
for key in eval_results_dict.keys():
val = eval_results_dict[key]
dict_of_lists[key].append(val)
return dict_of_lists
def get_trials_dict(logfile):
"""Get a dict of dicts with metrics for each
tuning run.
Returns:
trials_dict: Dict of dicts where outer dict keys
are trial indices and inner dict key-value pairs
are metrics and list of values.
e.g. {'trial_0': {'loss':[5.1, 3.2, 1.0],
'step':[100, 200, 300]},
'trial_1': {'loss':[5.1, 3.2, 1.0],
'step':[100, 200, 300]}}
"""
trial = 0
metrics_lines = {}
with open(logfile, 'r') as f:
for line in f:
if re.match(TRIAL_LINE_REGEX, line):
trial = re.match(TRIAL_LINE_REGEX, line).group(2)
if re.match(METRICS_LINE_REGEX, line):
metrics_lines[trial] = decode_metrics_line(line)
if len(metrics_lines) == 0:
raise ValueError(f'Log file does not have a metrics line {logfile}')
return metrics_lines
### Results formatting helper functions ###
def get_trials_df_dict(logfile):
"""Get a dict with dataframes with metrics for each
tuning run.
Preferable format for saving dataframes for tables.
Args:
logfile: str path to logfile.
Returns:
    Dict mapping each trial index to a DataFrame whose rows are evals and
    whose columns are metric names.
"""
trials_dict = get_trials_dict(logfile)
trials_df_dict = {}
for trial in trials_dict.keys():
metrics = trials_dict[trial]
trials_df_dict[trial] = pd.DataFrame(metrics)
return trials_df_dict
def get_trials_df(logfile):
"""Gets a df of per trial results from a logfile.
Args:
    logfile: str path to the logfile.
Returns:
df: DataFrame where indices are trials, columns are
metric names and values are lists.
e.g
+---------+-----------------+-----------------+
| | loss | step |
|---------+-----------------+-----------------|
| trial_0 | [5.1, 3.2, 1.0] | [100, 200, 300] |
| trial_1 | [5.1, 3.2, 1.0] | [100, 200, 300] |
+---------+-----------------+-----------------+
"""
trials_dict = get_trials_dict(logfile)
df = pd.DataFrame(trials_dict).transpose()
return df
## Get scoring code
def get_experiment_df(experiment_dir):
"""Gets a df of per trial results from an experiment dir.
The output df can be provided as input to
scoring.compute_performance_profiles.
Args:
experiment_dir: path to experiment directory containing
results for workloads.
The directory structure is assumed to be:
+ experiment_dir
+ <workload>
+ <trial>
- eval_measurements.csv
Returns:
df: DataFrame where indices are trials, columns are
metric names and values are lists.
e.g
+----+-----------+---------+--------------------+--------------------+
| | workload | trial | validation/accuracy| score |
|----+-----------+---------+--------------------+--------------------|
| 0 | mnist_jax | trial_1 | [0.0911, 0.0949] | [10.6396, 10.6464] |
+----+-----------+---------+--------------------+--------------------+
"""
df = pd.DataFrame()
workload_dirs = os.listdir(experiment_dir)
for workload in workload_dirs:
data = {
'workload': workload,
}
trial_dirs = [
t for t in os.listdir(os.path.join(experiment_dir, workload))
if re.match(TRIAL_DIR_REGEX, t)
]
for trial in trial_dirs:
eval_measurements_filepath = os.path.join(
experiment_dir,
workload,
trial,
MEASUREMENTS_FILENAME,
)
try:
trial_df = pd.read_csv(eval_measurements_filepath)
      except FileNotFoundError:
logging.info(f'Could not read {eval_measurements_filepath}')
continue
data['trial'] = trial
for column in trial_df.columns:
values = trial_df[column].to_numpy()
data[column] = values
trial_df = pd.DataFrame([data])
df = pd.concat([df, trial_df], ignore_index=True)
return df
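# A minimal, hedged sketch (illustration only): decode a synthetic metrics log
# line. The line below is a made-up example of the format METRICS_LINE_REGEX
# expects; real log lines come from the experiment logger.
if __name__ == '__main__':
  _line = ("I0418 12:00:00 Metrics: {'eval_results': "
           "[{'loss': 1.0, 'global_step': 100}, "
           "{'loss': 0.5, 'global_step': 200}]}")
  print(decode_metrics_line(_line))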
|
from absl.testing import absltest
import scoring_utils
TEST_LOGFILE = 'test_data/adamw_fastmri_jax_04-18-2023-13-10-58.log'
TEST_DIR = 'test_data/experiment_dir'
NUM_EVALS = 18
class Test(absltest.TestCase):
def test_get_trials_dict(self):
trials_dict = scoring_utils.get_trials_dict(TEST_LOGFILE)
self.assertEqual(len(trials_dict['1']['global_step']), NUM_EVALS)
def test_get_trials_df_dict(self):
trials_dict = scoring_utils.get_trials_df_dict(TEST_LOGFILE)
for trial in trials_dict:
df = trials_dict[trial]
self.assertEqual(len(df.index), NUM_EVALS)
def test_get_trials_df(self):
df = scoring_utils.get_trials_df(TEST_LOGFILE)
for column in df.columns:
self.assertEqual(len(df.at['1', column]), NUM_EVALS)
def test_get_experiment_df(self):
df = scoring_utils.get_experiment_df(TEST_DIR)
if __name__ == '__main__':
absltest.main()
|
"""Performance and scoring code.
The three primary methods exposed by the `scoring` module are:
- `compute_performance_profiles`: generates performance profiles for a set of
submissions over all workloads as defined in the scoring rules:
https://github.com/mlcommons/algorithmic-efficiency/blob/main/RULES.md
- `compute_leaderboard_score`: computes final scores from performance profiles.
- `plot_performance_profiles`: plot performance profiles for a set of
submissions.
The two primary inputs to `compute_performance_profiles` are
1. A dictionary of pandas DataFrames, where each key is a globally unique
identifier for a submission and each value is a DataFrame containing one row
per trial per workload in that submission. At minimum, this DataFrame should
include a column of np.arrays indicating time (e.g., 'global_step'), a column
of np.arrays indicating performance (e.g., 'validation/accuracy') for each
workload and a column 'workload' that indicates the workload identifier.
2. A dictionary of workload metadata describing each workload in the form:
{
'workload_identifier': {
'target': VALUE,
'metric': 'validation/error_rate',
}
}
The keys in this dictionary should match the workload identifiers used in
the dictionary of submissions.
"""
import itertools
import operator
import os
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import algorithmic_efficiency.workloads.workloads as workloads_registry
WORKLOADS = workloads_registry.WORKLOADS
WORKLOAD_NAME_PATTERN = '(.*)(_jax|_pytorch)'
BASE_WORKLOADS_DIR = 'algorithmic_efficiency/workloads/'
MIN_EVAL_METRICS = [
'ce_loss',
'error_rate',
'ctc_loss',
'wer',
'l1_loss',
]
MAX_EVAL_METRICS = ['average_precision', 'ssim', 'accuracy', 'bleu_score']
def generate_eval_cols(metrics):
splits = ['train', 'validation', 'test']
return [f'{split}/{col}' for split, col in itertools.product(splits, metrics)]
MINIMIZE_REGISTRY = {k: True for k in generate_eval_cols(MIN_EVAL_METRICS)}
MINIMIZE_REGISTRY.update(
{k: False for k in generate_eval_cols(MAX_EVAL_METRICS)})
MINIMIZE_REGISTRY['train_cost'] = True
def check_if_minimized(col_name):
"""Guess if the eval metric column name should be minimized or not."""
for prefix in ['best_', 'final_']:
col_name = col_name.replace(prefix, '')
for col in MINIMIZE_REGISTRY:
if col in col_name:
return MINIMIZE_REGISTRY[col]
raise ValueError(f'Column {col_name} not found in `MINIMIZE_REGISTRY` as '
'either a column name or a substring of a column name.')
def get_index_that_reaches_best(workload_df, metric_col):
"""Get the eval index in which a workload reaches the best on metric_col.
Args:
workload_df: A subset of a submission's trials DataFrame that
includes only the trials in a single workload.
metric_col: Name of array column in workload_df
(e.g., `validation/l1_loss`).
Returns:
Tuple of trial index, time index, and best value where the workload
reached the best metric_col. Return (-1, -1, -1) if no undiverged trials.
"""
is_minimized = check_if_minimized(metric_col)
series = workload_df[metric_col]
  series = series.dropna()
op = np.min if is_minimized else np.max
best = series.apply(op)
op_idx = np.argmin if is_minimized else np.argmax
best_idx = series.apply(op_idx)
if best.empty:
return -1, -1, -1
else:
trial = best.idxmin() if is_minimized else best.idxmax()
return trial, best_idx[trial], best[trial]
def get_index_that_reaches_target(workload_df,
validation_metric,
test_metric,
validation_target,
test_target):
"""Get the eval index in which a workload reaches the target metric_col.
Args:
workload_df: A subset of a submission's trials DataFrame that
includes only the trials in a single workload.
metric_col: Name of array column in workload_df (e.g., `validation/l1_loss`).
target: Target value for metric_col.
Returns:
Tuple of trial index and time index where the workload reached the target
metric_col. Return (-1, -1) if not reached.
"""
is_minimized = check_if_minimized(validation_metric)
validation_series = workload_df[validation_metric]
test_series = workload_df[test_metric]
  # Drop trials whose validation or test metric entry is missing (NaN).
  valid_mask = validation_series.notna() & test_series.notna()
  validation_series = validation_series[valid_mask]
  test_series = test_series[valid_mask]
op = operator.le if is_minimized else operator.ge
validation_target_reached = validation_series.apply(
lambda x: op(x, validation_target))
test_target_reached = test_series.apply(lambda x: op(x, test_target))
  target_reached = validation_target_reached & test_target_reached
# Remove trials that never reach the target
target_reached = target_reached[target_reached.apply(np.any)]
# If we have no trials that have reached the target, return -1. Else, return
# the eval index of the earliest point the target is reached.
if target_reached.empty:
return -1, -1
else:
index_reached = target_reached.apply(np.argmax)
trial = index_reached.idxmin()
return trial, index_reached[trial]
def get_times_for_submission(submission,
submission_tag,
time_col='global_step',
verbosity=1):
"""Get times to target for each workload in a submission.
Args:
submission: A DataFrame containing one row for each trial in each workload
for a given submission.
    submission_tag: Globally unique identifier for a submission.
time_col: A string indicating which column to use for time.
verbosity: Debug level of information; choice of (1, 2, 3).
Returns:
DataFrame with columns `submission`, `workload`, and time_col.
"""
workloads = []
submission_name = submission_tag.split('.')[1]
for workload, group in submission.groupby('workload'):
workload_name = re.match(WORKLOAD_NAME_PATTERN, workload).group(1)
framework = re.match(WORKLOAD_NAME_PATTERN, workload).group(2)
workload_metadata = WORKLOADS[workload_name]
# Extend path according to framework.
workload_metadata['workload_path'] = os.path.join(
BASE_WORKLOADS_DIR,
workload_metadata['workload_path'] + f'{framework}',
'workload.py')
workload_init_kwargs = {}
workload_obj = workloads_registry.import_workload(
workload_path=workload_metadata['workload_path'],
workload_class_name=workload_metadata['workload_class_name'],
workload_init_kwargs=workload_init_kwargs)
metric_name = workload_obj.target_metric_name
validation_metric = f'validation/{metric_name}'
test_metric = f'test/{metric_name}'
validation_target = workload_obj.validation_target_value
test_target = workload_obj.test_target_value
trial_idx, time_idx = get_index_that_reaches_target(
group, validation_metric, test_metric, validation_target, test_target)
if time_idx > -1:
time_val = group[time_col].loc[trial_idx][time_idx]
else:
time_val = float('inf')
workloads.append({
'submission': submission_name,
'workload': workload,
time_col: time_val,
})
if verbosity > 0:
print(' hparams:')
if time_idx > -1:
hparams = group.loc[trial_idx, 'hparams']
for key, val in hparams.items():
print(f' - {key}: {val}')
else:
print('Submission did not reach target')
df = pd.DataFrame.from_records(workloads)
df = df.pivot(index='submission', columns='workload', values=time_col)
return df
def compute_performance_profiles(results,
time_col='global_step',
min_tau=1.0,
max_tau=None,
reference_submission_tag=None,
num_points=100,
scale='linear',
verbosity=0):
"""Compute performance profiles for a set of submission by some time column.
Args:
results: Dict where keys are submission names and values are a DataFrame of
trials where each row is a trial and each column is a field for a given
trial. Results should contain keys for each workload's metric, time_col,
'workload'. See file header comment for more details.
time_col: A string indicating which column to use for time.
min_tau: Minimum tau to use for plotting.
max_tau: Maximum tau to use for plotting.
reference_submission_tag: If specified, must be an element of
`submission_tags`. Used as the denominator for computing tau. Otherwise,
the minimum time to target is computed per-workload and used as the
denominator for tau.
num_points: Number of points to use for plotting.
scale: Linear or log scale for the x-axis.
verbosity: Debug level of information; choice of (1, 2, 3).
Returns:
A DataFrame of performance profiles for the set of submissions given in
`results` based on `time_col`. Each row represents a submission and each
    column represents rho(tau) for some value of tau (df.columns are the
different values of tau).
"""
dfs = []
for submission_tag, result in results.items():
print(f'\nComputing performance profile with respect to `{time_col}` for '
f'{submission_tag}')
dfs.append(
get_times_for_submission(result, submission_tag, time_col, verbosity))
df = pd.concat(dfs)
if verbosity > 0:
print(f'\n`{time_col}` to reach target:')
with pd.option_context('display.max_rows',
None,
'display.max_columns',
None,
'display.width',
1000):
print(df)
# Divide by the fastest.
if reference_submission_tag is None:
df.update(df.div(df.min(axis=0), axis=1))
else:
df.update(df.div(df.loc[reference_submission_tag, :], axis=1))
if verbosity > 0:
print(f'\n`{time_col}` to reach target normalized to best:')
with pd.option_context('display.max_rows',
None,
'display.max_columns',
None,
'display.width',
1000):
print(df)
  # If no max_tau is supplied, choose the value of tau that would plot all
  # non-inf and non-NaN data.
if max_tau is None:
max_tau = df.replace(float('inf'), -1).replace(np.nan, -1).values.max()
if scale == 'linear':
points = np.linspace(min_tau, max_tau, num=num_points)
elif scale == 'log':
points = np.logspace(
np.log10(min_tau), np.log10(max_tau), num=num_points, base=10.0)
def rho(r, tau):
return (r <= tau).sum(axis=1) / len(r.columns)
perf_df = pd.concat([rho(df, tau) for tau in points], axis=1)
cols = points
if scale == 'log':
cols = np.log10(points)
perf_df.columns = cols
return perf_df
def compute_leaderboard_score(df, normalize=False):
"""Compute leaderboard score by taking integral of performance profile.
Args:
df: pd.DataFrame returned from `compute_performance_profiles`.
normalize: divide by the range of the performance profile's tau.
Returns:
pd.DataFrame with one column of scores indexed by submission.
"""
scores = np.trapz(df, x=df.columns)
if normalize:
scores /= df.columns.max() - df.columns.min()
return pd.DataFrame(scores, columns=['score'], index=df.index)
def maybe_save_figure(save_dir, name, ext='pdf'):
"""Maybe save the current matplotlib.pyplot figure."""
if save_dir:
path = os.path.join(save_dir, f'{name}.{ext}')
with open(path, 'wb') as fout:
plt.savefig(fout, format=ext)
def maybe_save_df_to_csv(save_dir, df, path, **to_csv_kwargs):
if save_dir:
path = os.path.join(save_dir, path)
with open(path, 'w') as fout:
df.to_csv(fout, **to_csv_kwargs)
def plot_performance_profiles(perf_df,
df_col,
scale='linear',
save_dir=None,
figsize=(30, 10),
font_size=18):
"""Plot performance profiles.
Args:
perf_df: A DataFrame of performance profiles where each row represents a
submission and each column represents rho(tau) for some value of tau
      (df.columns are the different values of tau).
df_col: The column in the original submission results DataFrame used to
compute the performance profile. This argument is only used for axis
and file naming.
    scale: Whether the data in perf_df is on a linear or log scale. This
argument is only used for axis and file naming.
save_dir: If a valid directory is provided, save both the plot and perf_df
to the provided directory.
figsize: The size of the plot.
font_size: The font size to use for the legend.
Returns:
None. If a valid save_dir is provided, save both the plot and perf_df.
"""
fig = perf_df.T.plot(figsize=figsize)
df_col_display = f'log10({df_col})' if scale == 'log' else df_col
fig.set_xlabel(
f'Ratio of `{df_col_display}` to best submission', size=font_size)
fig.set_ylabel('Proportion of workloads', size=font_size)
fig.legend(prop={'size': font_size}, bbox_to_anchor=(1.0, 1.0))
maybe_save_figure(save_dir, f'performance_profile_by_{df_col_display}')
maybe_save_df_to_csv(save_dir,
perf_df,
f'performance_profile_{df_col_display}.csv')
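# A minimal, hedged sketch (illustration only): score a synthetic performance
# profile. The submission names, tau grid, and rho values below are made up;
# real profiles come from compute_performance_profiles.
if __name__ == '__main__':
  _taus = np.linspace(1.0, 4.0, num=4)
  _toy_perf_df = pd.DataFrame(
      [[0.25, 0.5, 0.75, 1.0], [0.5, 0.75, 1.0, 1.0]],
      index=['submission_a', 'submission_b'],
      columns=_taus)
  print(compute_leaderboard_score(_toy_perf_df, normalize=True))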
|
import os
from absl import app
from absl import flags
from absl import logging
import scoring_utils
from algorithmic_efficiency import workloads
import scoring
flags.DEFINE_string(
'experiment_path',
None,
'Path to experiment directory containing workload directories.')
flags.DEFINE_string('submission_tag', 'my.submission', 'Submission tag.')
flags.DEFINE_string('output_dir',
'scoring_results',
'Path to save performance profile table and plot.')
FLAGS = flags.FLAGS
def main(_):
df = scoring_utils.get_experiment_df(FLAGS.experiment_path)
results = {
FLAGS.submission_tag: df,
}
performance_profile_df = scoring.compute_performance_profiles(
results,
time_col='score',
min_tau=1.0,
max_tau=None,
reference_submission_tag=None,
num_points=100,
scale='linear',
verbosity=0)
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
scoring.plot_performance_profiles(
performance_profile_df, 'score', save_dir=FLAGS.output_dir)
logging.info(performance_profile_df)
if __name__ == '__main__':
flags.mark_flag_as_required('experiment_path')
app.run(main)
|
"""Submission file for a LAMB optimizer with warmup+cosine LR in PyTorch."""
import math
from typing import Dict, Iterator, List, Tuple
from absl import logging
import torch
from torch import Tensor
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
# Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py
class LAMB(torch.optim.Optimizer):
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0.0):
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step."""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
          raise RuntimeError('LAMB does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state_steps.append(state['step'])
lamb(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
def lamb(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
'API has changed, `state_steps` argument must contain a list of' +
' singleton tensors')
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# Update step.
step_t += 1
# Decay the first and second moment running average coefficient.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
step = step_t.item()
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
bias_correction2_sqrt = math.sqrt(bias_correction2)
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
update = exp_avg / denom
update.div_(bias_correction1)
update.add_(weight_decay * param)
# Scale updates by trust ratio.
param_norm = torch.linalg.norm(param)
update_norm = torch.linalg.norm(update)
# Set trust_ratio to 1 in case where parameters would never be updated.
if param_norm == 0. or update_norm == 0.:
trust_ratio = 1.
else:
trust_ratio = param_norm / update_norm
param.add_(update, alpha=-lr * trust_ratio)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a LAMB optimizer and a learning rate schedule."""
del model_state
del rng
optimizer_state = {
'optimizer':
LAMB(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=1e-8,
weight_decay=hyperparameters.weight_decay)
}
def pytorch_cosine_warmup(step_hint: int, hyperparameters, optimizer):
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup = LinearLR(
optimizer, start_factor=1e-10, end_factor=1., total_iters=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
return SequentialLR(
optimizer, schedulers=[warmup, cosine_decay], milestones=[warmup_steps])
optimizer_state['scheduler'] = pytorch_cosine_warmup(
workload.step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
  loss_dict = workload.loss_fn(
      label_batch=batch['targets'],
      logits_batch=logits_batch,
      mask_batch=batch.get('weights'),
      label_smoothing=label_smoothing)
  loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
# Log training metrics - loss, grad_norm, batch_size.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
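# A minimal, hedged sketch (illustration only, not part of the submission API):
# run a single LAMB step on a tiny linear model so the trust-ratio scaling in
# lamb() above is exercised. The model shape and learning rate are made up.
if __name__ == '__main__':
  _model = torch.nn.Linear(4, 2)
  _opt = LAMB(_model.parameters(), lr=1e-3, weight_decay=1e-2)
  _model(torch.randn(8, 4)).sum().backward()
  _opt.step()
  print('LAMB step OK')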
|
"""Submission file for a LAMB optimizer with warmup+cosine LR in Jax."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def scale_by_learning_rate(learning_rate, flip_sign=True):
m = -1 if flip_sign else 1
if callable(learning_rate):
return optax.scale_by_schedule(lambda count: m * learning_rate(count))
return optax.scale(m * learning_rate)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a LAMB optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
return schedule_fn
# Create optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = optax.lamb(
learning_rate=lr_schedule_fn,
b1=1 - hyperparameters.one_minus_beta1,
b2=hyperparameters.beta2,
eps=1e-8,
weight_decay=hyperparameters.weight_decay)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
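# A minimal, hedged sketch (illustration only): scale_by_learning_rate wraps a
# constant learning rate (or a schedule) into an optax transformation that
# flips the sign for gradient descent. The parameter/gradient values are toy.
if __name__ == '__main__':
  _tx = scale_by_learning_rate(0.1)
  _params = {'w': jnp.ones(3)}
  _grads = {'w': jnp.ones(3)}
  _updates, _ = _tx.update(_grads, _tx.init(_params), _params)
  print(_updates['w'])  # Expected: [-0.1, -0.1, -0.1].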
|
"""Submission file for an NAdamW optimizer with warmup+cosine LR in PyTorch."""
import math
from typing import Dict, Iterator, List, Tuple
from absl import logging
import torch
from torch import Tensor
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
# Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py.
class NAdamW(torch.optim.Optimizer):
r"""Implements NAdamW algorithm.
See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of
the NAdam algorithm (there is also a comment in the code which highlights
the only difference of NAdamW and AdamW).
For further details regarding the algorithm we refer to
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2):
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = {
'lr': lr, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay
}
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('NAdamW does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state_steps.append(state['step'])
nadamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
def nadamw(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float) -> None:
r"""Functional API that performs NAdamW algorithm computation.
See NAdamW class for details.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
'API has changed, `state_steps` argument must contain a list of' +
' singleton tensors')
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# Update step.
step_t += 1
# Perform stepweight decay.
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
# We undo these ops later on, which could cause numerical issues but saves
# us from having to make an extra copy of the gradients.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
step = step_t.item()
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
bias_correction2_sqrt = math.sqrt(bias_correction2)
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
param.addcdiv_(exp_avg, denom, value=-step_size)
exp_avg.sub_(grad, alpha=1 - beta1).div_(beta1)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a NAdamW optimizer and a learning rate schedule."""
del model_state
del rng
optimizer_state = {
'optimizer':
NAdamW(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(1.0 - hyperparameters.one_minus_beta1,
hyperparameters.beta2),
eps=1e-8,
weight_decay=hyperparameters.weight_decay),
}
def pytorch_cosine_warmup(step_hint: int, hyperparameters, optimizer):
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup = LinearLR(
optimizer, start_factor=1e-10, end_factor=1., total_iters=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
return SequentialLR(
optimizer, schedulers=[warmup, cosine_decay], milestones=[warmup_steps])
optimizer_state['scheduler'] = pytorch_cosine_warmup(
workload.step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
# Log training metrics - loss, grad_norm, batch_size.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
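# Illustrative smoke test, not part of the submission API: it instantiates the
# NAdamW optimizer defined above for a single toy parameter, using hypothetical
# hyperparameter values, and takes a few steps on a constant gradient.
if __name__ == '__main__':
  _param = torch.nn.Parameter(torch.zeros(4))
  _demo_opt = NAdamW([_param],
                     lr=1e-3,
                     betas=(0.9, 0.999),
                     eps=1e-8,
                     weight_decay=1e-2)
  for _ in range(5):
    _param.grad = torch.ones_like(_param)
    _demo_opt.step()
    _demo_opt.zero_grad()
  print('Toy parameter after 5 NAdamW steps:', _param.detach())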
|
"""Submission file for an NAdamW optimizer with warmup+cosine LR in Jax."""
import functools
# isort: off
# We have to turn off isort here to resolve a conflict between isort and yapf.
from typing import (Any,
Callable,
Dict,
Iterator,
List,
NamedTuple,
Optional,
Tuple,
Union)
# isort: on
import chex
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
# Forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/alias.py
def nadamw(
learning_rate: Union[float, optax.Schedule],
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
debias: bool = True,
weight_decay: float = 0.0,
weight_decay_mask: Optional[Union[Any, Callable[[optax.Params],
Any]]] = None,
) -> optax.GradientTransformation:
"""Rescale updates according to the NAdam algorithm.
References:
There seem to be multiple versions of NAdam. The original version is here
https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ (the official PyTorch
implementation also follows this).
Current code implements a simpler version with no momentum decay and slightly
different bias correction terms. The exact description can be found here
https://arxiv.org/pdf/1910.05446.pdf (Table 1).
Args:
learning_rate: A fixed global scaling factor.
b1: Decay rate for the exponentially weighted average of grads.
b2: Decay rate for the exponentially weighted average of squared grads.
eps: Term added to the denominator to improve numerical stability.
eps_root: Term added to the denominator inside the square-root to improve
numerical stability when backpropagating gradients through the rescaling.
debias: Whether to use bias correction.
weight_decay: Strength of the weight decay regularization. Note that this
weight decay is multiplied with the learning rate. This is consistent with
other frameworks such as PyTorch, but different from (Loshchilov et al,
2019) where the weight decay is only multiplied with the "schedule
multiplier", but not the base learning rate.
weight_decay_mask: A tree with same structure as (or a prefix of) the params
PyTree, or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the weight decay to, and `False` for those you want to skip. Note
that the Nadam gradient transformations are applied to all parameters.
Returns:
An (init_fn, update_fn) tuple.
"""
return optax.chain(
scale_by_nadam(b1, b2, eps, eps_root, debias),
optax.add_decayed_weights(weight_decay, weight_decay_mask),
scale_by_learning_rate(learning_rate))
# All functions below are forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/transform.py
def scale_by_nadam(b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
debias: bool = True,
power: float = 0.5) -> optax.GradientTransformation:
"""Rescale updates according to the NAdam algorithm.
References:
There seem to be multiple versions of NAdam. The original version is here
  https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ (the official PyTorch
  implementation also follows this).
  Current code implements a simpler version with no momentum decay and slightly
  different (standard Adam) bias correction terms. The exact description can be
  found here https://arxiv.org/pdf/1910.05446.pdf (Table 1).
Args:
b1: Decay rate for the exponentially weighted average of grads.
b2: Decay rate for the exponentially weighted average of squared grads.
eps: Term added to the denominator to improve numerical stability.
eps_root: Term added to the denominator inside the square-root to improve
numerical stability when backpropagating gradients through the rescaling.
debias: Whether to use bias correction.
power: The power to use in the preconditioner (0.5 in default adam).
Returns:
An (init_fn, update_fn) tuple.
"""
raise_power = jnp.sqrt if power == 0.5 else lambda x: jnp.power(x, power)
def init_fn(params):
mu = jax.tree_map(jnp.zeros_like, params) # First moment
nu = jax.tree_map(jnp.zeros_like, params) # Second moment
return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
def update_fn(updates, state, params=None):
del params
mu = _update_moment(updates, state.mu, b1, 1)
nu = _update_moment(updates, state.nu, b2, 2)
count = state.count + jnp.array(1, dtype=jnp.int32)
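    # Nesterov step: fold the incoming gradient into the first moment a second
    # time, so the update below uses a look-ahead momentum estimate.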
mu_hat = _update_moment(updates, mu, b1, 1)
mu_hat = mu_hat if not debias else _bias_correction(mu_hat, b1, count)
nu_hat = nu if not debias else _bias_correction(nu, b2, count)
updates = jax.tree_map(
lambda m, v: m / (raise_power(v + eps_root) + eps), mu_hat, nu_hat)
return updates, ScaleByAdamState(count=count, mu=mu, nu=nu)
return optax.GradientTransformation(init_fn, update_fn)
class ScaleByAdamState(NamedTuple):
"""State for the NAdam algorithm."""
count: chex.Array # shape=(), dtype=jnp.int32.
mu: optax.Updates
nu: optax.Updates
def _update_moment(updates, moments, decay, order):
"""Compute the exponential moving average of the `order-th` moment."""
return jax.tree_map(
lambda g, t: (1 - decay) * (g**order) + decay * t, updates, moments)
def _bias_correction(moment, decay, count):
"""Perform bias correction. This becomes a no-op as count goes to infinity."""
beta = 1 - decay**count
return jax.tree_map(lambda t: t / beta.astype(t.dtype), moment)
def scale_by_learning_rate(learning_rate, flip_sign=True):
m = -1 if flip_sign else 1
if callable(learning_rate):
return optax.scale_by_schedule(lambda count: m * learning_rate(count))
return optax.scale(m * learning_rate)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a NAdamW optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
return schedule_fn
# Create optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = nadamw(
learning_rate=lr_schedule_fn,
b1=1.0 - hyperparameters.one_minus_beta1,
b2=hyperparameters.beta2,
eps=1e-8,
weight_decay=hyperparameters.weight_decay)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
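  # Clip by global l2 norm: scale the gradient tree so that its overall norm
  # is at most grad_clip.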
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
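# Illustrative usage sketch, not part of the submission API: it applies a
# single NAdamW update to a toy parameter tree with hypothetical
# hyperparameter values.
if __name__ == '__main__':
  _params = {'w': jnp.ones((3,)), 'b': jnp.zeros(())}
  _tx = nadamw(learning_rate=1e-3, b1=0.9, b2=0.999, weight_decay=1e-2)
  _opt_state = _tx.init(_params)
  _grads = jax.tree_map(jnp.ones_like, _params)
  _updates, _opt_state = _tx.update(_grads, _opt_state, _params)
  _params = optax.apply_updates(_params, _updates)
  print('Updated toy params:', _params)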
|
"""Submission file for an AdamW optimizer with warmup+cosine LR in PyTorch."""
from typing import Dict, Iterator, List, Tuple
from absl import logging
import torch
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an AdamW optimizer and a learning rate schedule."""
del model_state
del rng
optimizer_state = {
'optimizer':
torch.optim.AdamW(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(1.0 - hyperparameters.one_minus_beta1,
hyperparameters.beta2),
eps=1e-8,
weight_decay=hyperparameters.weight_decay,
fused=False),
}
def pytorch_cosine_warmup(step_hint: int, hyperparameters, optimizer):
    warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup = LinearLR(
optimizer, start_factor=1e-10, end_factor=1., total_iters=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
return SequentialLR(
optimizer, schedulers=[warmup, cosine_decay], milestones=[warmup_steps])
optimizer_state['scheduler'] = pytorch_cosine_warmup(
workload.step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
# Log training metrics - loss, grad_norm, batch_size.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
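# Illustrative smoke test, not part of the submission API: it builds the AdamW
# optimizer and warmup+cosine schedule above for a toy model, using a
# hypothetical hyperparameter namespace and step hint, then takes a few steps.
if __name__ == '__main__':
  import types
  _demo_model = torch.nn.Linear(4, 2)
  _demo_hparams = types.SimpleNamespace(
      learning_rate=1e-3,
      one_minus_beta1=0.1,
      beta2=0.999,
      weight_decay=1e-2,
      warmup_factor=0.05)
  _demo_workload = types.SimpleNamespace(step_hint=100)
  _demo_state = init_optimizer_state(
      _demo_workload, _demo_model, None, _demo_hparams, None)
  for _ in range(10):
    _demo_model(torch.randn(8, 4)).sum().backward()
    _demo_state['optimizer'].step()
    _demo_state['scheduler'].step()
    _demo_state['optimizer'].zero_grad()
  print('Learning rate after 10 steps:',
        _demo_state['scheduler'].get_last_lr())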
|
"""Submission file for an AdamW optimizer with warmup+cosine LR in Jax."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an AdamW optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
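    # optax.join_schedules hands steps below `warmup_steps` to the linear
    # warmup and later steps to the cosine decay (with the step count offset
    # so the decay starts from zero).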
return schedule_fn
# Create optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = optax.adamw(
learning_rate=lr_schedule_fn,
b1=1.0 - hyperparameters.one_minus_beta1,
b2=hyperparameters.beta2,
eps=1e-8,
weight_decay=hyperparameters.weight_decay)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for Adafactor in PyTorch."""
from functools import partial
from typing import Dict, Iterator, List, Tuple
from absl import logging
import torch
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an Adafactor optimizer and a learning rate schedule."""
del model_state
del rng
# Create optimizer.
optimizer_state = {
'optimizer':
Adafactor(
model_params.parameters(),
lr=hyperparameters.learning_rate,
beta1=1 - hyperparameters.one_minus_beta1,
weight_decay=hyperparameters.weight_decay),
}
optimizer = optimizer_state['optimizer']
warmup = LinearLR(
optimizer,
start_factor=1e-10,
end_factor=1.,
total_iters=hyperparameters.warmup_steps)
cosine_steps = max(workload.step_hint - hyperparameters.warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
optimizer_state['scheduler'] = SequentialLR(
optimizer,
schedulers=[warmup, cosine_decay],
milestones=[hyperparameters.warmup_steps])
return optimizer_state
class Adafactor(torch.optim.Optimizer):
"""Adapted from https://github.com/huggingface/transformers/blob/main/
src/transformers/optimization.py#L386"""
def __init__(
self,
params,
lr=None,
beta1=0.9,
decay_adam=0.99,
weight_decay=0.0,
):
defaults = dict(
lr=lr,
beta1=beta1,
decay_adam=decay_adam,
weight_decay=weight_decay,
decay_pow=0.0,
layerwise_adaptation=False,
decay_method='adam',
clip_threshold=1.0,
factored=True,
epsilon1_grad_sq_reg=1e-30,
respect_skip_lp_regularization=False,
exclude_from_layerwise_adaptation=None,
per_var_learning_summary=False,
sort_factored_second_moment_dims=False,
# Unused because sort_factored_second_moment_dims=False.
min_dim_size_to_factor=128,
multiply_by_parameter_scale=False,
# Unused because multiply_by_parameter_scale=False.
epsilon2_param_scale_reg=1e-3,
maybe_inf_to_nan=True,
)
super().__init__(params, defaults)
def inf_to_nan(self, group, x):
if group["maybe_inf_to_nan"]:
x = torch.nan_to_num(x, nan=torch.nan, posinf=torch.nan, neginf=torch.nan)
return x
def step(self, closure=None):
"""
    Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
inf_to_nan = partial(self.inf_to_nan, group)
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
grad = inf_to_nan(grad)
if grad.dtype in {torch.float16, torch.bfloat16}:
grad = grad.float()
if grad.is_sparse:
raise RuntimeError("Adafactor does not support sparse gradients.")
state = self.state[p]
grad_shape = grad.shape
factored = len(grad_shape) >= 2
# State Initialization
if len(state) == 0:
state["step"] = 0
state["exp_avg"] = torch.zeros_like(grad)
if factored:
state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] +
grad_shape[-1:]).to(grad)
else:
state["exp_avg_sq"] = torch.zeros_like(grad)
else:
state["exp_avg"] = state["exp_avg"].to(grad)
if factored:
state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
else:
state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
p_data_fp32 = p.data
if p.data.dtype in {torch.float16, torch.bfloat16}:
p_data_fp32 = p_data_fp32.float()
state["step"] += 1
lr = group["lr"]
beta1 = group["beta1"]
beta2 = group["decay_adam"]
t = state["step"]
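        # Adam-style second-moment decay rate with the bias-correction factor
        # folded in, following the Adafactor paper (Shazeer & Stern, 2018).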
beta2t = beta2 * (1. - beta2**(t - 1.)) / (1. - beta2**t)
exp_avg_sq_update = (grad**2) + group["epsilon1_grad_sq_reg"]
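        # Factored second moment: keep exponential averages of the row means
        # and column means of grad**2 and reconstruct the full preconditioner
        # from their outer product.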
if factored:
exp_avg_sq_row = state["exp_avg_sq_row"]
exp_avg_sq_col = state["exp_avg_sq_col"]
exp_avg_sq_row.mul_(beta2t).add_(
exp_avg_sq_update.mean(dim=-1), alpha=1.0 - beta2t)
exp_avg_sq_col.mul_(beta2t).add_(
exp_avg_sq_update.mean(dim=-2), alpha=1.0 - beta2t)
r_factor = inf_to_nan(
exp_avg_sq_row /
exp_avg_sq_row.mean(dim=-1, keepdim=True)).unsqueeze(-1)
c_factor = inf_to_nan(exp_avg_sq_col).unsqueeze(-2)
denom = r_factor * c_factor
else:
exp_avg_sq = state["exp_avg_sq"]
exp_avg_sq.mul_(beta2t).add_(exp_avg_sq_update, alpha=1.0 - beta2t)
denom = exp_avg_sq
denom = denom.sqrt()
update = grad / denom
# Clip the update based on RMS.
        clipping_denom = inf_to_nan(
            torch.square(update).mean().sqrt() /
            group["clip_threshold"]).clamp(min=1.0)
update = update / clipping_denom * lr
# Momentum
exp_avg = state["exp_avg"]
exp_avg.mul_(beta1).add_(update, alpha=1 - beta1)
if group["weight_decay"] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * lr)
p_data_fp32.add_(-exp_avg)
if p.data.dtype in {torch.float16, torch.bfloat16}:
p.data.copy_(p_data_fp32)
return loss
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
# Log training metrics - loss, grad_norm, batch_size.
if global_step <= 100 or global_step % 500 == 0:
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(
workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
    rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
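# Illustrative smoke test, not part of the submission API: it runs the
# Adafactor optimizer defined above on one toy weight matrix, using
# hypothetical settings, and takes a few steps on a constant gradient.
if __name__ == '__main__':
  _weight = torch.nn.Parameter(torch.randn(8, 4))
  _demo_opt = Adafactor([_weight], lr=1e-2, beta1=0.9, weight_decay=1e-4)
  for _ in range(5):
    _weight.grad = torch.ones_like(_weight)
    _demo_opt.step()
    _demo_opt.zero_grad()
  print('Weight RMS after 5 steps:', _weight.detach().pow(2).mean().sqrt())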
|
"""Submission file for an Adafactor optimizer with warmup+cosine LR in Jax."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from baselines.adafactor.jax.sharded_adafactor import sharded_adafactor
_GRAD_CLIP_EPS = 1e-6
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an Adafactor optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
return schedule_fn
# Create optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = sharded_adafactor(
learning_rate=lr_schedule_fn,
beta1=1.0 - hyperparameters.one_minus_beta1,
weight_decay=hyperparameters.weight_decay)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
# coding=utf-8
# Copyright 2023 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PAX/Praxis implementation of Adafactor.
Copied from Praxis's `sharded_adafactor`, removing unnecessary sharding-related
code and dependencies on Praxis.
Code:
https://github.com/google/praxis/blob/516a96bce6f03090c5903531038f8f8af6212250/praxis/optimizers.py#L2308
Forked from:
https://github.com/google/init2winit/master/init2winit/optimizer_lib/pax_adafactor.py
"""
import dataclasses
import functools
import re
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import jax
from jax import numpy as jnp
import optax
JTensor = Any
NestedJTensor = Any
NestedHParams = Any
def to_quantized(fvalue: JTensor,
quantized_dtype: jnp.dtype) -> Tuple[JTensor, JTensor]:
"""Converts floating point values `fvalues` to quantized values.
We use a very simple quantization scheme where the range is symmetric around
0.0, and we simply map 0 to 0.0.
Let x = bucket_size
We map [-0.5x, 0.5x] to 0
[-1.5x, -0.5x] to -1
[0.5x, 1.5x] to 1
  and so on.
Some properties:
a1, a2 = to_quantized(x, quantized_dtype)
b1 = to_float(a1, a2)
c1, c2 = to_quantized(b1, quantized_dtype)
then a1 == c1, a2 == c2
Args:
fvalue: Values in floating point.
quantized_dtype: Quantized dtype, can be either jnp.int8, or jnp.int16.
Returns:
    A (quantized_values, bucket_size) 2-tuple;
    `quantized_values * bucket_size[jnp.newaxis, ...]` approximately recovers
    the original floating-point values.
"""
float_dtype = fvalue.dtype
if quantized_dtype == jnp.int8:
# value -128 is not used.
num_buckets = jnp.array(127.0, dtype=float_dtype)
elif quantized_dtype == jnp.int16:
# value -32768 is not used.
num_buckets = jnp.array(32767.0, dtype=float_dtype)
else:
raise ValueError(f'Quantized dtype {quantized_dtype} not supported.')
# max value is mapped to num_buckets
# We first decide the scale.
if fvalue.ndim < 1:
raise ValueError(
f'Input array {fvalue} must have a strictly positive number of '
'dimensions.')
max_abs = jnp.max(jnp.abs(fvalue), axis=0)
bucket_size = max_abs / num_buckets
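  # Example: with an int8 target and a column whose largest |value| is 1.27,
  # bucket_size is 0.01, so a value of 0.25 quantizes to the integer 25.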
bs_expanded = bucket_size[jnp.newaxis, ...]
# To avoid divide by 0.0
bs_nonzero = jnp.where(bs_expanded > 0.0,
bs_expanded,
jnp.ones_like(bs_expanded))
ratio = fvalue / bs_nonzero
# We use rounding to remove bias.
quantized = jnp.round(ratio)
return quantized.astype(quantized_dtype), bucket_size
def to_float(quantized: JTensor, bucket_size: JTensor) -> JTensor:
"""Converts quantized values to float values.
Args:
quantized: Quantized values, of type either jnp.int8 or jnp.int16.
    bucket_size: The size of each bucket on the floating-point axis. bucket_size
      has one fewer dimension than `quantized`. For example, if quantized is of
      shape [x, ...], bucket_size is of shape [...].
Returns:
Unquantized values of type bucket_size.dtype.
"""
float_dtype = bucket_size.dtype
bucket_size = bucket_size[jnp.newaxis, ...]
return quantized.astype(float_dtype) * bucket_size
def adafactor_decay_rate_adam(beta2: float, step_counter: JTensor) -> JTensor:
"""Second-moment decay rate like Adam, subsuming the correction factor.
Args:
beta2: A floating point value between 0 and 1.
step_counter: A scalar tensor keeping track of the number of steps
performed.
Returns:
The decay rate as a scalar JTensor.
"""
step = step_counter
beta2 = jnp.array(beta2, dtype=jnp.float32)
t = step + 1.
return beta2 * (1. - jnp.power(beta2, t - 1.)) / (1. - jnp.power(beta2, t))
def adafactor_decay_rate_pow(exponent: float, step_counter: JTensor) -> JTensor:
"""Second moment decay rate where memory-length grows as step_num^exponent.
Args:
exponent: A floating point value between 0 and 1.
step_counter: A scalar tensor keeping track of the number of steps
performed.
Returns:
The decay rate as a scalar JTensor.
"""
step = step_counter
exponent = jnp.array(exponent, dtype=jnp.float32)
return 1. - jnp.power((step + 1.), -exponent)
def reduce_mean(array: JTensor) -> JTensor:
"""Computes the mean of `array` in a more numerically stable way.
Args:
array: Input array.
Returns:
The mean of the input array as a scalar array.
"""
num_elements = array.size
if num_elements > 1e8:
# When x is too large, simple jnp.mean() can result in nan or inf values.
# TODO(bf-jax): The following code snippet is consistent with the TensorFlow
# implementation. This can be simplified into `jnp.mean(jnp.mean(x, -1))`.
# Update to using mean() after verifying consistency.
array_sum = jnp.sum(array, axis=-1)
array_sum = jnp.sum(array_sum)
return array_sum / jnp.array(num_elements, dtype=array_sum.dtype)
else:
return jnp.mean(array)
def reduce_rms(array: JTensor) -> JTensor:
"""Computes the RMS of `array` (in a numerically stable way).
Args:
array: Input array.
Returns:
The root mean square of the input array as a scalar array.
"""
sq = jnp.square(array)
sq_mean = reduce_mean(sq)
return jnp.sqrt(sq_mean)
@dataclasses.dataclass(frozen=True)
class _ShardedAdafactorUpdateResult:
"""Structure containing per-variable info for Adafactor."""
update: Optional[Any]
m: Optional[Any]
m_scale: Optional[Any]
vr: Optional[Any]
vc: Optional[Any]
v: Optional[Any]
class ShardedAdafactorState(NamedTuple):
"""Overall state of the ShardedAdafactor optimizer."""
count: JTensor
m: Optional[NestedJTensor]
m_scale: Optional[NestedJTensor]
vr: Optional[NestedJTensor]
vc: Optional[NestedJTensor]
v: Optional[NestedJTensor]
class _ShardedAdafactorHelper:
"""Helper class to implement optax-based sharded Adafactor."""
def __init__(self,
learning_rate: optax.Schedule,
weight_decay: Optional[float],
layerwise_adaptation: bool,
decay_method: str,
decay_adam: float,
decay_pow: float,
beta1: float,
clip_threshold: Optional[float],
factored: bool,
epsilon1_grad_sq_reg: float,
quantized_dtype: jnp.dtype,
respect_skip_lp_regularization: bool,
exclude_from_layerwise_adaptation: Optional[List[str]],
per_var_learning_summary: bool,
sort_factored_second_moment_dims: bool,
min_dim_size_to_factor: int,
multiply_by_parameter_scale: bool,
epsilon2_param_scale_reg: float,
maybe_inf_to_nan: bool,
nesterov: bool) -> None:
"""Constructor. See ShardedAdafactor() below."""
self._learning_rate = learning_rate
self._weight_decay = weight_decay
self._layerwise_adaptation = layerwise_adaptation
self._decay_method = decay_method
self._decay_adam = decay_adam
self._decay_pow = decay_pow
self._beta1 = beta1
self._clip_threshold = clip_threshold
self._factored = factored
self._epsilon1 = epsilon1_grad_sq_reg
self._quantized_dtype = quantized_dtype
self._respect_skip_lp_regularization = respect_skip_lp_regularization
self._exclude_from_layerwise_adaptation = exclude_from_layerwise_adaptation
self._per_var_learning_summary = per_var_learning_summary
self._sort_factored_second_moment_dims = sort_factored_second_moment_dims
self._min_dim_size_to_factor = min_dim_size_to_factor
self._multiply_by_parameter_scale = multiply_by_parameter_scale
self._epsilon2 = epsilon2_param_scale_reg
self._maybe_inf_to_nan = maybe_inf_to_nan
self._nesterov = nesterov
def should_use_factored_second_moment_estimate(self, shape):
"""Should we use a factored second moment estimator.
Based on the shape of the variable.
Args:
shape: a list of integers.
Returns:
A boolean.
"""
return self.factored_second_moment_dims(shape) is not None
def factored_second_moment_dims(self, shape):
"""Should we use a factored second moment estimator.
    We select the largest and second largest var dims as row and column dims.
Default list of factored dims is -1, -2.
Args:
shape: a list of integers.
Returns:
      Either a tuple of two dimension indices (row, col), or None.
"""
if not self._factored:
return None
if len(shape) < 2:
return None
if not self._sort_factored_second_moment_dims:
return len(shape) - 1, len(shape) - 2
def largest_two_dim_indices():
s = [(s, i) for i, s in enumerate(shape)]
sorted_dims = sorted(s, key=lambda d: -d[0])
return sorted_dims[0][1], sorted_dims[1][1]
r_idx, c_idx = largest_two_dim_indices()
if shape[c_idx] < self._min_dim_size_to_factor:
return None
return r_idx, c_idx
def should_store_momentum_in_qint(self, shape):
"""Should we store momentum as quantized integers.
Based on the shape of the variable.
Args:
shape: a list of integers
Returns:
A boolean.
"""
if jnp.issubdtype(self._quantized_dtype, jnp.floating):
return False
if self._quantized_dtype is None:
return False
return len(shape) >= 1
def to_state(self, count, result_tree):
"""Maps from a tree of (factored) values to separate trees of values."""
return ShardedAdafactorState(
count=count,
m=jax.tree_map(lambda o: o.m, result_tree),
m_scale=jax.tree_map(lambda o: o.m_scale, result_tree),
vr=jax.tree_map(lambda o: o.vr, result_tree),
vc=jax.tree_map(lambda o: o.vc, result_tree),
v=jax.tree_map(lambda o: o.v, result_tree))
def init(self, param):
"""Initializes the optimizer state for a given param."""
    # The actual value that will be added to a variable to update it.
output_update = jnp.zeros((1,))
output_m = jnp.zeros((1,))
output_m_scale = jnp.zeros((1,))
output_vr = jnp.zeros((1,))
output_vc = jnp.zeros((1,))
output_v = jnp.zeros((1,))
shape = param.shape
if self._beta1:
if jnp.issubdtype(self._quantized_dtype, jnp.floating):
output_m = jnp.zeros(shape, dtype=self._quantized_dtype)
elif self.should_store_momentum_in_qint(shape):
output_m = jnp.zeros(shape, dtype=self._quantized_dtype)
scale_shape = shape[1:]
output_m_scale = jnp.zeros(scale_shape, dtype=jnp.float32)
else:
output_m = jnp.zeros(shape, dtype=jnp.float32)
if self.should_use_factored_second_moment_estimate(shape):
factored_dims = self.factored_second_moment_dims(shape)
vr_axis, vc_axis = factored_dims
output_vr_shape = list(shape).copy()
del output_vr_shape[vr_axis]
output_vc_shape = list(shape).copy()
del output_vc_shape[vc_axis]
output_vr = jnp.zeros(output_vr_shape, dtype=jnp.float32)
output_vc = jnp.zeros(output_vc_shape, dtype=jnp.float32)
else:
output_v = jnp.zeros(shape, dtype=jnp.float32)
return _ShardedAdafactorUpdateResult(
update=output_update,
m=output_m,
m_scale=output_m_scale,
vr=output_vr,
vc=output_vc,
v=output_v)
def inf_to_nan(self, array):
"""Converting Infinity values to the more sticky NaN."""
# For example, when we have y = 1.0 / x in code and x == inf, y will become
# 0. Therefore the infinite value of x is hidden in the calculation,
# leading to silent omission of numerical issues.
if not self._maybe_inf_to_nan:
return array
return jnp.nan_to_num(array, nan=jnp.nan, posinf=jnp.nan, neginf=jnp.nan)
def parameter_scale(self, var):
"""Estimate the scale of the parameters from the current values.
We include a minimum value of 0.001 to give it a chance to escape 0
if it was zero-initialized.
Instead of using the value, we could impute the scale from the shape,
as initializers do.
Args:
var: a variable or Tensor.
Returns:
a Scalar
"""
return jnp.maximum(reduce_rms(var), jnp.asarray(self._epsilon2, var.dtype))
def compute_var_and_slot_update(self,
count,
grad,
m,
m_scale,
vr,
vc,
v,
param,
var_name=None):
"""Computes the var and optimizer slots updates for a single variable."""
# We can probably skip this step
grad = grad.astype(jnp.float32)
grad = self.inf_to_nan(grad)
grad_squared = jnp.square(grad)
# Add epsilon1_grad_sq_reg as per Algorithm 4
# of https://arxiv.org/pdf/1804.04235.pdf
grad_squared += self._epsilon1
grad_squared_mean = self.inf_to_nan(reduce_mean(grad_squared))
if self._decay_method == 'adam':
assert self._decay_adam > 0
decay_rate = adafactor_decay_rate_adam(self._decay_adam, count)
elif self._decay_method == 'pow':
assert self._decay_pow > 0
decay_rate = adafactor_decay_rate_pow(self._decay_pow, count)
else:
raise ValueError(f'decay_method {self._decay_method} not supported.')
learning_rate = self._learning_rate
if callable(learning_rate):
learning_rate = learning_rate(count)
update_scale = learning_rate
old_val = param
if self._multiply_by_parameter_scale:
update_scale *= self.parameter_scale(old_val).astype(update_scale.dtype)
# Q(yonghui): Can we remove the hack now?
# HACK: Make things dependent on grad.
# This confounds the XLA rewriter and keeps it from fusing computations
    # across different variables. This fusion is bad for HBM usage, since
# it causes the gradients to persist in memory.
decay_rate += grad_squared_mean * 1e-30
update_scale += grad_squared_mean * 1e-30
# END HACK
mixing_rate = 1. - decay_rate
shape = param.shape
output_m = jnp.zeros((1,))
output_m_scale = jnp.zeros((1,))
output_vr = jnp.zeros((1,))
output_vc = jnp.zeros((1,))
output_v = jnp.zeros((1,))
factored_second_moment_dims = self.factored_second_moment_dims(shape)
if factored_second_moment_dims is not None:
# Q(shafey): Should we use the more numerically stable version
# reduce_mean().
vr_axis, vc_axis = factored_second_moment_dims
grad_squared_row_mean = self.inf_to_nan(
jnp.mean(grad_squared, axis=vr_axis))
grad_squared_col_mean = self.inf_to_nan(
jnp.mean(grad_squared, axis=vc_axis))
new_vr = decay_rate * vr + mixing_rate * grad_squared_row_mean
new_vc = decay_rate * vc + mixing_rate * grad_squared_col_mean
output_vr = new_vr
output_vc = new_vc
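      # Reconstruct the preconditioner from the factored row/column statistics;
      # normalizing the row factor by its mean keeps the estimate on the same
      # scale as the unfactored second moment.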
long_term_mean = jnp.mean(new_vr, axis=-1, keepdims=True)
r_factor = 1. / jnp.sqrt(new_vr / long_term_mean)
c_factor = 1. / jnp.sqrt(new_vc)
x = grad * jnp.expand_dims(r_factor, vr_axis) * jnp.expand_dims(
c_factor, vc_axis)
else:
# v with sharding annotation.
new_v = decay_rate * v + mixing_rate * grad_squared
output_v = new_v
x = grad / jnp.sqrt(new_v)
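    # Update clipping: rescale the update whenever its RMS exceeds
    # clip_threshold; this is the scale-invariant analog of gradient clipping
    # described in the Adafactor paper.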
if self._clip_threshold is not None:
clipping_denom = jnp.maximum(1., reduce_rms(x) / self._clip_threshold)
clipping_denom = self.inf_to_nan(clipping_denom)
x /= clipping_denom
subtrahend = update_scale * x
if self._beta1:
if jnp.issubdtype(self._quantized_dtype, jnp.floating):
m = m.astype(jnp.float32)
elif self.should_store_momentum_in_qint(shape):
m_init_dtype = m.dtype
m = to_float(m, m_scale)
if self._nesterov:
subtrahend_original = subtrahend
subtrahend = self._beta1 * m + (1. - self._beta1) * subtrahend
subtrahend = self.inf_to_nan(subtrahend)
if self._quantized_dtype == jnp.bfloat16:
new_m = subtrahend.astype(jnp.bfloat16)
output_m = new_m
elif self.should_store_momentum_in_qint(shape):
# Update the momentum values.
new_m_val, new_m_scale = to_quantized(subtrahend, m_init_dtype)
output_m = new_m_val
output_m_scale = new_m_scale
else:
output_m = subtrahend
if self._nesterov:
subtrahend = (
self._beta1 * subtrahend +
(1.0 - self._beta1) * subtrahend_original)
if self._weight_decay is not None:
# Apply decoupled weight decay to be consistent with AdamW.
var_weight_decay = None
if isinstance(self._weight_decay, dict):
for scope_pattern in self._weight_decay.keys():
regex_pattern = re.compile(scope_pattern)
if regex_pattern.match(var_name):
var_weight_decay = self._weight_decay[scope_pattern]
else:
var_weight_decay = self._weight_decay
if var_weight_decay is not None:
weight_decay = var_weight_decay * learning_rate
subtrahend += weight_decay * old_val
if self._layerwise_adaptation:
include = True
if self._exclude_from_layerwise_adaptation is not None:
for scope_pattern in self._exclude_from_layerwise_adaptation:
regex_pattern = re.compile(scope_pattern)
if regex_pattern.match(var_name):
include = False
break
if include:
w_norm = reduce_rms(old_val)
g_norm = reduce_rms(subtrahend / update_scale) + self._epsilon1
ratio = w_norm / g_norm
ratio = jnp.where(
jnp.greater(w_norm, 0),
jnp.where(jnp.greater(g_norm, 0), (w_norm / g_norm), 1.0),
1.0)
subtrahend *= ratio
return _ShardedAdafactorUpdateResult(
update=-subtrahend,
m=output_m,
m_scale=output_m_scale,
vr=output_vr,
vc=output_vc,
v=output_v)
def sharded_adafactor(
learning_rate: optax.Schedule,
weight_decay: Optional[Union[float, Dict[str, float]]] = None,
layerwise_adaptation: bool = False,
decay_method: str = 'adam',
decay_adam: float = 0.99,
decay_pow: float = 0.,
beta1: float = 0.9,
clip_threshold: Optional[float] = 1.,
factored: bool = True,
epsilon1_grad_sq_reg: float = 1e-30,
quantized_dtype: jnp.dtype = jnp.int8,
respect_skip_lp_regularization: bool = False,
exclude_from_layerwise_adaptation: Optional[List[str]] = None,
per_var_learning_summary: bool = False,
sort_factored_second_moment_dims: bool = False,
# min_dim_size_to_factor is only used when
# sort_factored_second_moment_dims=True.
min_dim_size_to_factor: int = 128,
multiply_by_parameter_scale: bool = False,
epsilon2_param_scale_reg: float = 1e-3,
maybe_inf_to_nan: bool = True,
nesterov: bool = False,
) -> optax.GradientTransformation:
"""AdaFactor optimizer that supports SPMD sharding.
Reference:
Shazeer et al, 2018: https://arxiv.org/abs/1804.04235
Adafactor is very similar to Adam (Kingma and Ba, 2019), the major
differences being:
1. For a two-dimensional AxB weight matrix, Adafactor uses only A+B auxiliary
parameters to maintain the second-moment estimator, instead of AB.
This is advantageous on memory-limited systems. In addition, beta1
(momentum) is set to zero by default, saving an additional auxiliary
parameter per weight. Variables with >=3 dimensions are treated as
collections of two-dimensional matrices - factorization is over the final
two dimensions.
2. Adafactor incorporates "update-clipping" - a scale-invariant analog of
gradient clipping. This improves stability.
3. Adafactor does not require an external "learning rate". By default, it
incorporates a relative-update-scale schedule, corresponding to
inverse-square-root learning-rate-decay in Adam. We hope this works well
for most applications.
Args:
learning_rate: a callable that given the current training step, returns the
learning rate to apply.
weight_decay: an optional float tensor as decoupled weight decay value, or a
dictionary with key as regex scope pattern and value as corresponding
weight decay float tensor. The value will apply to all variables under
that scope name.
layerwise_adaptation: a boolean, whether or not to use layer-wise adaptive
moments (LAMB) https://arxiv.org/abs/1904.00962.
decay_method: a string, deciding how decay_rate should be computed.
Permitted values are 'adam' and 'pow'.
decay_adam: a float, decay if decay_method == 'adam'.
decay_pow: a float, decay if decay_method == 'pow'.
beta1: a float value between 0 and 1 for momentum.
clip_threshold: an optional float >= 1
factored: a boolean, whether or not to use factored second order momentum.
epsilon1_grad_sq_reg: Regularization constant for squared gradient.
quantized_dtype: type of the quantized input. Allowed options are jnp.int8,
jnp.int16, jnp.bfloat16 and jnp.float32. If floating-point type is
specified, accumulators are stored as such type, instead of quantized
integers.
respect_skip_lp_regularization: whether or not to respect lingvo
SKIP_LP_REGULARIZATION var collection that skips decoupled weight decay.
    exclude_from_layerwise_adaptation: an optional list of regex scope
      patterns for variables to skip when layerwise_adaptation is enabled.
per_var_learning_summary: a bool, whether or not to export per-var learning
summaries.
sort_factored_second_moment_dims: a bool, whether to select dims to factor
by size, for the factored second moment.
min_dim_size_to_factor: an integer, only factor the statistics if two array
dimensions have at least this size. NOTE min_dim_size_to_factor is only
used when sort_factored_second_moment_dims=True.
multiply_by_parameter_scale: a boolean, if True, then scale learning_rate by
      parameter scale. If False, the provided learning_rate is the absolute
      step size. NOTE: False by default.
epsilon2_param_scale_reg: Regularization constant for parameter scale. Only
used when multiply_by_parameter_scale is True.
maybe_inf_to_nan: Will use jax.nan_to_num during update when True.
nesterov: Will use Nesterov momentum when True.
Returns:
A `ShardedGradientTransformation`.
"""
assert not respect_skip_lp_regularization
assert decay_adam >= 0
assert decay_pow >= 0
assert learning_rate is not None
assert decay_method == 'adam' or decay_method == 'pow', (
f'decay_method: {decay_method} not supported. Supported methods are '
'"pow", or "adam".')
sharded_adafactor_helper = _ShardedAdafactorHelper(
learning_rate=learning_rate,
weight_decay=weight_decay,
layerwise_adaptation=layerwise_adaptation,
decay_method=decay_method,
decay_adam=decay_adam,
decay_pow=decay_pow,
beta1=beta1,
clip_threshold=clip_threshold,
factored=factored,
epsilon1_grad_sq_reg=epsilon1_grad_sq_reg,
quantized_dtype=quantized_dtype,
respect_skip_lp_regularization=respect_skip_lp_regularization,
exclude_from_layerwise_adaptation=exclude_from_layerwise_adaptation,
per_var_learning_summary=per_var_learning_summary,
sort_factored_second_moment_dims=sort_factored_second_moment_dims,
min_dim_size_to_factor=min_dim_size_to_factor,
multiply_by_parameter_scale=multiply_by_parameter_scale,
epsilon2_param_scale_reg=epsilon2_param_scale_reg,
maybe_inf_to_nan=maybe_inf_to_nan,
nesterov=nesterov)
def init_fn(params):
"""Initializes the optimizer's state."""
return sharded_adafactor_helper.to_state(
jnp.zeros([], jnp.int32),
jax.tree_map(sharded_adafactor_helper.init, params))
def update_fn(updates, state, params=None):
if params is None:
raise ValueError(
'You are using a transformation that requires the current value of '
'parameters, but you are not passing `params` when calling `update`.')
compute_var_and_slot_update_fn = functools.partial(
sharded_adafactor_helper.compute_var_and_slot_update, state.count)
output = jax.tree_map(compute_var_and_slot_update_fn,
updates,
state.m,
state.m_scale,
state.vr,
state.vc,
state.v,
params)
updates = jax.tree_map(lambda o: o.update, output)
count_plus_one = state.count + jnp.array(1, jnp.int32)
updated_states = sharded_adafactor_helper.to_state(count_plus_one, output)
return updates, updated_states
return optax.GradientTransformation(init=init_fn, update=update_fn)
|
"""Submission file for a SAM optimizer with warmup+cosine LR in PyTorch."""
from typing import Callable, Dict, Iterator, List, Tuple
from absl import logging
import torch
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
# Modified from https://github.com/davda54/sam.
class SAM(torch.optim.Optimizer):
def __init__(self,
params: spec.ParameterContainer,
base_optimizer: torch.optim.Optimizer,
rho: float = 0.05,
adaptive: bool = False,
**kwargs):
if rho < 0.0:
raise ValueError(f'Invalid rho, should be non-negative: {rho}')
defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
super().__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
self.defaults.update(self.base_optimizer.defaults)
@torch.no_grad()
def first_step(self, zero_grad: bool = False):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group['rho'] / grad_norm
for p in group['params']:
if p.grad is None:
continue
self.state[p]['old_p'] = p.data.clone()
factor = torch.pow(p, 2) if group['adaptive'] else 1.0
e_w = factor * p.grad * scale.to(p)
p.add_(e_w) # Climb to the local maximum 'w + e(w)'.
if zero_grad:
self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad: bool = False):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
p.data = self.state[p]['old_p'] # Get back to 'w' from 'w + e(w)'.
self.base_optimizer.step() # Do the actual 'sharpness-aware' update.
if zero_grad:
self.zero_grad()
@torch.no_grad()
def step(self, closure: Callable = None):
if closure is None:
raise ValueError('SAM requires closure, but it was not provided.')
# The closure should do a full forward-backward pass.
closure = torch.enable_grad()(closure)
self.first_step(zero_grad=True)
closure()
self.second_step()
def _grad_norm(self):
# In case of model parallelism, put everything on the same device.
shared_device = self.param_groups[0]['params'][0].device
norm = torch.norm(
torch.stack([((torch.abs(p) if group['adaptive'] else 1.0) *
p.grad).norm(p=2).to(shared_device)
for group in self.param_groups
for p in group['params']
if p.grad is not None]),
p=2)
return norm
def load_state_dict(self, state_dict: Dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
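# Minimal usage sketch of the wrapper above (illustrative only; `model`,
# `loss_fn`, `inputs` and `targets` are placeholders, not defined in this
# file):
#
#   optimizer = SAM(model.parameters(), torch.optim.AdamW, rho=0.05, lr=1e-3)
#
#   def closure():
#     # Forward-backward pass at the perturbed weights 'w + e(w)'.
#     loss_fn(model(inputs), targets).backward()
#
#   loss_fn(model(inputs), targets).backward()  # Gradients at 'w'.
#   optimizer.step(closure)  # first_step -> closure -> second_step.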
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an AdamW optimizer and a learning rate schedule."""
del model_state
del rng
# Create SAM optimizer with AdamW base.
base_optimizer = torch.optim.AdamW
optimizer_state = {
'optimizer':
SAM(model_params.parameters(),
base_optimizer=base_optimizer,
rho=hyperparameters.rho,
lr=hyperparameters.learning_rate,
betas=(1.0 - hyperparameters.one_minus_beta1,
hyperparameters.beta2),
eps=1e-8,
weight_decay=hyperparameters.weight_decay),
}
def pytorch_cosine_warmup(step_hint: int, hyperparameters, optimizer):
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup = LinearLR(
optimizer, start_factor=1e-10, end_factor=1., total_iters=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
return SequentialLR(
optimizer, schedulers=[warmup, cosine_decay], milestones=[warmup_steps])
# Create learning rate schedule.
optimizer_state['scheduler'] = pytorch_cosine_warmup(
workload.step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
def _loss_fn(params, update_batch_norm=True):
"""Loss function used for training."""
logits_batch, new_model_state = workload.model_fn(
params=params,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=update_batch_norm)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
return loss, new_model_state
# First backward pass.
loss, _ = _loss_fn(current_model, update_batch_norm=True)
loss.backward()
logging_loss = loss.clone().detach()
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
optimizer_state['optimizer'].first_step(zero_grad=True)
# Second forward-backward pass.
loss, new_model_state = _loss_fn(current_model, update_batch_norm=False)
loss.backward()
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].second_step(zero_grad=True)
optimizer_state['scheduler'].step()
  # Log training metrics - loss, grad_norm.
if global_step <= 100 or global_step % 500 == 0:
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': logging_loss.item(),
'grad_norm': grad_norm.item(),
},
global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
logging_loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(
workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a SAM optimizer with warmup+cosine LR in Jax."""
import functools
from typing import Dict, Iterator, List, Optional, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
# Copied from the official SAM GitHub repository. Note how it doesn't add an
# epsilon to the gradient norm before normalizing the gradients.
def dual_vector(y: jnp.ndarray) -> jnp.ndarray:
"""Returns the solution of max_x y^T x s.t.
||x||_2 <= 1.
Args:
y: A pytree of numpy ndarray, vector y in the equation above.
"""
gradient_norm = jnp.sqrt(
sum(jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)))
normalized_gradient = jax.tree_map(lambda x: x / gradient_norm, y)
return normalized_gradient
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/
# sharpness_aware_minimization.py
def sharpness_aware_minimization(
rho: float,
grad_clip: Optional[float],
batch_axis_name: str,
base_opt_init_fn,
base_opt_update_fn,
) -> optax.GradientTransformation:
"""Implementation of Sharpness Aware Minimization (SAM).
Paper: https://arxiv.org/abs/2010.01412
Code: https://github.com/google-research/sam
References:
Foret et al, 2021: https://arxiv.org/abs/2010.01412
Args:
rho: The size of the neighborhood for the sharpness aware minimization
gradient updates. Defaults to 0.1.
grad_clip: The optional value to clip the updates by. Defaults to None.
batch_axis_name: the name of the axis to pmap over. Used to run a pmean
before applying the optimizer update.
base_opt_init_fn: The initialization function for the base optimizer used to
generate updates given the total gradient.
base_opt_update_fn: The update function for the base optimizer used to
generate updates given the total gradient.
Returns:
The corresponding `GradientTransformation`.
"""
def init_fn(params):
return base_opt_init_fn(params)
def update_fn(updates, state, grad_fn_params_tuple):
(grad_fn, params) = grad_fn_params_tuple
    # Updates have already been synced (averaged) across devices before being
    # passed to this transformation. Below we compute the gradients at the
    # perturbed parameters, take the same global mean, and clip them with the
    # same 1e-6 epsilon that is used for gradient clipping elsewhere.
updates = dual_vector(updates)
noised_params = jax.tree_util.tree_map(
lambda p, u: p + rho * u, params, updates)
(_, (n_valid_examples, _)), updates = grad_fn(noised_params)
# Get correct global mean grad.
(n_valid_examples, updates) = lax.psum((n_valid_examples, updates),
axis_name=batch_axis_name)
updates = jax.tree_map(lambda x: x / n_valid_examples, updates)
if grad_clip:
updates_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(updates)))
scaled_updates = jax.tree_map(
lambda x: x / (updates_norm + _GRAD_CLIP_EPS) * grad_clip, updates)
updates = jax.lax.cond(updates_norm > grad_clip,
lambda _: scaled_updates,
lambda _: updates,
None)
updates, state = base_opt_update_fn(updates, state, params)
return updates, state
return optax.GradientTransformation(init_fn, update_fn)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a SAM optimizer (with AdamW base) and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
return schedule_fn
# Create base optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = optax.adamw(
learning_rate=lr_schedule_fn,
b1=1.0 - hyperparameters.one_minus_beta1,
b2=hyperparameters.beta2,
eps=1e-8,
weight_decay=hyperparameters.weight_decay)
# Create SAM update fn.
grad_clip = (
hyperparameters.grad_clip
if hasattr(hyperparameters, 'grad_clip') else None)
opt_init_fn, opt_update_fn = sharpness_aware_minimization(
rho=hyperparameters.rho,
grad_clip=grad_clip,
batch_axis_name='batch',
base_opt_init_fn=opt_init_fn,
base_opt_update_fn=opt_update_fn)
# Initialize optimizer state.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params, update_batch_norm=True):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=update_batch_norm)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
second_grad_fn = jax.value_and_grad(
functools.partial(_loss_fn, update_batch_norm=False), has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
updates, new_optimizer_state = opt_update_fn(
grad, optimizer_state, (second_grad_fn, current_param_container))
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(
workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Tuple[spec.Tensor, spec.Tensor, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a SGD with HeavyBall momentum optimizer in PyTorch."""
from typing import Callable, Dict, Iterator, List, Tuple
from absl import logging
import optax
import torch
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import LambdaLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_state
del rng
# Create optimizer.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=hyperparameters.learning_rate,
momentum=1.0 - hyperparameters.one_minus_beta1,
weight_decay=hyperparameters.weight_decay,
nesterov=False),
}
# Create learning rate schedule.
lr_schedule_fn = create_lr_schedule_fn(workload.step_hint, hyperparameters)
# PyTorch's LambdaLR expects the lr_lambda fn to return a factor which will
# be multiplied with the base lr, so we have to divide by it here.
def _lr_lambda(step: int) -> float:
return lr_schedule_fn(step).item() / hyperparameters.learning_rate
optimizer_state['scheduler'] = LambdaLR(
optimizer_state['optimizer'], lr_lambda=_lr_lambda)
return optimizer_state
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
decay_steps = step_hint - warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn], boundaries=[warmup_steps])
return lr_schedule_fn
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
  # Log training metrics - loss, grad_norm.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a SGD with HeavyBall momentum optimizer in Jax."""
import functools
from typing import Callable, Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
# Create learning rate schedule.
lr_schedule_fn = create_lr_schedule_fn(workload.step_hint, hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = sgd(
learning_rate=lr_schedule_fn,
weight_decay=hyperparameters.weight_decay,
momentum=1.0 - hyperparameters.one_minus_beta1,
nesterov=False)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
decay_steps = step_hint - warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn], boundaries=[warmup_steps])
return lr_schedule_fn
# Forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/optimizers.py
def sgd(learning_rate, weight_decay, momentum=None, nesterov=False):
r"""A customizable gradient descent optimizer.
NOTE: We apply weight decay **before** computing the momentum update.
This is equivalent to applying WD after for heavy-ball momentum,
but slightly different when using Nesterov acceleration. This is the same as
how the Flax optimizers handle weight decay
https://flax.readthedocs.io/en/latest/_modules/flax/optim/momentum.html.
Args:
learning_rate: The learning rate. Expected as the positive learning rate,
      for example `\alpha` in `w -= \alpha * u` (as opposed to `-\alpha`).
weight_decay: The weight decay hyperparameter.
momentum: The momentum hyperparameter.
nesterov: Whether or not to use Nesterov momentum.
Returns:
An optax gradient transformation that applies weight decay and then one of a
{SGD, Momentum, Nesterov} update.
"""
return optax.chain(
optax.add_decayed_weights(weight_decay),
optax.sgd(
learning_rate=learning_rate, momentum=momentum, nesterov=nesterov))
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a SGD with Nesterov momentum optimizer in PyTorch."""
from typing import Callable, Dict, Iterator, List, Tuple
from absl import logging
import optax
import torch
import torch.distributed.nn as dist_nn
from torch.optim.lr_scheduler import LambdaLR
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_state
del rng
# Create optimizer.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=hyperparameters.learning_rate,
momentum=1.0 - hyperparameters.one_minus_beta1,
weight_decay=hyperparameters.weight_decay,
nesterov=True),
}
# Create learning rate schedule.
lr_schedule_fn = create_lr_schedule_fn(workload.step_hint, hyperparameters)
# PyTorch's LambdaLR expects the lr_lambda fn to return a factor which will
# be multiplied with the base lr, so we have to divide by it here.
def _lr_lambda(step: int) -> float:
return lr_schedule_fn(step).item() / hyperparameters.learning_rate
optimizer_state['scheduler'] = LambdaLR(
optimizer_state['optimizer'], lr_lambda=_lr_lambda)
return optimizer_state
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
decay_steps = step_hint - warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn], boundaries=[warmup_steps])
return lr_schedule_fn
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
loss = summed_loss / n_valid_examples
loss.backward()
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
optimizer_state['scheduler'].step()
  # Log training metrics - loss, grad_norm.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a SGD with Nesterov momentum optimizer in Jax."""
import functools
from typing import Callable, Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
# Create learning rate schedule.
lr_schedule_fn = create_lr_schedule_fn(workload.step_hint, hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = sgd(
learning_rate=lr_schedule_fn,
weight_decay=hyperparameters.weight_decay,
momentum=1.0 - hyperparameters.one_minus_beta1,
nesterov=True)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
decay_steps = step_hint - warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn], boundaries=[warmup_steps])
return lr_schedule_fn
# Forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/optimizers.py
def sgd(learning_rate, weight_decay, momentum=None, nesterov=False):
r"""A customizable gradient descent optimizer.
NOTE: We apply weight decay **before** computing the momentum update.
This is equivalent to applying WD after for heavy-ball momentum,
but slightly different when using Nesterov acceleration. This is the same as
how the Flax optimizers handle weight decay
https://flax.readthedocs.io/en/latest/_modules/flax/optim/momentum.html.
Args:
learning_rate: The learning rate. Expected as the positive learning rate,
      for example `\alpha` in `w -= \alpha * u` (as opposed to `-\alpha`).
weight_decay: The weight decay hyperparameter.
momentum: The momentum hyperparameter.
nesterov: Whether or not to use Nesterov momentum.
Returns:
An optax gradient transformation that applies weight decay and then one of a
{SGD, Momentum, Nesterov} update.
"""
return optax.chain(
optax.add_decayed_weights(weight_decay),
optax.sgd(
learning_rate=learning_rate, momentum=momentum, nesterov=nesterov))
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Submission file for a Shampoo optimizer with warmup+cosine LR in Jax."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from baselines.shampoo.jax.distributed_shampoo import distributed_shampoo
_GRAD_CLIP_EPS = 1e-6
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Shampoo optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_steps = int(hyperparameters.warmup_factor * step_hint)
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=warmup_steps)
cosine_steps = max(step_hint - warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn], boundaries=[warmup_steps])
return schedule_fn
# Create optimizer + LR schedule.
lr_schedule_fn = jax_cosine_warmup(workload.step_hint, hyperparameters)
opt_init_fn, opt_update_fn = distributed_shampoo(
learning_rate=lr_schedule_fn,
beta1=1.0 - hyperparameters.one_minus_beta1,
beta2=hyperparameters.beta2,
weight_decay=hyperparameters.weight_decay,
batch_axis_name='batch',
eigh=False)
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
per_device_rngs,
grad_clip,
label_smoothing)
new_optimizer_state, new_params, new_model_state, loss, grad_norm = outputs
# Log loss, grad_norm.
if global_step % 100 == 0 and workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
elif workload_name == 'mnist':
return 16
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# An implementation of distributed Shampoo optimizer from:
#
# Scalable Second Order Optimization for Deep Learning
# Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
# Preprint Paper: https://arxiv.org/abs/2002.09018
#
# This implementation moves computation of inverse pth root back to the
# accelerator (if higher precision is available).
#
# Authors: Rohan Anil (rohananil at google dot com)
# Vineet Gupta (vineet at google dot com)
# James Lottes (jlottes at google dot com)
# Anudhyan Boral (anudhyan at google dot com)
#
# Forked with minor modifications from:
# github.com/google-research/google-research/blob/master/scalable_shampoo/
# optax/distributed_shampoo.py
"""Distributed Shampoo Implementation."""
import enum
import functools
import itertools
import logging
from typing import Any, cast, List, NamedTuple, Optional, TypeVar, Union
import chex
from flax import struct
import jax
from jax import lax
from jax.experimental import pjit
from jax.experimental.sparse import linalg
import jax.numpy as jnp
import numpy as np
import optax
# Dtype for inverse-pth root routine
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work, otherwise it will default to float32.
_MAT_INV_PTH_ROOT_DTYPE = jnp.float64 # pylint: disable=invalid-name
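# For example (illustrative), float64 can be enabled before any computation
# with `jax.config.update('jax_enable_x64', True)` or by setting the
# JAX_ENABLE_X64=1 environment variable; otherwise the dtype above silently
# falls back to float32.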
# Small epsilon to avoid divide by zero.
_EPSILON = 1e-25
# pylint:disable=no-value-for-parameter
@struct.dataclass
class QuantizedValue:
"""State associated with quantized value."""
quantized: chex.Array
diagonal: chex.Array # Diagonal (if extract_diagonal is set)
bucket_size: chex.Array
quantized_dtype: jnp.dtype = struct.field(
pytree_node=False) # Dtype for the quantized value.
extract_diagonal: bool = struct.field(
      pytree_node=False)  # In case it's centered.
shape: Any = struct.field(pytree_node=False) # Shape of the tensor.
@classmethod
def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
if isinstance(fvalue, list) and not fvalue:
return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
fvalue, quantized_dtype, extract_diagonal)
return QuantizedValue(quantized,
diagonal_fvalue,
bucket_size,
quantized_dtype,
extract_diagonal,
list(quantized.shape))
# Quantization is from Lingvo JAX optimizers.
# We extend it for int16 quantization of PSD matrices.
@classmethod
def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False):
"""Returns quantized value and the bucket."""
if quantized_dtype == jnp.float32:
return fvalue, [], []
elif quantized_dtype == jnp.bfloat16:
return fvalue.astype(jnp.bfloat16), [], []
float_dtype = fvalue.dtype
if quantized_dtype == jnp.int8:
# value -128 is not used.
num_buckets = jnp.array(127.0, dtype=float_dtype)
elif quantized_dtype == jnp.int16:
# value -32768 is not used.
num_buckets = jnp.array(32767.0, dtype=float_dtype)
else:
raise ValueError(f'Quantized dtype {quantized_dtype} not supported.')
# max value is mapped to num_buckets
if extract_diagonal and fvalue.ndim != 2:
raise ValueError(
f'Input array {fvalue} must be 2D to work with extract_diagonal.')
diagonal_fvalue = []
if extract_diagonal:
diagonal_fvalue = jnp.diag(fvalue)
# Remove the diagonal entries.
fvalue = fvalue - jnp.diag(diagonal_fvalue)
    # TODO(rohananil): Extend this by making use of information about the
    # blocks, SM3-style, which will be useful for diagonal statistics.
# We first decide the scale.
if fvalue.ndim < 1:
raise ValueError(
f'Input array {fvalue} must have a strictly positive number of '
'dimensions.')
max_abs = jnp.max(jnp.abs(fvalue), axis=0)
bucket_size = max_abs / num_buckets
bs_expanded = bucket_size[jnp.newaxis, Ellipsis]
# To avoid divide by 0.0
bs_nonzero = jnp.where(bs_expanded > 0.0,
bs_expanded,
jnp.ones_like(bs_expanded))
ratio = fvalue / bs_nonzero
# We use rounding to remove bias.
quantized = jnp.round(ratio)
return quantized.astype(quantized_dtype), diagonal_fvalue, bucket_size
def to_float(self):
"""Returns the float value."""
if isinstance(self.quantized, list) and not self.quantized:
return self.quantized
if self.quantized_dtype == jnp.float32:
return self.quantized
if self.quantized_dtype == jnp.bfloat16:
return self.quantized.astype(jnp.float32)
float_dtype = self.bucket_size.dtype
bucket_size = self.bucket_size[jnp.newaxis, Ellipsis]
val = self.quantized.astype(float_dtype) * bucket_size
if self.extract_diagonal:
val += jnp.diag(self.diagonal)
return val
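# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name below is hypothetical): round-trip a small matrix
# through int8 quantization. Bucket sizes are computed per column, so the
# reconstruction error is bounded by half a bucket.
def _example_quantized_value_roundtrip():
  x = jnp.array([[1.0, -2.0], [3.0, 0.5]], dtype=jnp.float32)
  qv = QuantizedValue.from_float_value(x, jnp.int8)
  x_approx = qv.to_float()
  # Max error is at most half a bucket, i.e. max(|column|) / (2 * 127) here.
  return jnp.max(jnp.abs(x - x_approx))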
def _default_zero_field():
return struct.field(
default_factory=functools.partial(jnp.array, 0, jnp.float32))
T = TypeVar("T")
def _maybe_ix(ls, ix):
"""Return ls[ix] if not None else None."""
if ls is None:
return None
return ls[ix]
def _maybe(f):
"""Lifts f to Maybe monad; ie return None if first arg is."""
def wrap_f(x, *args, **kwargs):
if x is None:
return None
return f(x, *args, **kwargs)
return wrap_f
InversePthRootDiagnosticsSubtype = TypeVar(
"InversePthRootDiagnosticsSubtype", bound="InversePthRootDiagnostics")
@struct.dataclass
class InversePthRootDiagnostics:
"""Diagnostics for inverse p-th root iterative procedure.
Given an inverse pth root B = A^(-1/p), contains the average and
maximum diagonal and off diagonal absolute entrywise errors between
(B^p A) and I.
"""
max_diag_error: chex.Array = _default_zero_field()
avg_diag_error: chex.Array = _default_zero_field()
max_off_diag_error: chex.Array = _default_zero_field()
avg_off_diag_error: chex.Array = _default_zero_field()
p: chex.Array = _default_zero_field()
@classmethod
def create(cls, pth_inverse_root, matrix, p):
"""Generates a diagnostics struct from (-1/p) root result."""
mat_m = jnp.matmul(
mat_power(pth_inverse_root, p),
matrix,
precision=jax.lax.Precision.HIGHEST)
num_off_diag_entries = mat_m.size - jnp.diag(mat_m).size
diag_error = jnp.abs(jnp.diag(mat_m) - 1).astype(jnp.float32)
off_diag_error = jnp.abs(mat_m - jnp.diag(jnp.diag(mat_m))).astype(
jnp.float32)
return cls(
max_diag_error=jnp.max(diag_error).astype(jnp.float32),
avg_diag_error=jnp.mean(diag_error).astype(jnp.float32),
max_off_diag_error=jnp.max(off_diag_error).astype(jnp.float32),
avg_off_diag_error=(jnp.sum(off_diag_error) /
num_off_diag_entries).astype(jnp.float32),
p=jnp.array(p, jnp.float32))
LOBPCGDiagnosticsSubtype = TypeVar(
"LOBPCGDiagnosticsSubtype", bound="LOBPCGDiagnostics")
@struct.dataclass
class LOBPCGDiagnostics:
"""Diagnostics for iterative LOBPCG eigenvalue routine.
Contains consistency error for LOBPCG eigenvalue routine, which
refers to |A v - lambda v| / (lambda + |A v|) for a proposed eigenpair
  (v, lambda). This metrics dataclass retains the consistency error
and other useful LOBPCG values.
"""
lobpcg_iters: chex.Array = _default_zero_field()
max_consistency_error: chex.Array = _default_zero_field()
avg_consistency_error: chex.Array = _default_zero_field()
  # Average of absolute value of off-diagonal of V^T V for eigenvectors V.
avg_orthogonality_error: chex.Array = _default_zero_field()
max_eigenvalue: chex.Array = _default_zero_field()
min_eigenvalue: chex.Array = _default_zero_field()
num_topk_eigenvectors: chex.Array = _default_zero_field()
@classmethod
def create(cls, matrix, eigvals, eigvecs, lobpcg_iters):
"""Generates LOBPCG diagnostics from the result of the routine."""
num_topk = len(eigvals)
num_off_diag = num_topk * (num_topk - 1)
precision = jax.lax.Precision.HIGHEST
mat_eigvecs = matrix.dot(eigvecs, precision=precision)
consistency_error_unnormalized = jnp.linalg.norm(
mat_eigvecs - eigvals * eigvecs, ord=2, axis=0)
normalization = jnp.linalg.norm(mat_eigvecs, ord=2, axis=0) + eigvals
consistency_error = consistency_error_unnormalized / normalization
orthogonality_error = eigvecs.T.dot(eigvecs, precision=precision)
orthogonality_error -= jnp.diag(jnp.diag(orthogonality_error))
return cls(
lobpcg_iters=jnp.array(lobpcg_iters, jnp.float32),
max_consistency_error=jnp.max(consistency_error).astype(jnp.float32),
avg_consistency_error=jnp.mean(consistency_error).astype(jnp.float32),
avg_orthogonality_error=(jnp.sum(orthogonality_error) /
num_off_diag).astype(jnp.float32),
max_eigenvalue=jnp.max(eigvals).astype(jnp.float32),
min_eigenvalue=jnp.min(eigvals).astype(jnp.float32),
num_topk_eigenvectors=jnp.array(num_topk, jnp.float32),
)
@struct.dataclass
class TrainingMetrics:
"""Diagnostic metrics from training."""
# Error for inverse-pth roots.
inverse_pth_root_errors: chex.Array = _default_zero_field()
# Iteration count for inverse-pth roots.
inverse_pth_root_iters: chex.Array = _default_zero_field()
# If final iteration error increases sufficiently, iteration terminates early.
# This field records the ratio of the final iteration error.
final_error_ratio: chex.Array = _default_zero_field()
# Max eigen value from either the power iteration or from LOBPCG.
max_eigen_value: chex.Array = _default_zero_field()
# Total retries of inverse pth root iterative method.
total_retries: chex.Array = _default_zero_field()
lobpcg_diagnostics: LOBPCGDiagnostics = struct.field(
default_factory=LOBPCGDiagnostics)
# Rich matrix entrywise error diagnostics, if enabled.
inverse_pth_root_diagnostics: InversePthRootDiagnostics = struct.field(
default_factory=InversePthRootDiagnostics)
# Diagnostics applied to the conditioned p-th root problem, after top
# eigenvectors are removed, if LOBPCG is being applied.
conditioned_inverse_pth_root_diagnostics: InversePthRootDiagnostics = (
struct.field(default_factory=InversePthRootDiagnostics))
# TODO(rohananil): Add more important metrics to track during training.
# Per parameter optimizer state used in data-parallel training.
class ParameterStats(NamedTuple):
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
statistics: Optional[List[Any]] # Statistics (QuantizedValue, chex.Array)
preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array)
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
training_metrics: Union[TrainingMetrics, optax.MaskedNode] # Optional.
# For training extremely large models, we keep a global state with concatenated
# statistics and preconditioner states for all vars. This is so that we can
# annotate the leading axis to be sharded to save memory at the cost of
# communication.
@struct.dataclass
class GlobalShardedParameterStats:
statistics: chex.Array # Statistics
preconditioners: chex.Array # Preconditioners
exponents: chex.Array # exponents
# These are per-parameter local states; all statistics here mirror the
# parameter, so the sharding is copied over from the param specification.
@struct.dataclass
class LocalShardedParameterStats:
"""State associated to each parameter of the model being trained."""
diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner
diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner
momentum: QuantizedValue # Momentum for the shampoo preconditioner
training_metrics: Union[TrainingMetrics, optax.MaskedNode]
index_start: Union[np.int32, int] = struct.field(
pytree_node=False) # Index into global statistics array
sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics.
def default_training_metrics():
"""Create a default TrainingMetrics."""
return TrainingMetrics()
def init_training_metrics(
num_statistics,
generate_training_metrics,
):
"""Initialize TrainingMetrics, masked if disabled."""
if not generate_training_metrics:
return optax.MaskedNode()
return jax.tree_map(
functools.partial(jnp.repeat, repeats=num_statistics),
default_training_metrics())
def init_training_metrics_shapes(
num_statistics,
generate_training_metrics,
):
"""Initialize training metrics shape/dtype."""
seed = init_training_metrics(
num_statistics,
generate_training_metrics,
)
return jax.tree_map(lambda arr: [list(arr.shape), arr.dtype], seed)
def init_training_metrics_pspec(generate_training_metrics,):
"""Initialize training metrics partition specification."""
if not generate_training_metrics:
return optax.MaskedNode()
return jax.tree_map(lambda _: jax.sharding.PartitionSpec(),
default_training_metrics())
class ShardedShampooStats(NamedTuple):
"""Shampoo state in sharded mode."""
global_stats: Any
local_stats: Any
class ShampooState(NamedTuple):
count: chex.Array
stats: Any
class InitFnState(NamedTuple):
init_fn: Any
pspec_fn: Any
shape_and_dtype_fn: Any
class GraftingType(enum.IntEnum):
NONE = 0
SGD = 1
ADAGRAD = 2
RMSPROP = 3
RMSPROP_NORMALIZED = 4
SQRT_N = 5
ADAGRAD_NORMALIZED = 6
class PreconditionerType(enum.IntEnum):
# Default, computes preconditioner for each dim
ALL = 1
  # One-sided Shampoo, in this case only on the input dim.
  # Assumes last dim is always the output dim and everything else input dim.
  INPUT = 2
  # One-sided Shampoo, in this case only on the output dim.
# Assumes last dim is always the output dim and everything else input dim.
OUTPUT = 3
def power_iteration(
matrix,
num_iters=100,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
padding_start=None,
):
r"""Power iteration algorithm.
The power iteration algorithm takes a symmetric PSD matrix `A`, and produces
  a scalar `\lambda`, which is the greatest (in absolute value) eigenvalue
of `A`, and a vector v, which is the corresponding eigenvector of `A`.
References:
[Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration)
Args:
matrix: the symmetric PSD matrix.
num_iters: Number of iterations.
error_tolerance: Iterative exit condition.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
padding_start: if set, assumes rows and columns after padding_start are
zero.
Returns:
    eigenvector, eigenvalue
"""
matrix_size = matrix.shape[-1]
def _iter_condition(state):
i, unused_v, unused_s, unused_s_v, run_step = state
return jnp.logical_and(i < num_iters, run_step)
def _iter_body(state):
"""One step of power iteration."""
i, new_v, s, s_v, unused_run_step = state
new_v = new_v / jnp.linalg.norm(new_v)
s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision)
s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision)
return (i + 1,
s_v,
s_new,
s_v,
jnp.greater(jnp.abs(s_new - s), error_tolerance))
# Figure out how to use step as seed for random.
v_0 = np.random.RandomState(1729).uniform(-1.0, 1.0,
matrix_size).astype(matrix.dtype)
v_0 = jnp.array(v_0)
if padding_start is not None:
v_0 *= (jnp.arange(len(v_0), dtype=jnp.int32) < padding_start)
init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True])
_, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body,
init_state)
v_out = v_out / jnp.linalg.norm(v_out)
return v_out, s_out
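# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): on a small symmetric PSD matrix,
# the dominant eigenvalue from power_iteration should agree with the largest
# value returned by jnp.linalg.eigh (about 3.618 for this matrix).
def _example_power_iteration():
  a = jnp.array([[2.0, 1.0], [1.0, 3.0]], dtype=jnp.float32)
  _, max_ev = power_iteration(a)
  return max_ev, jnp.linalg.eigh(a)[0][-1]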
def mat_power(
mat_m,
p,
precision=lax.Precision.HIGHEST,
):
"""A simple matrix power method. M^p where p can be TracedValue."""
power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE)
def _iter_condition(state):
i, _, _ = state
return i > 0
def _iter_body(state):
i, power, mat = state
power = jax.lax.cond(i % 2 == 1,
lambda: jnp.matmul(mat, power, precision=precision),
lambda: power)
i //= 2
mat = jnp.matmul(mat, mat, precision=precision)
return i, power, mat
_, result, _ = lax.while_loop(_iter_condition, _iter_body, (p, power, mat_m))
return result
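# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): mat_power implements
# exponentiation by squaring, so for a concrete integer p it should match
# jnp.linalg.matrix_power.
def _example_mat_power():
  m = jnp.array([[1.0, 1.0], [0.0, 1.0]])
  # Both results are [[1., 3.], [0., 1.]].
  return mat_power(m, 3), jnp.linalg.matrix_power(m, 3)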
def _pth_root_difference(w, alpha, beta, p):
"""Computes (w+alpha)^(-1/p)-(w+beta)^(-1/p)."""
a = w + alpha
b = w + beta
a_minus_b = alpha - beta
exp = -1 / p
def _stable_subtract(b, a_minus_b):
# Mathematically identical to the target expression, with (w+beta)^(-1/p)
# term factored out and w cancellation in the subtraction.
return (b**exp) * jnp.expm1(exp * jnp.log1p(a_minus_b / b))
return jnp.where(
# Choose the branch with the best log1p approximation.
jnp.abs(a_minus_b / b) < jnp.abs(a_minus_b / a),
-_stable_subtract(a, -a_minus_b),
_stable_subtract(b, a_minus_b))
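# Illustrative numeric check added for exposition (not part of the original
# library; the helper name is hypothetical): for well-scaled inputs the
# stabilized difference agrees with the direct formula
# (w + alpha)^(-1/p) - (w + beta)^(-1/p), roughly -0.348 for these values.
def _example_pth_root_difference():
  w, alpha, beta, p = 1e-3, 2.0, 0.5, 4
  direct = (w + alpha)**(-1.0 / p) - (w + beta)**(-1.0 / p)
  return _pth_root_difference(w, alpha, beta, p), direct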
def matrix_inverse_pth_root(
matrix,
p,
num_iters=100,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
relative_matrix_epsilon=True,
lobpcg_topk_precondition=0,
lobpcg_max_iter=0,
padding_start=None,
prev=None,
eigh=False,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses the Eigh or Coupled newton iterations algorithm for
the computation of a matrix's inverse pth root.
References:
[Functions of Matrices, Theory and Computation,
Nicholas J Higham, Pg 184, Eq 7.18](
https://epubs.siam.org/doi/book/10.1137/1.9780898717778)
Args:
    matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
num_iters: Maximum number of iterations.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Maximum iteration count for LOBPCG, defaults to
`lobpcg_topk_precondition`.
padding_start: If the input matrix was padded, then zeros out columns and
rows at the padding start.
prev: previous iteration's solution, zero-padded (unused)
eigh: If True, uses eigh for inverse-pth root computation.
Returns:
`(matrix + eps)^(-1/p)` and error metrics.
Note `eps` is not added to zeroed out padding rows and
columns. `eps` is just `ridge_epsilon` if
`relative_matrix_epsilon` is set to `False`, otherwise, it is the
ridge epsilon value scaled by the derived maximum eigenvalue of
the input matrix.
"""
if eigh:
return matrix_inverse_pth_root_eigh(matrix,
p,
ridge_epsilon,
error_tolerance,
precision,
relative_matrix_epsilon,
padding_start,
prev)
del prev
assert matrix.shape[0] == matrix.shape[1]
# We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root.
# Switch to f64 if you have hardware that supports it. Enable the jax flag
# jax_enable_x64 for this to work.
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
if padding_start is not None:
# Zero out padding in identity as well for convergence checks.
ix = (jnp.arange(matrix_size, dtype=jnp.int32) < padding_start).astype(
matrix.dtype)
matrix *= ix[jnp.newaxis, :]
matrix *= ix[:, jnp.newaxis]
identity *= ix
original_matrix = matrix
# Only used in lobpcg branches, but required by pytype.
eigvals, eigvecs, lobpcg_diagnostics = None, None, None
if lobpcg_topk_precondition > 0:
# TODO(vladf): reuse previous top-k as the initial search directions
pad_shape = (matrix_size - lobpcg_topk_precondition,
lobpcg_topk_precondition)
search_dirs = jnp.concatenate(
(jnp.eye(lobpcg_topk_precondition), jnp.zeros(pad_shape)), axis=0)
eigvals, eigvecs, lobpcg_iters = linalg.lobpcg_standard( # pylint: disable=unbalanced-tuple-unpacking
matrix, search_dirs,
lobpcg_topk_precondition if lobpcg_max_iter == 0 else lobpcg_max_iter)
lobpcg_diagnostics = LOBPCGDiagnostics.create(
matrix,
eigvals,
eigvecs,
lobpcg_iters,
)
# The minimal eigenvalue among top-k becomes the maximal one in the whole
# matrix after deflation.
deflation = eigvals - jnp.min(eigvals)
scaled_vecs = eigvecs * jnp.sqrt(deflation)
# Deflate out top eigenvectors to reduce matrix condition number.
matrix -= scaled_vecs.dot(
scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
if relative_matrix_epsilon:
if eigvals is not None:
max_ev = jnp.max(eigvals)
else:
# Only use power iteration if lobpcg wasn't already used to derive the
# top eigenvalue.
_, max_ev = power_iteration(
matrix=matrix,
num_iters=100,
error_tolerance=1e-6,
precision=precision,
padding_start=padding_start)
else:
# Use absolute matrix epsilon scaling otherwise.
max_ev = 1.0
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, _EPSILON)
# Sometimes error increases after an iteration before decreasing and
# converging. 1.2 factor is used to bound the maximal allowed increase.
max_error_ratio = 1.2
def _iter_condition(state):
i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, error_ratio = state
error_above_threshold = jnp.logical_and(error > error_tolerance,
error_ratio < max_error_ratio)
return jnp.logical_and(i < num_iters, error_above_threshold)
def _iter_body(state):
(i, mat_m, mat_h, unused_old_mat_h, error, unused_error_ratio) = state
mat_m_i = (1 - alpha) * identity + alpha * mat_m
new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision)
new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision)
new_error = jnp.max(jnp.abs(new_mat_m - identity))
return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error / error)
if matrix_size == 1:
damped_matrix = matrix + ridge_epsilon
resultant_mat_h = damped_matrix**alpha
error = jnp.array(0, jnp.float32)
iters = 0
error_ratio = 0.0
else:
retry_loop_error_threshold = 0.05
num_tries = 6
init_outer_state = tuple([0, identity, 1000.0, 100, 1.0, True])
def _outer_iter_condition_fn(state):
i, _, _, _, _, iter_failed = state
return jnp.logical_and(iter_failed, i < num_tries)
def _outer_body_fn(state):
i, _, _, _, _, _ = state
# Update the epsilon based on the loop iteration.
damped_matrix = matrix + (ridge_epsilon * (10**i) * identity)
z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix))
new_mat_m_0 = damped_matrix * z
new_error = jnp.max(jnp.abs(new_mat_m_0 - identity))
new_mat_h_0 = identity * jnp.power(z, 1.0 / p)
init_state = tuple(
[0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, 1.0])
iters, mat_m, mat_h, old_mat_h, error, error_ratio = lax.while_loop(
_iter_condition, _iter_body, init_state)
error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32)
is_converged = jnp.asarray(error_ratio < max_error_ratio, old_mat_h.dtype)
resultant_mat_h = is_converged * \
mat_h + (1 - is_converged) * old_mat_h
return (i + 1,
resultant_mat_h,
error,
iters,
error_ratio,
error > retry_loop_error_threshold)
loop_outputs = jax.lax.while_loop(_outer_iter_condition_fn,
_outer_body_fn,
init_outer_state)
total_retries, resultant_mat_h, error, iters, error_ratio, _ = loop_outputs
conditioned_resultant_mat = resultant_mat_h
if lobpcg_topk_precondition > 0:
# Since we deflated the top eigenvectors prior to p-th root inverse,
# the resultant matrix has larger eigenvalues associated with those
# same eigenvectors, which we need to now re-deflate.
#
# Note that _pth_root_difference returns positive values for this
# particular argument ordering as min(eigvals) <= eigvals for the
# jnp.sqrt below.
pth_diff = _pth_root_difference(ridge_epsilon, jnp.min(eigvals), eigvals, p)
scaled_vecs = eigvecs * jnp.sqrt(pth_diff)
resultant_mat_h = conditioned_resultant_mat - scaled_vecs.dot(
scaled_vecs.T, precision=jax.lax.Precision.HIGHEST)
error_metrics = TrainingMetrics(
inverse_pth_root_errors=jnp.array(error, jnp.float32),
inverse_pth_root_iters=jnp.array(iters, jnp.float32),
final_error_ratio=jnp.array(error_ratio, jnp.float32),
max_eigen_value=jnp.array(max_ev, jnp.float32),
total_retries=jnp.array(total_retries, jnp.float32))
if lobpcg_topk_precondition > 0:
damped_matrix = matrix + \
(ridge_epsilon * (10**total_retries) * identity)
conditioned_diagnostics = InversePthRootDiagnostics.create(
conditioned_resultant_mat, damped_matrix, p)
unconditioned_damped_matrix = original_matrix + ridge_epsilon * identity
unconditioned_diagnostics = InversePthRootDiagnostics.create(
resultant_mat_h, unconditioned_damped_matrix, p)
# The max entrywise error in error_metrics.inverse_pth_root_errors refers
# to what was derived from the inverse pth root iteration, which with
# LOBPCG refers to the conditioned problem. Make sure to use the error
# from the unconditioned problem.
unconditional_errors = jnp.maximum(
unconditioned_diagnostics.max_diag_error,
unconditioned_diagnostics.max_off_diag_error)
error_metrics = error_metrics.replace(
inverse_pth_root_errors=unconditional_errors,
lobpcg_diagnostics=lobpcg_diagnostics,
conditioned_inverse_pth_root_diagnostics=conditioned_diagnostics,
inverse_pth_root_diagnostics=unconditioned_diagnostics,
)
if padding_start is not None:
# Occasionally, pure-padding matrices are handed to the inversion routine
# due to some TPU hosts not having the same number of preconditioning
# matrices.
resultant_mat_h = jnp.where(padding_start == 0, 0.0, resultant_mat_h)
error = jnp.where(padding_start == 0,
0.0,
error_metrics.inverse_pth_root_errors)
error_metrics = error_metrics.replace(inverse_pth_root_errors=error)
resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype)
return resultant_mat_h, error_metrics
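# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): for a small SPD matrix A, the
# returned B ~= A^(-1/2) should satisfy B^2 @ A ~= I up to the ridge damping
# and the iteration's error tolerance.
def _example_matrix_inverse_pth_root():
  a = jnp.array([[4.0, 1.0], [1.0, 3.0]], dtype=jnp.float32)
  inv_root, metrics = matrix_inverse_pth_root(a, p=2, ridge_epsilon=1e-6)
  residual = mat_power(inv_root, 2) @ a - jnp.eye(2)
  return jnp.max(jnp.abs(residual)), metrics.inverse_pth_root_errors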
def matrix_inverse_pth_root_eigh(
matrix,
p,
ridge_epsilon=1e-6,
error_tolerance=1e-6,
precision=lax.Precision.HIGHEST,
relative_matrix_epsilon=True,
padding_start=None,
prev=None,
):
"""Computes `matrix^(-1/p)`, where `p` is a positive integer.
This function uses eigh for the computation of a matrix's inverse pth
root.
Args:
    matrix: the symmetric PSD matrix whose power is to be computed
p: exponent, for p a positive integer.
ridge_epsilon: Ridge epsilon added to make the matrix positive definite.
error_tolerance: Error indicator, useful for early termination.
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
padding_start: If the input matrix was padded, then zeros out columns and
rows at the padding start.
prev: previous iteration's solution, zero-padded (unused)
Returns:
`(matrix + eps)^(-1/p)` and error metrics.
Note `eps` is not added to zeroed out padding rows and
columns. `eps` is just `ridge_epsilon` if
`relative_matrix_epsilon` is set to `False`, otherwise, it is the
ridge epsilon value scaled by the derived maximum eigenvalue of
the input matrix.
"""
del prev
assert matrix.shape[0] == matrix.shape[1]
matrix_size = matrix.shape[0]
orig_dtype = matrix.dtype
matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE)
alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE)
identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE)
if padding_start is not None:
ix = (jnp.arange(matrix_size, dtype=jnp.int32) < padding_start).astype(
matrix.dtype)
matrix *= ix[jnp.newaxis, :]
matrix *= ix[:, jnp.newaxis]
identity *= ix
if relative_matrix_epsilon:
_, max_ev = power_iteration(
matrix=matrix,
num_iters=100,
error_tolerance=error_tolerance,
precision=precision,
padding_start=padding_start)
else:
# Use absolute matrix epsilon scaling otherwise.
max_ev = 1.0
ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, error_tolerance)
regularized_input = matrix + ridge_epsilon * identity
e, u = jnp.linalg.eigh(regularized_input)
# Due to padding, we may have to zero out eigenvalues.
if padding_start is not None:
e *= jnp.flip(ix)
mm = functools.partial(jnp.matmul, precision=precision)
inv_e = jnp.where(e == 0.0,
0.0,
jnp.power(jnp.maximum(e, ridge_epsilon), alpha))
  # Form u * diag(inv_e) * u^T via a symmetric product.
  root = u * jnp.sqrt(inv_e)
  val = mm(root, root.T)
recovered_e = mm(u.T, mm(regularized_input, u))
eig_error = recovered_e - jnp.diag(e)
if padding_start is not None:
eig_error *= jnp.flip(ix)
error = jnp.max(jnp.abs(eig_error))
error_metrics = TrainingMetrics(
inverse_pth_root_errors=jnp.array(error, jnp.float32))
if padding_start is not None:
val = jnp.where(padding_start == 0, 0.0, val)
error = jnp.where(padding_start == 0,
0.0,
error_metrics.inverse_pth_root_errors)
error_metrics = error_metrics.replace(inverse_pth_root_errors=error)
val = jnp.asarray(val, orig_dtype)
return val, error_metrics
def merge_small_dims(shape_to_merge, max_dim):
"""Merge small dimensions.
If there are some small dimensions, we collapse them:
e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024
[1, 2, 768, 1, 2048] --> [2, 768, 2048]
Args:
shape_to_merge: Shape to merge small dimensions.
max_dim: Maximal dimension of output shape used in merging.
Returns:
Merged shape.
"""
if shape_to_merge and np.all(np.array(shape_to_merge) == 1):
return [1]
resulting_shape = []
product = 1
for d in shape_to_merge:
if product * d <= max_dim:
product *= d
else:
if product > 1:
resulting_shape.append(product)
product = d
if product > 1:
resulting_shape.append(product)
return resulting_shape
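# Illustrative check added for exposition (not part of the original library;
# the helper name is hypothetical): the two cases listed in the docstring
# above.
def _example_merge_small_dims():
  assert merge_small_dims([1, 2, 512, 1, 2048, 1, 3, 4], 1024) == [
      1024, 2048, 12
  ]
  assert merge_small_dims([1, 2, 768, 1, 2048], 1024) == [2, 768, 2048]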
def pad_square_matrix(mat, max_size):
"""Pad a square matrix up to max_size.
Args:
mat: a matrix to pad.
max_size: matrix size requested.
Returns:
Given M returns [[M, 0], [0, I]]
"""
rows, cols = mat.shape
if rows != cols:
raise ValueError("Must have rows == cols, instead got "
f"rows={rows}, cols={cols}")
if cols > max_size:
raise ValueError("Must have cols <= max_size. Instead got "
f"cols={cols}, max_size={max_size}.")
if rows == max_size:
return mat
pad_size = max_size - rows
zs1 = jnp.zeros([rows, pad_size], dtype=mat.dtype)
zs2 = jnp.zeros([pad_size, rows], dtype=mat.dtype)
eye = jnp.eye(pad_size, dtype=mat.dtype)
mat = jnp.concatenate([mat, zs1], 1)
mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0)
return mat
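# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): a 2x2 block padded to 4x4 keeps
# the block in the top-left corner and an identity in the bottom-right, so the
# padding does not perturb the block's inverse p-th root.
def _example_pad_square_matrix():
  m = jnp.array([[2.0, 0.0], [0.0, 2.0]])
  # Returns [[2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]].
  return pad_square_matrix(m, max_size=4)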
def pad_vector(vec, max_size):
"""Pad a vector to a max_size.
Args:
vec: a vector to pad.
max_size: matrix size requested.
Returns:
Given V returns [V, 0]
"""
size = vec.shape[0]
assert size <= max_size
if size == max_size:
return vec
pad_size = max_size - size
zs1 = jnp.zeros([pad_size], dtype=vec.dtype)
return jnp.concatenate([vec, zs1], 0)
def efficient_cond(predicate, compute_fn, init_state, *args, **kwargs):
"""Avoids wasteful buffer allocation with XLA."""
def _iter_body(unused_state):
results = compute_fn(*args, **kwargs)
return tuple([False] + list(results))
def _iter_condition(state):
return state[0]
results = jax.lax.while_loop(_iter_condition,
_iter_body,
tuple([predicate] + init_state))
return tuple(results[1:])
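# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): with a False predicate the
# init_state is returned unchanged and compute_fn is skipped, which lets
# callers avoid expensive preconditioner updates on most steps.
def _example_efficient_cond():
  expensive_fn = lambda: [jnp.ones((2, 2))]
  kept = efficient_cond(False, expensive_fn, [jnp.zeros((2, 2))])
  return kept[0]  # All zeros; would be all ones if the predicate were True.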
class BlockPartitioner:
"""Partitions a tensor into smaller tensors."""
def __init__(self, param, block_size):
self._shape = param.shape
self._splits = []
split_sizes = []
# We split params into smaller blocks. Here we store the metadata to make
# that split.
for i, d in enumerate(param.shape):
if 0 < block_size < d:
# d-1, otherwise split appends a 0-size array.
nsplit = (d - 1) // block_size
indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size
sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size
sizes[-1] = d - indices[-1]
self._splits.append((i, indices))
split_sizes.append(sizes)
else:
split_sizes.append(np.array([d], dtype=np.int32))
self._split_sizes = split_sizes
def split_sizes(self):
return self._split_sizes
def partition(self, tensor):
"""Partition tensor into blocks."""
assert tensor.shape == self._shape
tensors = [tensor]
for (i, indices) in self._splits:
tensors_local = []
for t in tensors:
tensors_local.extend(jnp.split(t, indices_or_sections=indices, axis=i))
tensors = tensors_local
return tensors
def merge_partitions(self, partitions):
"""Merge partitions back to original shape."""
for (i, indices) in reversed(self._splits):
n = len(indices) + 1
partial_merged_tensors = []
ind = 0
while ind < len(partitions):
partial_merged_tensors.append(
jnp.concatenate(partitions[ind:ind + n], axis=i))
ind += n
partitions = partial_merged_tensors
assert len(partitions) == 1
return partitions[0]
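# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): a (4, 6) parameter with
# block_size=3 is cut at index 3 along each dim, giving four blocks of shapes
# (3, 3), (3, 3), (1, 3) and (1, 3); merge_partitions restores the original.
def _example_block_partitioner():
  param = jnp.arange(24.0).reshape(4, 6)
  partitioner = BlockPartitioner(param, block_size=3)
  blocks = partitioner.partition(param)
  restored = partitioner.merge_partitions(blocks)
  return [b.shape for b in blocks], jnp.array_equal(param, restored)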
def gram_weighted_update(old_stats, g, axis, w1, w2, precision=None):
"""Updated statistics via weighted average with new Gram matrix.
Returns w₁ R + w₂ Gᵀ G where R is `old_stats` and G is the matrix whose
columns are the flattened slices of the tensor `g` along the given `axis`.
(So, `old_stats` and the returned matrix have dimensions n x n where
n = `g.shape[axis]`).
Args:
old_stats: Old statistics.
g: Gradient tensor.
axis: Axis along which to slice `g`.
w1: Scalar weight for old statistics.
w2: Scalar weight for new Gram matrix.
precision: Optional precision XLA related flag, the available options are:
a) lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
Weighted average of old and new statistics.
"""
axes = [i for i in range(g.ndim) if i != axis]
gram_matrix = jnp.tensordot(g, g, axes=(axes, axes), precision=precision)
return w1 * old_stats + w2 * gram_matrix
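# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): for a 2-D gradient g and axis=0,
# the Gram term is g @ g.T, so the update is w1 * old_stats + w2 * (g @ g.T).
def _example_gram_weighted_update():
  g = jnp.array([[1.0, 2.0], [3.0, 4.0]])
  old_stats = jnp.eye(2)
  updated = gram_weighted_update(old_stats, g, axis=0, w1=0.9, w2=0.1)
  # Same values up to float rounding.
  return updated, 0.9 * old_stats + 0.1 * (g @ g.T)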
class Preconditioner:
"""Compute statistics/shape from gradients for preconditioning."""
def __init__(
self,
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
preconditioner_type=PreconditionerType.ALL,
):
"""Initializes the preconditioner.
Args:
param: parameter to precondition.
block_size: Block size used to split param.
merge_small_dims_block_size: Block size for merging dims.
best_effort_shape_interpretation: Whether to
collapse/merge dims together.
preconditioner_type: Type of preconditioner to use.
"""
self._original_shape = param.shape
self._transformed_shape = param.shape
if best_effort_shape_interpretation:
self._transformed_shape = merge_small_dims(self._original_shape,
merge_small_dims_block_size)
reshaped_param = jnp.reshape(param, self._transformed_shape)
self._partitioner = BlockPartitioner(reshaped_param, block_size)
self._preconditioner_type = preconditioner_type
def updated_statistics_from_grad(
self,
stats,
grad,
w1,
w2,
to_float=None,
from_float=None,
precision=None,
):
"""Update statistics from gradients.
Args:
stats: Old statistics or its Cholesky factor if `cholesky` is True.
grad: Gradient to compute statistics from.
w1: Weight for old statistics.
w2: Weight for new statistics.
to_float: Optional function for converting stats to floating point.
from_float: Optional function for converting from floating point.
precision: Optional precision XLA related flag, the available options
are:
a) lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
Returns:
A list of updated gradient statistics for each partition.
"""
to_float = to_float if to_float is not None else (lambda x: x)
from_float = from_float if from_float is not None else (lambda x: x)
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
should_preconditioned_dims = self.should_precondition_dims()
preconditioned_dims = [
i for i, p in enumerate(should_preconditioned_dims) if p
]
new_stats = []
index = 0
for g in partitioned_grads:
for axis in preconditioned_dims:
update = functools.partial(gram_weighted_update, precision=precision)
new_stat = update(to_float(stats[index]), g, axis, w1, w2)
new_stats.append(from_float(new_stat))
index += 1
return new_stats
def should_precondition_dims(self):
"""A vector containing indicator indicating if the dim is preconditioned."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
return [True] * rank
elif self._preconditioner_type == PreconditionerType.INPUT:
return [True] * (rank - 1) + [False]
elif self._preconditioner_type == PreconditionerType.OUTPUT:
return [False] * (rank - 1) + [True]
def _preconditioner_shape(self, dim):
"""Returns possibly rank-compressed preconditioner shape."""
return [dim, dim]
def _preconds_for_grad(self, preconditioners, rank, start, end):
"""Returns a slice of preconditioners of length rank."""
preconditioners_for_grad = preconditioners[start:end]
if self._preconditioner_type == PreconditionerType.INPUT:
# When _preconditioner_type is INPUT, we append a None value to the end of
# the list to handle the False index.
preconditioners_for_grad = preconditioners_for_grad + [None]
elif self._preconditioner_type == PreconditionerType.OUTPUT:
# When _preconditioner_type is OUTPUT, we append (rank - 1) many None
# values to the beginning of the list to handle the False indices.
preconditioners_for_grad = [None] * \
(rank - 1) + preconditioners_for_grad
assert len(preconditioners_for_grad) == rank
return preconditioners_for_grad
def shapes_for_preconditioners(self):
"""Returns shape from statistics."""
split_sizes = self._partitioner.split_sizes()
rank = len(split_sizes)
# We ignore preconditioner types if rank == 1
preconditioner_shapes = []
for t in itertools.product(*split_sizes):
if self._preconditioner_type == PreconditionerType.ALL or rank <= 1:
preconditioner_shapes.extend(map(self._preconditioner_shape, t))
elif self._preconditioner_type == PreconditionerType.INPUT:
preconditioner_shapes.extend(map(self._preconditioner_shape, t[:-1]))
elif self._preconditioner_type == PreconditionerType.OUTPUT:
preconditioner_shapes.extend(map(self._preconditioner_shape, t[-1:]))
return preconditioner_shapes
def exponent_for_preconditioner(self):
"""Returns exponent to use for inverse-pth root M^{-1/p}."""
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
return 2 * num_preconditioners
def preconditioned_grad(self, grad, preconditioners):
"""Precondition the gradient.
Args:
grad: A gradient tensor to precondition.
preconditioners: A list of preconditioners to apply.
Returns:
A preconditioned gradient.
"""
reshaped_grad = jnp.reshape(grad, self._transformed_shape)
partitioned_grads = self._partitioner.partition(reshaped_grad)
should_preconditioned_dims = self.should_precondition_dims()
num_preconditioners = sum(should_preconditioned_dims)
preconditioned_partitioned_grads = []
for i, g in enumerate(partitioned_grads):
preconditioners_for_grad = self._preconds_for_grad(
preconditioners,
rank=len(should_preconditioned_dims),
start=i * num_preconditioners,
end=(i + 1) * num_preconditioners,
)
precond_g = self._precondition_block(g,
should_preconditioned_dims,
preconditioners_for_grad)
preconditioned_partitioned_grads.append(precond_g)
merged_grad = self._partitioner.merge_partitions(
preconditioned_partitioned_grads)
return jnp.reshape(merged_grad, self._original_shape)
def _precondition_block(self, g, should_precondition_dim, preconditioners):
"""Perform a preconditioning op on a single gradient block."""
for j, should_precondition in enumerate(should_precondition_dim):
# Loop invariant: the dimension to be preconditioned is first; we keep
# all axes in the same cyclic order they were originally.
# Case: skip preconditioning this dimension.
rank = len(g.shape)
roll = tuple(range(1, rank)) + (0,)
if not should_precondition:
g = jnp.transpose(g, axes=roll)
continue
# Case: full Shampoo matrix precondition this dimension
g = jnp.tensordot(g, preconditioners[j], axes=[[0], [0]])
return g
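# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): with shape interpretation
# disabled, a (20, 10) kernel gets two full preconditioners of shapes
# [20, 20] and [10, 10], uses exponent 2 * 2 = 4 for the inverse root, and
# identity preconditioners leave the gradient unchanged.
def _example_preconditioner():
  param = jnp.zeros((20, 10))
  prec = Preconditioner(
      param,
      block_size=1024,
      merge_small_dims_block_size=4096,
      best_effort_shape_interpretation=False,
      preconditioner_type=PreconditionerType.ALL)
  shapes = prec.shapes_for_preconditioners()
  exponent = prec.exponent_for_preconditioner()
  unchanged = prec.preconditioned_grad(
      jnp.ones((20, 10)), [jnp.eye(20), jnp.eye(10)])
  return shapes, exponent, unchanged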
def _convert_to_parameter_stats(global_stats,
local_stat,
convert_statistics=True):
"""Creates parameter stats from sharded stats."""
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
statistics = global_stats.statistics[index_start:index_end, :, :]
preconditioners = global_stats.preconditioners[index_start:index_end, :, :]
new_statistics = []
new_preconditioners = []
for i, size in enumerate(local_stat.sizes):
new_statistics.append(statistics[i][:size, :size])
pd = size
new_preconditioners.append(preconditioners[i][:size, :pd])
if not convert_statistics:
new_statistics = None
return ParameterStats(
local_stat.diagonal_statistics,
new_statistics,
new_preconditioners,
local_stat.diagonal_momentum,
local_stat.momentum,
local_stat.training_metrics,
)
def _convert_from_parameter_stats(parameter_stats, local_stats):
"""Creates sharded stats from paramter stats."""
return LocalShardedParameterStats(
parameter_stats.diagonal_statistics,
parameter_stats.diagonal_momentum,
parameter_stats.momentum,
parameter_stats.training_metrics,
local_stats.index_start,
local_stats.sizes,
)
def _add_metrics_into_local_stats(local_stats, metrics, keep_old):
"""Adds errors back into local statistics."""
new_local_stats = []
for local_stat in local_stats:
index_start = int(local_stat.index_start)
index_end = int(len(local_stat.sizes)) + index_start
# pylint:disable=cell-var-from-loop Used immediately.
per_stat_metrics = jax.tree_map(lambda x: x[index_start:index_end], metrics)
# We don't want to update the metrics if we didn't do a new inverse p-th
# root calculation to find a new preconditioner, so that TensorBoard curves
# look consistent (otherwise they'd oscillate between NaN and measured
# values).
per_stat_metrics = efficient_cond(keep_old,
lambda: [local_stat.training_metrics],
[per_stat_metrics])[0]
# pylint:enable=cell-var-from-loop
new_local_stats.append(
local_stat.replace(training_metrics=per_stat_metrics))
return new_local_stats
def batch(x, num_devices):
"""Batch `x` so that so that leading axis is num_devices."""
n = len(x)
b = int(n / num_devices)
return jnp.stack([jnp.stack(x[idx:idx + b]) for idx in range(0, n, b)])
def unbatch(batched_values):
"""Unbatch values across leading axis and return a list of elements."""
b1, b2 = batched_values.shape[0], batched_values.shape[1]
results = []
for v_array in jnp.split(batched_values, indices_or_sections=b1, axis=0):
v_array = jnp.squeeze(v_array)
    # b2 = batches (number of preconditioner computations) per core.
if b2 > 1:
for v in jnp.split(v_array, indices_or_sections=b2, axis=0):
results.append(jnp.squeeze(v))
else:
results.append(v_array)
return results
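# Illustrative usage sketch added for exposition (not part of the original
# library; the helper name is hypothetical): batch() stacks a flat list of
# equally shaped matrices onto a (num_devices, per_device, ...) layout and
# unbatch() recovers the flat list.
def _example_batch_unbatch():
  mats = [jnp.full((2, 2), float(i)) for i in range(4)]
  batched = batch(mats, num_devices=2)  # Shape (2, 2, 2, 2).
  return [m.shape for m in unbatch(batched)]  # Four (2, 2) matrices.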
def distributed_shampoo(
learning_rate,
block_size=1024,
beta1=0.9,
beta2=0.999,
diagonal_epsilon=1e-8,
matrix_epsilon=1e-6,
weight_decay=0.0,
start_preconditioning_step=101,
preconditioning_compute_steps=20,
statistics_compute_steps=1,
best_effort_shape_interpretation=True,
graft_type=GraftingType.RMSPROP_NORMALIZED,
nesterov=True,
exponent_override=0,
# Pass pmap 'batch axis name' in pmap mode.
batch_axis_name=None,
# Only set following 3 params in pjit/spmd mode.
# WARNING: Experimental
statistics_partition_spec=None,
preconditioner_partition_spec=None,
num_devices_for_pjit=None,
shard_optimizer_states=False,
###
# Experimental memory reduction mode
best_effort_memory_usage_reduction=True,
###
inverse_failure_threshold=0.1,
moving_average_for_momentum=True,
skip_preconditioning_dim_size_gt=0,
clip_by_scaled_gradient_norm=None,
precision=lax.Precision.HIGHEST,
tensordot_precision=None,
relative_matrix_epsilon=True,
merge_small_dims_block_size=4096,
lobpcg_topk_precondition=0,
lobpcg_max_iter=0,
precondtioner_type=PreconditionerType.ALL,
custom_preconditioner=False,
skip_preconditioning_rank_lt=1,
decoupled_learning_rate=True,
decoupled_weight_decay=False,
generate_training_metrics=True,
reuse_preconditioner=False,
eigh=True,
):
"""Distributed Shampoo optimizer.
Distributed Shampoo is a second-order preconditioned method (concretely, a
variant of full-matrix Adagrad), that provides significant convergence and
wall-clock time improvements compared to conventional first-order methods,
and that has been shown to scale to large state-of-the-art deep learning
models.
References:
Scalable Second Order Optimization for Deep Learning,
Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer
Preprint: https://arxiv.org/abs/2002.09018
Args:
learning_rate: the step size used to update the parameters.
block_size: Block size for large layers (if > 0). Preconditioning compute
operation is cubic in the dimension of the tensor. Block size allows us
to chunk the layers into sub-layers of maximal dimension dictated by
      this value. The default here is 1024 (increase if you have compute budget).
beta1: momentum parameter.
beta2: second moment averaging parameter.
diagonal_epsilon: epsilon for diagonal adagrad (only if layerwise grafting
to AdaGrad is enabled).
matrix_epsilon: epsilon to add to statistics before computing inverse pth
root. If you are running in f32 precision for inverse pth root
      (recommended today) this can go up to 1e-6. If you have the latest
      hardware with native f64 precision, set this up to 1e-12.
weight_decay: Weight decay for regularization.
    start_preconditioning_step: When to start the Shampoo update; before this
      step only the diagonal (grafting) update is used, because there is not
      yet enough information for a stable inverse.
preconditioning_compute_steps: How often to compute preconditioner.
Performance tuning params for controlling memory and compute
requirements.
Ideally set this and statistics_compute_steps params to 1.
statistics_compute_steps: How often to compute statistics.
best_effort_shape_interpretation: If there are some small dimensions,
collapse them e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if
block = 1024, [1, 2, 768, 1, 2048] --> [2, 768, 2048]
graft_type: Grafting is a technique to fix the layerwise scale of Shampoo
optimizer. This allows us to plugin the Shampoo optimizer into settings
where SGD/AdaGrad is already well tuned.
nesterov: Nesterov momentum.
exponent_override: Override the exponent used in matrix inverse.
    batch_axis_name: the labeled pmap axis name used for data-parallel
      training.
statistics_partition_spec: PartitionSpec to be used in sharded mode.
preconditioner_partition_spec: PartitionSpec to be used in sharded mode.
num_devices_for_pjit: Number of devices to parallelize over when using
pjit.
shard_optimizer_states: Shard optimizer states to save memory in model
parallel training.
    best_effort_memory_usage_reduction: Best effort memory usage reduction.
      diagonal_statistics -> jnp.bfloat16; momentum buffers (2x) -> jnp.int8;
      statistics, preconditioners -> jnp.int16 + diagonals.
    inverse_failure_threshold: numerics are hard and inverses fail sometimes;
      if the inverse-pth-root error exceeds this threshold, the previous
      preconditioner is kept.
moving_average_for_momentum: Whether to use moving average for momentum
instead of exponential moving average.
skip_preconditioning_dim_size_gt: Skip if preconditioning dim size is
greater than this value.
clip_by_scaled_gradient_norm: Clip by scaled gradient norm (only useful
when using RMSProp Grafting).
precision: precision XLA related flag, the available options are: a)
lax.Precision.DEFAULT (better step time, but not precise) b)
lax.Precision.HIGH (increased precision, slower) c)
lax.Precision.HIGHEST (best possible precision, slowest)
tensordot_precision: Optional precision to use for the tensordot operation
when computing statistics (e.g., G Gᵀ). Same options as `precision`
above.
relative_matrix_epsilon: Whether to use relative epsilon to the max eigen
value when computing inverse-pth root.
merge_small_dims_block_size: Used as the maximum block size to merge the
shapes.
lobpcg_topk_precondition: If nonzero, specifies the number of top
eigenvectors to subtract out before performing LOBPCG. Note this makes
relative_matrix_epsilon essentially free.
lobpcg_max_iter: Number of LOBPCG iterations, if zero defaults to
`lobpcg_topk_precondition`.
    precondtioner_type: Preconditioner type selecting all dims, input (left)
      only, or output (right) only preconditioners.
skip_preconditioning_rank_lt: Skips preconditioning for parameters with
rank less than this value.
decoupled_learning_rate: If True, use decoupled learning rate, otherwise
couple it with preconditioned gradient computation. (Default True)
decoupled_weight_decay: If True, use decoupled weight decay, otherwise
couple with weight decay. (Default False)
generate_training_metrics: If True, gather training metrics, otherwise
avoid generating them (to reduce memory usage).
reuse_preconditioner: If True, pass the previous derived preconditioner
      as a warm start to the next iteration's inverse pth root computation.
    eigh: If True, uses eigendecomposition for the inverse-pth root.
Returns:
a GradientTransformation.
"""
reset_frequency = None
def _graft_type_has_diagonal_statistics():
"""Returns True if using diagonal firt order method for grafting."""
return graft_type not in [
GraftingType.SGD, GraftingType.SQRT_N, GraftingType.NONE
]
def quantized_dtype_for_momentum_buffers(var):
return jnp.int8 if best_effort_memory_usage_reduction and len(
var.shape) > 1 else jnp.float32
quantize_second_moment = (
best_effort_memory_usage_reduction and batch_axis_name)
  # Preconditioner and statistics are both stored as int16 in this mode.
# We take out the diagonal to make quantization easier.
def quantized_dtype_for_second_moment_statistics_buffers():
return jnp.int16 if quantize_second_moment else jnp.float32
  # Preconditioner and statistics are both stored as int16 in this mode.
# We take out the diagonal to make quantization easier.
def quantized_dtype_for_second_moment_preconditioner_buffers():
return jnp.int16 if quantize_second_moment else jnp.float32
# _quantized_matrix_inverse_pth_root_vmap implementation assumes
# that preconditioner is quantized if and only if stats is quantized.
qdt_precond = quantized_dtype_for_second_moment_preconditioner_buffers()
qdt_stat = quantized_dtype_for_second_moment_statistics_buffers()
assert qdt_precond == qdt_stat
def _to_float(maybe_quantized):
if isinstance(maybe_quantized, QuantizedValue):
return maybe_quantized.to_float()
else:
return maybe_quantized
def preconditioner_from_params(param):
"""Returns a Preconditioner object for given param."""
return Preconditioner(
param,
block_size,
merge_small_dims_block_size,
best_effort_shape_interpretation,
precondtioner_type,
)
def precond_dim(max_size):
"""Derives largest preconditioner dimension."""
return max_size
def pad_and_maybe_zero_preconditioners(preconditioners, total, max_size,
step):
"""Pad preconditioners up to total x max_size x precond_dim(max_size)."""
pd = precond_dim(max_size)
def maybe_reset_preconditioner(step, preconditioner):
if reset_frequency is None:
return preconditioner
return jnp.where(step % reset_frequency == 0, 0.0, 1.0) * preconditioner
def _pad_preconditioner(preconditioner):
assert preconditioner.ndim == 2
r, c = preconditioner.shape
assert r <= max_size
assert c <= pd
pad_rows = [(0, max_size - r)]
pad_cols = [(0, pd - c)]
padding = pad_rows + pad_cols
preconditioner = maybe_reset_preconditioner(step, preconditioner)
return jnp.pad(preconditioner, padding)
last_dims_padded = [_pad_preconditioner(p) for p in preconditioners]
dt = preconditioners[0].dtype if preconditioners else jnp.float32
num_extra = total - len(last_dims_padded)
extra = [jnp.zeros([max_size, pd], dtype=dt)] * num_extra
return last_dims_padded + extra
def sharded_init_fn(params):
"""Returns optimizer state (for PJIT mode).
Args:
params: the parameters that should be updated.
"""
params_flat, treedef = jax.tree_util.tree_flatten(params)
# Find max size to pad to.
max_size = 0
for param in params_flat:
preconditioner = preconditioner_from_params(param)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(*sizes, max_size)
padded_statistics = []
padded_preconditioners = []
local_stats_flat = []
exponents = []
for param in params_flat:
preconditioner = preconditioner_from_params(param)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
statistics = []
preconditioners = []
index_start = len(padded_statistics)
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32)
for s in shapes
]
pd = precond_dim(max_size)
# If the preconditioner is using a low-rank representation, initialize
# it to zero instead of an invalid eye.
preconditioners = [
jnp.eye(max_size, pd, dtype=jnp.float32) * (pd == max_size)
for s in shapes
]
padded_statistics.extend(statistics)
padded_preconditioners.extend(preconditioners)
exponent = (
preconditioner.exponent_for_preconditioner()
if exponent_override == 0 else exponent_override)
exponents.extend([exponent] * len(shapes))
diagonal_statistics = jnp.zeros_like(param)
diagonal_momentum = jnp.zeros_like(param)
momentum = jnp.zeros_like(param)
local_stats_flat.append(
LocalShardedParameterStats(
diagonal_statistics,
diagonal_momentum,
momentum,
init_training_metrics(
len(sizes),
generate_training_metrics,
),
index_start,
sizes))
local_stats = jax.tree_util.tree_unflatten(treedef, local_stats_flat)
to_pad = -len(padded_statistics) % num_devices_for_pjit
if max_size == 0:
to_pad = num_devices_for_pjit
max_size = block_size
stat_dtype = jnp.float32
else:
stat_dtype = padded_statistics[0].dtype
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)])
pd = precond_dim(max_size)
# If the preconditioner is using a low-rank representation, initialize
# it to zero instead of an invalid eye.
padded_preconditioners.extend([
jnp.eye(max_size, pd, dtype=stat_dtype) * (pd == max_size)
for _ in range(to_pad)
])
exponents.extend([1 for _ in range(to_pad)])
global_stats = GlobalShardedParameterStats(
jnp.stack(padded_statistics),
jnp.stack(padded_preconditioners),
jnp.stack(exponents))
return ShampooState(
count=jnp.zeros([], jnp.int32),
stats=ShardedShampooStats(global_stats, local_stats))
def _max_statistics_size_from_params(params):
max_size = 0
for param in params:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
sizes = [s[0] for s in shapes]
max_size = max(*sizes, max_size)
return max_size
def _remove_leading_sharding_annotation(pspec):
"""Mapping from N-d to (N-1)-d, used for quantization, factoring etc."""
# None and PSpec(None) are valid PSpecs.
if pspec and len(pspec) > 1:
return jax.sharding.PartitionSpec(*pspec[1:])
else:
return []
def sharded_init_partition_spec_fn(params,
params_partition_spec,
partition_spec_for_statistics):
"""Returns a parallel state tree with PartitionSpec associated with state.
Args:
params: A pytree with params.
params_partition_spec: A pytree with PartitionSpec for params.
partition_spec_for_statistics: PartitionSpec for the statistics.
"""
# Parallel lists of spec, and params.
param_pspec_flat, _ = jax.tree_util.tree_flatten(
params_partition_spec, is_leaf=lambda x: x is None)
params_flat, treedef = jax.tree_util.tree_flatten(params)
assert param_pspec_flat
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param, param_pspec in zip(params_flat, param_pspec_flat):
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_pspec = param_pspec
m2_pspec = param_pspec
m1_scale_pspec = []
m2_scale_pspec = []
if qdtype != jnp.float32:
m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec)
m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec)
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
param_pspec,
[],
[],
jnp.float32,
False, # pytype: disable=wrong-arg-types # numpy-scalars
list(param.shape)),
QuantizedValue(
m1_pspec,
[],
m1_scale_pspec,
qdtype,
False, # pytype: disable=wrong-arg-types # numpy-scalars
list(param.shape)),
QuantizedValue(
m2_pspec,
[],
m2_scale_pspec,
qdtype,
False, # pytype: disable=wrong-arg-types # numpy-scalars
list(param.shape)),
init_training_metrics_pspec(generate_training_metrics,),
index_start,
sizes))
local_stats = jax.tree_util.tree_unflatten(treedef, local_stats_flat)
global_stats = GlobalShardedParameterStats(partition_spec_for_statistics,
partition_spec_for_statistics,
jax.sharding.PartitionSpec())
count_pspec = jax.sharding.PartitionSpec()
return ShampooState( # pytype: disable=wrong-arg-types # numpy-scalars
count=count_pspec,
stats=ShardedShampooStats(global_stats, local_stats))
def sharded_init_shape_and_dtype_fn(params):
"""Returns a parallel state tree with shape, dtype associated with state.
Args:
params: A pytree with params.
"""
# Parallel lists of spec, and params.
params_flat, treedef = jax.tree_util.tree_flatten(params)
assert params_flat
# Step is replicated across cores.
# None means cores.
local_stats_flat = []
num_statistics = 0
for param in params_flat:
param_clone = jnp.zeros(param.shape, dtype=param.dtype)
preconditioner = preconditioner_from_params(param_clone)
shapes = preconditioner.shapes_for_preconditioners()
sizes = []
index_start = num_statistics
if not _skip_preconditioning(param):
sizes = [s[0] for s in shapes]
shapes = preconditioner.shapes_for_preconditioners()
num_statistics += len(shapes)
qdtype = quantized_dtype_for_momentum_buffers(param)
m1_shape_and_dtype = [list(param.shape), param.dtype]
m2_shape_and_dtype = [list(param.shape), param.dtype]
m1_scale_shape_and_dtype = []
m2_scale_shape_and_dtype = []
if qdtype != jnp.float32:
m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype]
diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype]
local_stats_flat.append(
LocalShardedParameterStats(
QuantizedValue(
diagonal_statistics_shape_and_dtype,
[],
[], # pytype: disable=wrong-arg-types # numpy-scalars
jnp.float32,
False,
list(param.shape)),
QuantizedValue(m1_shape_and_dtype, [],
m1_scale_shape_and_dtype,
qdtype,
False,
list(param.shape)),
QuantizedValue(m2_shape_and_dtype, [],
m2_scale_shape_and_dtype,
qdtype,
False,
list(param.shape)),
init_training_metrics_shapes(
len(sizes),
generate_training_metrics,
),
index_start,
sizes,
))
local_stats = jax.tree_util.tree_unflatten(treedef, local_stats_flat)
max_statistics_size = _max_statistics_size_from_params(params_flat)
to_pad = -num_statistics % num_devices_for_pjit
num_statistics += to_pad
if num_statistics == 0:
num_statistics = num_devices_for_pjit
max_statistics_size = block_size
statistics_shape = [
num_statistics, max_statistics_size, max_statistics_size
]
preconditioners_shape = [
num_statistics, max_statistics_size, precond_dim(max_statistics_size)
]
global_stats = GlobalShardedParameterStats(
[statistics_shape, jnp.float32], [preconditioners_shape, jnp.float32],
[[num_statistics], jnp.int32])
return ShampooState( # pytype: disable=wrong-arg-types # numpy-scalars
count=[[], jnp.float32],
stats=ShardedShampooStats(global_stats, local_stats))
def sharded_update_fn(grads, state, params):
"""Transform the input gradient and update all statistics in sharded mode.
Args:
grads: the gradient tensors for the parameters.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
params_flat, treedef = jax.tree_util.tree_flatten(params)
grads_flat = treedef.flatten_up_to(grads)
global_stats = state.stats.global_stats
local_stats_flat = treedef.flatten_up_to(state.stats.local_stats)
stats_flat = []
for local_stat in local_stats_flat:
stats_flat.append(_convert_to_parameter_stats(
global_stats,
local_stat,
))
new_stats_flat = jax.tree_map(
lambda g,
s,
p: _compute_stats(g, s, p, state.count),
grads_flat,
stats_flat,
params_flat)
outputs = jax.tree_map(
lambda g,
s,
p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_util.tree_unflatten(treedef, updates_flat)
new_local_stats_flat = []
for new_stat, local_stat in zip(new_stats_flat, local_stats_flat):
new_local_stats_flat.append(
_convert_from_parameter_stats(
new_stat,
local_stat,
))
max_size = global_stats.statistics.shape[1]
new_padded_statistics = []
padding_starts = []
for stat in new_stats_flat:
new_padded_statistics.extend(
[pad_square_matrix(stat, max_size) for stat in stat.statistics])
padding_starts.extend([len(stat) for stat in stat.statistics])
# Create global stats
# TODO(rohananil): Preconditioner is not updated every step, so cost of
# stack/pad can be obviated away.
# Pad the statistics and preconditioner matrices to be a multiple of
# num devices.
# TODO(rohananil): Relax to only the size of the mesh axis where the dim
# is split on.
to_pad = -len(new_padded_statistics) % num_devices_for_pjit
if not new_padded_statistics:
to_pad = num_devices_for_pjit
stat_dtype = jnp.float32
else:
stat_dtype = new_padded_statistics[0].dtype
new_padded_statistics.extend(
[jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)])
padding_starts += [0] * to_pad
if reuse_preconditioner:
prev_preconditioners = []
for stat in new_stats_flat:
prev_preconditioners.extend(stat.preconditioners)
prev_padded_preconditioners = pad_and_maybe_zero_preconditioners(
prev_preconditioners,
len(new_padded_statistics),
max_size,
state.count)
else:
prev_padded_preconditioners = None
new_stacked_padded_statistics = jnp.stack(new_padded_statistics)
new_stacked_padded_statistics = pjit.with_sharding_constraint(
new_stacked_padded_statistics, statistics_partition_spec)
stacked_padding_starts = jnp.array(padding_starts, jnp.int32)
prev_stacked_padded_preconditioners = _maybe(jnp.stack)(
prev_padded_preconditioners)
prev_stacked_padded_preconditioners = _maybe(pjit.with_sharding_constraint)(
        prev_stacked_padded_preconditioners, statistics_partition_spec)
def _internal_inverse_pth_root_all():
preconditioners, metrics = _matrix_inverse_pth_root_pjit(
new_stacked_padded_statistics,
global_stats.exponents,
stacked_padding_starts,
prev_stacked_padded_preconditioners,
statistics_partition_spec,
)
return preconditioners, metrics
perform_step = state.count % preconditioning_compute_steps == 0
if preconditioning_compute_steps == 1:
new_preconditioners, metrics = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large error value.
pd = precond_dim(new_stacked_padded_statistics.shape[2])
preconditioners_init = new_stacked_padded_statistics[:, :, :pd]
n = new_stacked_padded_statistics.shape[0]
metrics_init = cast(
TrainingMetrics,
init_training_metrics(
n,
generate_training_metrics=True,
))
new_errors = jnp.ones_like(metrics_init.inverse_pth_root_errors) * (
inverse_failure_threshold)
metrics_init = metrics_init.replace(inverse_pth_root_errors=new_errors)
init_state = [preconditioners_init, metrics_init]
new_preconditioners, metrics = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state)
if generate_training_metrics:
new_local_stats_flat = _add_metrics_into_local_stats(
new_local_stats_flat, metrics, ~perform_step)
new_local_stats = jax.tree_util.tree_unflatten(treedef,
new_local_stats_flat)
errors = metrics.inverse_pth_root_errors
errors = errors.reshape((-1, 1, 1))
predicate = jnp.logical_or(
jnp.isnan(errors),
errors >= inverse_failure_threshold).astype(new_preconditioners.dtype)
# TODO(rohananil): Check for numerical instabilities.
new_conditional_preconditioners = (
predicate * global_stats.preconditioners +
(1.0 - predicate) * new_preconditioners)
new_global_stats = GlobalShardedParameterStats(
new_stacked_padded_statistics,
new_conditional_preconditioners,
global_stats.exponents)
new_shampoo_state = ShampooState(
count=state.count + 1,
stats=ShardedShampooStats(new_global_stats, new_local_stats))
return updates, new_shampoo_state
def init_fn(params):
"""Initialise the optimiser's state."""
def _init(param):
preconditioner = preconditioner_from_params(param)
statistics = []
preconditioners = []
if not _skip_preconditioning(param):
shapes = preconditioner.shapes_for_preconditioners()
statistics = [
matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes
]
# If the preconditioner is using a low-rank representation, initialize
# it to zero instead of an invalid eye.
preconditioners = [
jnp.eye(s[0], s[1], dtype=jnp.float32) * (s[0] == s[1])
for s in shapes
]
diagonal_statistics = []
if _graft_type_has_diagonal_statistics():
diagonal_statistics = jnp.zeros_like(param)
# diagonal_momentum = _quantize_momentum(jnp.zeros_like(param))
# momentum = _quantize_momentum(jnp.zeros_like(param))
diagonal_momentum = jnp.zeros_like(param)
momentum = jnp.zeros_like(param)
return ParameterStats(
diagonal_statistics,
statistics,
preconditioners,
# _quantize_diagonal_statistics(diagonal_statistics),
# _maybe_quantize_statistics(statistics),
# _maybe_quantize_preconditioners(preconditioners),
diagonal_momentum,
momentum,
init_training_metrics(
len(statistics),
generate_training_metrics,
))
return ShampooState(
count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params))
def _skip_preconditioning(param):
return len(param.shape) < skip_preconditioning_rank_lt or any(
s > skip_preconditioning_dim_size_gt for s in param.shape)
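  # For example, with skip_preconditioning_rank_lt=1 only rank-0 (scalar)
  # parameters are skipped by the rank test, while any parameter that has a
  # dimension larger than skip_preconditioning_dim_size_gt (e.g. a large
  # embedding table) is skipped and falls back to the grafting update below.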
def _compute_stats(grad, state, param, step):
"""Compute per-parameter statistics."""
preconditioner = preconditioner_from_params(param)
new_statistics = [[]] * len(state.statistics)
w1 = beta2
w2 = jnp.where(beta2 == 1.0, beta2, 1.0 - beta2)
if not _skip_preconditioning(param):
def compute_updated_statistics():
return preconditioner.updated_statistics_from_grad(
state.statistics,
grad,
w1=w1,
w2=w2,
to_float=_to_float,
from_float=lambda x: x,
# from_float=lambda x: _maybe_quantize_statistics([x])[0],
precision=tensordot_precision,
)
if statistics_compute_steps > 1:
perform_step = step % statistics_compute_steps == 0
init_state = state.statistics
new_statistics = list(
efficient_cond(perform_step, compute_updated_statistics,
init_state))
else:
new_statistics = compute_updated_statistics()
return ParameterStats(state.diagonal_statistics,
new_statistics,
state.preconditioners,
state.diagonal_momentum,
state.momentum,
state.training_metrics)
mi_pth_root = functools.partial(
matrix_inverse_pth_root,
ridge_epsilon=matrix_epsilon,
precision=precision,
relative_matrix_epsilon=relative_matrix_epsilon,
lobpcg_topk_precondition=lobpcg_topk_precondition,
lobpcg_max_iter=lobpcg_max_iter,
eigh=eigh)
def _matrix_inverse_pth_root_vmap(xs, ps, padding_starts, prev):
return jax.vmap(mi_pth_root)(
xs, ps, padding_start=padding_starts, prev=prev)
def _matrix_inverse_pth_root_pjit(xs,
ps,
padding_starts,
prev_preconds=None,
statistics_partition_spec=None):
# Partition the concatenated statistics matrix across all cores.
pspec_for_partition = preconditioner_partition_spec
partitioned_xs = pjit.with_sharding_constraint(xs, pspec_for_partition)
if preconditioner_partition_spec:
partitioned_ps_spec = jax.sharding.PartitionSpec(
preconditioner_partition_spec[0])
else:
partitioned_ps_spec = None
partitioned_ps = pjit.with_sharding_constraint(ps, partitioned_ps_spec)
partitioned_prev_preconds = _maybe(pjit.with_sharding_constraint)(
prev_preconds, preconditioner_partition_spec)
partitioned_padding_starts = pjit.with_sharding_constraint(
padding_starts, partitioned_ps_spec) # paddings are scalars like ps.
# Run matrix inverse pth root on each shard.
partitioned_preconditioners, partitioned_metrics = (
_matrix_inverse_pth_root_vmap(
partitioned_xs,
partitioned_ps,
partitioned_padding_starts,
prev=partitioned_prev_preconds))
# Reshard output to have the same PSpec as input. This is required to avoid
# vmap seeing the full set of statistics.
partitioned_preconditioners = pjit.with_sharding_constraint(
partitioned_preconditioners, pspec_for_partition)
# Recombine the outputs at each core.
preconditioners = pjit.with_sharding_constraint(partitioned_preconditioners,
statistics_partition_spec)
metrics = pjit.with_sharding_constraint(partitioned_metrics,
jax.sharding.PartitionSpec())
return preconditioners, metrics
def _pmap_compute_preconditioners(states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners):
"""Computes preconditioners for given statistics in states in PMAP mode.
Args:
states: A list of optimizer states.
step: Current step number
statistics: A list of statistics for all variables (for every dim)
      num_statistics_per_state: Number of statistics per state to reconstruct
output states.
original_shapes: A list of shapes of the statistics.
exponents: Exponent power to use for inverse-pth roots.
max_size: Maximum dim of the statistics to pad.
prev_preconditioners: Previously available preconditioner.
Returns:
New optimizer states after computing the preconditioner.
"""
if batch_axis_name:
num_devices = lax.psum(1, batch_axis_name)
else:
num_devices = 1
num_statistics = len(statistics)
# Pad statistics and exponents to next multiple of num_devices.
packed_statistics = [
pad_square_matrix(stat, max_size) for stat in statistics
]
to_pad = -num_statistics % num_devices
packed_statistics.extend([
jnp.eye(max_size, dtype=packed_statistics[0].dtype)
for _ in range(to_pad)
])
exponents.extend([1 for _ in range(to_pad)])
paddings = [len(stat) for stat in statistics] + [0] * to_pad
if not packed_statistics:
return states
if reuse_preconditioner:
assert len(prev_preconditioners) == num_statistics
packed_preconditioners = pad_and_maybe_zero_preconditioners(
prev_preconditioners, len(packed_statistics), max_size, step)
else:
packed_preconditioners = None
all_statistics = batch(packed_statistics, num_devices)
all_exponents = batch(exponents, num_devices)
all_paddings = batch(paddings, num_devices)
all_preconditioners = _maybe(batch)(packed_preconditioners, num_devices)
def _internal_inverse_pth_root_all():
if batch_axis_name:
current_replica = lax.axis_index(batch_axis_name)
preconditioners, metrics = _matrix_inverse_pth_root_vmap(
all_statistics[current_replica],
all_exponents[current_replica],
all_paddings[current_replica],
_maybe_ix(all_preconditioners, current_replica),
)
preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name)
metrics = jax.lax.all_gather(metrics, batch_axis_name)
preconditioners_flat = unbatch(preconditioners)
metrics_flat = jax.tree_map(unbatch, metrics)
else:
preconditioners, metrics = _matrix_inverse_pth_root_vmap(
all_statistics[0],
all_exponents[0],
all_paddings[0],
_maybe_ix(all_preconditioners, 0),
)
preconditioners_flat = unbatch(jnp.stack([preconditioners]))
metrics = jax.tree_map(
functools.partial(jnp.expand_dims, axis=0), metrics)
metrics_flat = jax.tree_map(unbatch, metrics)
return preconditioners_flat, metrics_flat
perform_step = step % preconditioning_compute_steps == 0
if preconditioning_compute_steps == 1:
preconditioners_flat, metrics_flat = _internal_inverse_pth_root_all()
else:
# Passing statistics instead of preconditioners as they are similarly
# shaped tensors. Note statistics will be ignored as we are passing in
# a large error value.
preconditioners_init = [
s[:, :precond_dim(s.shape[0])] for s in packed_statistics
]
n = len(packed_statistics)
metrics_init = jax.tree_map(
lambda x: [x] * n,
default_training_metrics().replace(
inverse_pth_root_errors=inverse_failure_threshold))
init_state = [preconditioners_init, metrics_init]
preconditioners_flat, metrics_flat = efficient_cond(
perform_step, _internal_inverse_pth_root_all, init_state)
def _skip(error):
condition = jnp.logical_or(
jnp.isnan(error), error >= inverse_failure_threshold)
return condition.astype(error.dtype)
def _select_preconditioner(error, new_p, old_p):
return lax.cond(
_skip(error), lambda _: old_p, lambda _: new_p, operand=None)
new_preconditioners_flat = []
new_errors_flat = metrics_flat.inverse_pth_root_errors
for p, shape, prev_p, error in zip(preconditioners_flat, original_shapes,
prev_preconditioners, new_errors_flat):
new_preconditioners_flat.append(
_select_preconditioner(error, p[:shape[0], :shape[1]], prev_p))
    assert len(states) == len(num_statistics_per_state), (
        f"{len(states)} vs {len(num_statistics_per_state)}")
assert len(new_preconditioners_flat) == num_statistics
assert len(new_errors_flat) == len(packed_statistics), (
len(new_errors_flat), len(packed_statistics))
assert len(new_errors_flat) == num_statistics + to_pad, (
len(new_errors_flat), num_statistics, to_pad)
    # Add back empty preconditioners so that we can set the optimizer state.
preconditioners_for_states = []
idx = 0
metrics_for_states = []
for num_statistics, state in zip(num_statistics_per_state, states):
if num_statistics == 0:
preconditioners_for_states.append([])
metrics_for_states.append(
init_training_metrics(0, generate_training_metrics))
else:
preconditioners_for_state = new_preconditioners_flat[idx:idx +
num_statistics]
assert len(state.statistics) == len(preconditioners_for_state)
preconditioners_for_states.append(preconditioners_for_state)
if generate_training_metrics:
# pylint:disable=cell-var-from-loop Used immediately.
metrics_for_state = jax.tree_map(
lambda x: jnp.stack(x[idx:idx + num_statistics]),
metrics_flat,
is_leaf=lambda x: isinstance(x, list))
assert jax.tree_util.tree_all(
jax.tree_map(lambda x: len(state.statistics) == len(x),
metrics_for_state))
# If we skipped preconditioner computation, record old metrics.
metrics_for_state = efficient_cond(perform_step,
lambda: [metrics_for_state],
[state.training_metrics])[0]
# pylint:enable=cell-var-from-loop
else:
metrics_for_state = optax.MaskedNode()
metrics_for_states.append(metrics_for_state)
idx += num_statistics
new_states = []
for state, new_preconditioners, new_metrics in zip(
states, preconditioners_for_states, metrics_for_states):
# Note the preconditioner may have been skipped, but we still update the
# metrics with the new error values; whether the preconditioner that's
# actively being used is stale can be derived from the new_metrics
# being greater than the failure threshold.
new_states.append(
ParameterStats(state.diagonal_statistics,
state.statistics,
new_preconditioners,
state.diagonal_momentum,
state.momentum,
new_metrics))
return new_states
def _compute_preconditioners(states, params, step):
"""Computes preconditioners for given statistics in states.
Args:
states: A list of optimizer states.
params: A list of params.
step: Current step number
Returns:
New optimizer states after computing the preconditioner.
"""
statistics = []
num_statistics_per_state = []
original_shapes = []
exponents = []
max_size = 0
prev_preconditioners = []
for state, param in zip(states, params):
num_statistics = len(state.statistics)
num_statistics_per_state.append(num_statistics)
original_shapes_for_state = []
if num_statistics > 0:
preconditioner = preconditioner_from_params(param)
for statistic in state.statistics:
exponents.append(preconditioner.exponent_for_preconditioner(
) if exponent_override == 0 else exponent_override)
original_shapes_for_state.append(statistic.shape)
max_size = max(max_size, statistic.shape[0])
statistics.extend(state.statistics)
prev_preconditioners.extend(state.preconditioners)
original_shapes.extend(original_shapes_for_state)
return _pmap_compute_preconditioners(states,
step,
statistics,
num_statistics_per_state,
original_shapes,
exponents,
max_size,
prev_preconditioners)
def _transform_grad(grad, state, param, step):
"""Transform per-parameter gradients."""
preconditioner = preconditioner_from_params(param)
sgd_update = grad
new_diagonal_statistics = state.diagonal_statistics
if (graft_type == GraftingType.ADAGRAD or
graft_type == GraftingType.ADAGRAD_NORMALIZED):
scaled_grad = grad
if graft_type == GraftingType.ADAGRAD_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + _EPSILON)
new_diagonal_statistics = (
state.diagonal_statistics.to_float() + jnp.square(scaled_grad))
adagrad_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)
grafting_update = adagrad_update
elif (graft_type == GraftingType.RMSPROP or
graft_type == GraftingType.RMSPROP_NORMALIZED):
scaled_grad = grad
if graft_type == GraftingType.RMSPROP_NORMALIZED:
scaled_grad = grad / (jnp.linalg.norm(grad) + _EPSILON)
w1 = beta2
w2 = jnp.where(beta2 == 1.0, beta2, 1.0 - beta2)
new_diagonal_statistics = (
w1 * state.diagonal_statistics + w2 * jnp.square(scaled_grad))
rmsprop_update = scaled_grad / (
jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon)
if clip_by_scaled_gradient_norm:
scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / (
jnp.sqrt(float(rmsprop_update.size)))
clipping_denom = jnp.maximum(
1., scaled_grad_norm / clip_by_scaled_gradient_norm)
rmsprop_update /= clipping_denom
grafting_update = rmsprop_update
elif graft_type == GraftingType.SGD:
grafting_update = sgd_update
elif graft_type == GraftingType.NONE:
grafting_update = sgd_update # Use SGD during warmup.
else:
grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update)
lr = learning_rate
if callable(learning_rate):
lr = learning_rate(step)
preconditioner_multiplier = lr if not decoupled_learning_rate else 1.0
grafting_update = grafting_update * preconditioner_multiplier
precond_grad = grad
if not _skip_preconditioning(param):
precond_grad = preconditioner.preconditioned_grad(precond_grad,
state.preconditioners)
else:
if graft_type == GraftingType.NONE:
logging.error("skipping preconditioning without grafting for param %s",
param)
precond_grad = grafting_update
grafting_update_norm = jnp.linalg.norm(grafting_update)
precond_grad_norm = jnp.linalg.norm(precond_grad)
if graft_type is not GraftingType.NONE:
multiplier = grafting_update_norm / (precond_grad_norm + _EPSILON)
else:
multiplier = 1.0
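    # Grafting: rescale the preconditioned direction so its norm matches the
    # grafting update's norm, i.e. the grafting method sets the per-step
    # magnitude while Shampoo supplies the direction.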
shampoo_update = precond_grad * multiplier
shampoo_update_with_wd = shampoo_update
grafting_update_with_wd = grafting_update
if (weight_decay != 0 and weight_decay is not None and
not decoupled_weight_decay):
shampoo_update_with_wd = shampoo_update + weight_decay * param
grafting_update_with_wd = grafting_update + weight_decay * param
w = (1.0 - beta1) if moving_average_for_momentum else 1.0
shampoo_update_with_wd_momentum = (
state.momentum * beta1 + w * shampoo_update_with_wd)
grafting_update_with_wd_momentum = (
state.diagonal_momentum * beta1 + w * grafting_update_with_wd)
run_shampoo = (step >= start_preconditioning_step).astype(
grafting_update_with_wd_momentum.dtype)
momentum_update = (
run_shampoo * shampoo_update_with_wd_momentum +
(1.0 - run_shampoo) * grafting_update_with_wd_momentum)
wd_update = (
run_shampoo * shampoo_update_with_wd +
(1.0 - run_shampoo) * grafting_update_with_wd)
nesterov_momentum_update = momentum_update
if nesterov:
nesterov_momentum_update = w * wd_update + beta1 * momentum_update
if (weight_decay != 0 and weight_decay is not None and
decoupled_weight_decay):
nesterov_momentum_update = (
nesterov_momentum_update + lr * weight_decay * param)
momentum_multiplier = lr if decoupled_learning_rate else 1.0
transformed_update = -1.0 * momentum_multiplier * nesterov_momentum_update
new_diagonal_momentum = grafting_update_with_wd_momentum
new_momentum = shampoo_update_with_wd_momentum
param_stats = ParameterStats(new_diagonal_statistics,
state.statistics,
state.preconditioners,
new_diagonal_momentum,
new_momentum,
state.training_metrics)
return transformed_update, param_stats
def update_fn(grads, state, params):
"""Transform the input gradient and update all statistics.
Args:
grads: the gradient tensors for the parameters and any custom
gradients for preconditioners.
state: a named tuple containing the state of the optimizer
params: the parameters that should be updated.
Returns:
A tuple containing the new parameters and the new optimizer state.
"""
grads_custom = None
if custom_preconditioner and isinstance(grads, tuple):
grads, grads_custom = grads
params_flat, treedef = jax.tree_util.tree_flatten(params)
stats_flat = treedef.flatten_up_to(state.stats)
grads_flat = treedef.flatten_up_to(grads)
stats_grads = grads_flat
if custom_preconditioner and grads_custom is not None:
stats_grads = treedef.flatten_up_to(grads_custom)
new_stats_flat = jax.tree_map(
lambda g,
s,
p: _compute_stats(g, s, p, state.count),
stats_grads,
stats_flat,
params_flat)
new_stats_flat = _compute_preconditioners(new_stats_flat,
params_flat,
state.count)
outputs = jax.tree_map(
lambda g,
s,
p: _transform_grad(g, s, p, state.count),
grads_flat,
new_stats_flat,
params_flat)
updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ())
updates = jax.tree_util.tree_unflatten(treedef, updates_flat)
new_stats = jax.tree_util.tree_unflatten(treedef, new_stats_flat)
new_state = ShampooState(count=state.count + 1, stats=new_stats)
return updates, new_state
if shard_optimizer_states:
# Hijacks the init_fn signature so we can return an OptState with
# appropriate init_fns.
opt_init_fn = sharded_init_fn
def _init_fns(unused_params):
return InitFnState(
init_fn=opt_init_fn,
pspec_fn=sharded_init_partition_spec_fn,
shape_and_dtype_fn=sharded_init_shape_and_dtype_fn)
opt_update_fn = sharded_update_fn
return optax.GradientTransformation(_init_fns, opt_update_fn)
else:
return optax.GradientTransformation(init_fn, update_fn)
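# Minimal usage sketch (not part of the original implementation): the returned
# optax.GradientTransformation follows the standard optax protocol. `tx` below
# stands for the transformation produced by the enclosing factory function.
def _example_shampoo_step(tx, params, grads, opt_state=None):
  """Illustrative only: applies one update with an optax transformation."""
  if opt_state is None:
    opt_state = tx.init(params)  # builds the (possibly sharded) ShampooState
  updates, opt_state = tx.update(grads, opt_state, params)
  new_params = optax.apply_updates(params, updates)
  return new_params, opt_state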
|
import argparse
import os
import yaml
from collections import OrderedDict
import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper
import copy_wrapper
from code_template import CodeTemplate
# This file is the top-level entry point for code generation in ATen.
# It takes an arbitrary number of arguments specifying metadata files to
# process (.cwrap, .yaml and .h) and outputs a number of generated header
# and cpp files in ATen/ (see invocations of 'write' for each file that
# is written.) It is invoked from cmake; look for the 'cwrap_files'
# variable for an up-to-date list of files which are passed.
parser = argparse.ArgumentParser(description='Generate ATen source files')
parser.add_argument('files', help='cwrap files', nargs='+')
parser.add_argument(
'-s',
'--source-path',
help='path to source directory for ATen',
default='.')
parser.add_argument(
'-o',
'--output-dependencies',
help='output a list of dependencies into the given file and exit')
parser.add_argument(
'-n',
'--no-cuda',
action='store_true',
help='disable generation of cuda files')
parser.add_argument(
'-d', '--output-dir', help='output directory', default='ATen')
options = parser.parse_args()
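# Example invocation (illustrative only; in the real build the file list and
# paths are supplied by cmake via the 'cwrap_files' variable noted above):
#   python gen.py -s <aten-source-dir> -d <output-dir> \
#       <declarations>.cwrap <thnn-header>.h nn.yaml native_functions.yaml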
if options.output_dir is not None and not os.path.exists(options.output_dir):
os.makedirs(options.output_dir)
class FileManager(object):
def __init__(self):
self.filenames = set()
self.outputs_written = False
self.undeclared_files = []
def will_write(self, filename):
filename = '{}/{}'.format(options.output_dir, filename)
if self.outputs_written:
raise Exception("'will_write' can only be called before " +
"the call to write_outputs, refactor so outputs are registered " +
"before running the generators")
self.filenames.add(filename)
def _write_if_changed(self, filename, contents):
try:
with open(filename, 'r') as f:
old_contents = f.read()
except IOError:
old_contents = None
if contents != old_contents:
with open(filename, 'w') as f:
f.write(contents)
def write_outputs(self, filename):
"""Write a file containing the list of all outputs which are
generated by this script."""
self._write_if_changed(
filename,
''.join(name + ";" for name in sorted(self.filenames)))
self.outputs_written = True
def write(self, filename, s):
filename = '{}/{}'.format(options.output_dir, filename)
self._write_if_changed(filename, s)
if filename not in self.filenames:
self.undeclared_files.append(filename)
else:
self.filenames.remove(filename)
def check_all_files_written(self):
if len(self.undeclared_files) > 0:
raise Exception(
"trying to write files {} which are not ".format(self.undeclared_files) +
"in the list of outputs this script produces. " +
"use will_write to add them.")
if len(self.filenames) > 0:
raise Exception("Outputs declared with 'will_write' were " +
"never written: {}".format(self.filenames))
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
TEMPLATE_PATH + "/GeneratorDerived.h")
STORAGE_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/StorageDerived.cpp")
STORAGE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/StorageDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.cpp")
TENSOR_DERIVED_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDerived.cpp")
TENSOR_SPARSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorSparse.cpp")
TENSOR_DENSE_CPP = CodeTemplate.from_file(
TEMPLATE_PATH + "/TensorDense.cpp")
TENSOR_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorDerived.h")
TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
NATIVE_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/NativeFunctions.h")
file_manager = FileManager()
generators = {
'CPUGenerator.h': {
'name': 'CPU',
'th_generator': 'THGenerator * generator;',
'header': 'TH/TH.h',
},
'CUDAGenerator.h': {
'name': 'CUDA',
'th_generator': 'THCGenerator * generator;',
'header': 'THC/THC.h'
},
}
backends = ['CPU']
if not options.no_cuda:
backends.append('CUDA')
densities = ['Dense', 'Sparse']
# scalar_name, c_type, accreal, th_scalar_type, is_floating_type
scalar_types = [
('Byte', 'uint8_t', 'Long', 'uint8_t', False),
('Char', 'int8_t', 'Long', 'int8_t', False),
('Double', 'double', 'Double', 'double', True),
('Float', 'float', 'Double', 'float', True),
('Int', 'int', 'Long', 'int32_t', False),
('Long', 'int64_t', 'Long', 'int64_t', False),
('Short', 'int16_t', 'Long', 'int16_t', False),
('Half', 'Half', 'Double', 'THHalf', True),
]
# shared environment for non-derived base classes Type.h Tensor.h Storage.h
top_env = {
'type_registrations': [],
'type_headers': [],
'type_method_declarations': [],
'type_method_definitions': [],
'type_method_inline_definitions': [],
'tensor_method_declarations': [],
'tensor_method_definitions': [],
'function_declarations': [],
'function_definitions': [],
'type_ids': [],
'native_function_declarations': [],
}
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def postprocess_output_declarations(output_declarations):
# ensure each return has a name associated with it
for decl in output_declarations:
has_named_ret = False
for n, ret in enumerate(decl.returns):
if 'name' not in ret:
assert not has_named_ret
if decl.inplace:
ret['name'] = 'self'
elif len(decl.returns) == 1:
ret['name'] = 'result'
else:
ret['name'] = 'result' + str(n)
else:
has_named_ret = True
def remove_key_if_none(dictionary, key):
if key in dictionary.keys() and dictionary[key] is None:
del dictionary[key]
return dictionary
return [remove_key_if_none(decl._asdict(), 'buffers')
for decl in output_declarations]
def format_yaml(data):
if options.output_dependencies:
# yaml formatting is slow so don't do it if we will ditch it.
return ""
noalias_dumper = yaml.dumper.SafeDumper
noalias_dumper.ignore_aliases = lambda self, data: True
# Support serializing OrderedDict
noalias_dumper.add_representer(OrderedDict, dict_representer)
return yaml.dump(data, default_flow_style=False, Dumper=noalias_dumper)
def generate_storage_type_and_tensor(backend, density, scalar_type, declarations):
scalar_name, c_type, accreal, th_scalar_type, is_floating_type = scalar_type
env = {}
density_tag = 'Sparse' if density == 'Sparse' else ''
th_density_tag = 'S' if density == 'Sparse' else ''
env['Density'] = density
env['ScalarName'] = scalar_name
env['ScalarType'] = c_type
env['THScalarType'] = th_scalar_type
env['AccScalarName'] = accreal
env['isFloatingType'] = is_floating_type
env['isIntegralType'] = not is_floating_type
env['Storage'] = "{}{}Storage".format(backend, scalar_name)
env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
env['Tensor'] = "{}{}{}Tensor".format(density_tag, backend, scalar_name)
env['DenseTensor'] = "{}{}Tensor".format(backend, scalar_name)
env['SparseTensor'] = "Sparse{}{}Tensor".format(backend, scalar_name)
env['Backend'] = density_tag + backend
env['DenseBackend'] = backend
# used for generating switch logic for external functions
tag = density_tag + backend + scalar_name
env['TypeID'] = 'TypeID::' + tag
top_env['type_ids'].append(tag + ',')
if backend == 'CUDA':
env['th_headers'] = [
'#include <THC/THC.h>',
'#include <THCUNN/THCUNN.h>',
'#undef THNN_',
'#undef THCIndexTensor_',
'#include <THCS/THCS.h>',
'#undef THCIndexTensor_',
]
env['extra_cuda_headers'] = ['#include <ATen/cuda/CUDAHalf.cuh>']
sname = '' if scalar_name == "Float" else scalar_name
env['THType'] = 'Cuda{}'.format(sname)
env['THStorage'] = 'THCuda{}Storage'.format(sname)
if density == 'Dense':
env['THTensor'] = 'THCuda{}Tensor'.format(sname)
else:
env['THTensor'] = 'THCS{}Tensor'.format(scalar_name)
env['THIndexTensor'] = 'THCudaLongTensor'
env['state'] = ['context->thc_state']
env['isCUDA'] = 'true'
env['storage_device'] = 'return storage->device;'
env['Generator'] = 'CUDAGenerator'
else:
env['th_headers'] = [
'#include <TH/TH.h>',
'#include <THNN/THNN.h>',
'#undef THNN_',
'#include <THS/THS.h>',
]
env['extra_cuda_headers'] = []
env['THType'] = scalar_name
env['THStorage'] = "TH{}Storage".format(scalar_name)
env['THTensor'] = 'TH{}{}Tensor'.format(th_density_tag, scalar_name)
env['THIndexTensor'] = 'THLongTensor'
env['state'] = []
env['isCUDA'] = 'false'
env['storage_device'] = 'throw std::runtime_error("CPU storage has no device");'
env['Generator'] = 'CPUGenerator'
env['AS_REAL'] = env['ScalarType']
if scalar_name == "Half":
env['SparseTensor'] = 'Tensor'
if backend == "CUDA":
env['to_th_type'] = 'HalfFix<__half,Half>'
env['to_at_type'] = 'HalfFix<Half,__half>'
env['AS_REAL'] = 'convert<half,double>'
env['THScalarType'] = 'half'
else:
env['to_th_type'] = 'HalfFix<THHalf,Half>'
env['to_at_type'] = 'HalfFix<Half,THHalf>'
elif scalar_name == 'Long':
env['to_th_type'] = 'long'
env['to_at_type'] = 'int64_t'
else:
env['to_th_type'] = ''
env['to_at_type'] = ''
declarations, definitions = function_wrapper.create_derived(
env, declarations)
env['type_derived_method_declarations'] = declarations
env['type_derived_method_definitions'] = definitions
if density != 'Sparse':
# there are no special storage types for Sparse, they are composed
# of Dense tensors
file_manager.write(env['Storage'] + ".cpp", STORAGE_DERIVED_CPP.substitute(env))
file_manager.write(env['Storage'] + ".h", STORAGE_DERIVED_H.substitute(env))
env['TensorDenseOrSparse'] = TENSOR_DENSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimension'
else:
env['TensorDenseOrSparse'] = TENSOR_SPARSE_CPP.substitute(env)
env['THTensor_nDimension'] = 'tensor->nDimensionI + tensor->nDimensionV'
file_manager.write(env['Type'] + ".cpp", TYPE_DERIVED_CPP.substitute(env))
file_manager.write(env['Type'] + ".h", TYPE_DERIVED_H.substitute(env))
file_manager.write(env['Tensor'] + ".cpp", TENSOR_DERIVED_CPP.substitute(env))
file_manager.write(env['Tensor'] + ".h", TENSOR_DERIVED_H.substitute(env))
type_register = (('context->type_registry[static_cast<int>(Backend::{})]' +
'[static_cast<int>(ScalarType::{})].reset(new {}(context));')
.format(env['Backend'], scalar_name, env['Type']))
top_env['type_registrations'].append(type_register)
top_env['type_headers'].append(
'#include "ATen/{}.h"'.format(env['Type']))
return env
def iterate_types():
for backend in backends:
for density in densities:
for scalar_type in scalar_types:
if density == 'Sparse' and scalar_type[0] == 'Half':
# THS does not do half type yet.
continue
yield (backend, density, scalar_type)
###################
# declare what files will be output _before_ we do any work
# so that the script runs quickly when we are just querying the
# outputs
def declare_outputs():
files = ['Declarations.yaml', 'Type.h', 'Type.cpp', 'Tensor.h',
'TensorMethods.h', 'Functions.h',
'Copy.cpp', 'NativeFunctions.h']
for f in files:
file_manager.will_write(f)
for fname in sorted(generators.keys()):
if generators[fname]['name'] in backends:
file_manager.will_write(fname)
for backend, density, scalar_types in iterate_types():
scalar_name = scalar_types[0]
full_backend = "Sparse" + backend if density == "Sparse" else backend
for kind in ["Storage", "Type", "Tensor"]:
if kind == 'Storage' and density == "Sparse":
continue
file_manager.will_write("{}{}{}.h".format(full_backend, scalar_name, kind))
file_manager.will_write("{}{}{}.cpp".format(full_backend, scalar_name, kind))
def filter_by_extension(files, *extensions):
filtered_files = []
for file in files:
for extension in extensions:
if file.endswith(extension):
filtered_files.append(file)
return filtered_files
def generate_outputs():
cwrap_files = filter_by_extension(options.files, '.cwrap')
nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
native_files = filter_by_extension(options.files, 'native_functions.yaml')
declarations = [d
for file in cwrap_files
for d in cwrap_parser.parse(file)]
declarations += nn_parse.run(nn_files)
declarations += native_parse.run(native_files)
declarations = preprocess_declarations.run(declarations)
for fname, env in generators.items():
if env['name'] in backends:
file_manager.write(fname, GENERATOR_DERIVED.substitute(env))
# note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will
    # be used by function_wrapper.create_derived
output_declarations = function_wrapper.create_generic(top_env, declarations)
output_declarations = postprocess_output_declarations(output_declarations)
file_manager.write("Declarations.yaml", format_yaml(output_declarations))
# populated by generate_storage_type_and_tensor
all_types = []
for backend, density, scalar_type in iterate_types():
all_types.append(generate_storage_type_and_tensor(
backend, density, scalar_type, declarations))
file_manager.write('Type.h', TYPE_H.substitute(top_env))
file_manager.write('Type.cpp', TYPE_CPP.substitute(top_env))
file_manager.write('Tensor.h', TENSOR_H.substitute(top_env))
file_manager.write('TensorMethods.h', TENSOR_METHODS_H.substitute(top_env))
file_manager.write('Functions.h', FUNCTIONS_H.substitute(top_env))
file_manager.write('Copy.cpp', copy_wrapper.create(all_types))
file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H.substitute(top_env))
file_manager.check_all_files_written()
declare_outputs()
if options.output_dependencies is not None:
file_manager.write_outputs(options.output_dependencies)
else:
generate_outputs()
|
# this code should be common among cwrap and ATen preprocessing
# for now, I have put it in one place but right now is copied out of cwrap
from copy import deepcopy
from itertools import product
def parse_arguments(args):
new_args = []
for arg in args:
# Simple arg declaration of form "<type> <name>"
if isinstance(arg, str):
t, _, name = arg.partition(' ')
new_args.append({'type': t, 'name': name})
elif isinstance(arg, dict):
if 'arg' in arg:
arg['type'], _, arg['name'] = arg['arg'].partition(' ')
del arg['arg']
new_args.append(arg)
else:
assert False
return new_args
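# Illustrative example (not in the original source):
#   parse_arguments(['THTensor* self', {'arg': 'int64_t dim', 'default': 0}])
#   -> [{'type': 'THTensor*', 'name': 'self'},
#       {'type': 'int64_t', 'name': 'dim', 'default': 0}]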
def set_declaration_defaults(declaration):
declaration.setdefault('arguments', [])
declaration.setdefault('return', 'void')
if 'cname' not in declaration:
declaration['cname'] = declaration['name']
if 'backends' not in declaration:
declaration['backends'] = ['CPU', 'CUDA']
if 'api_name' not in declaration:
declaration['api_name'] = (declaration['python_name']
if 'python_name' in declaration else declaration['name'])
# Simulate multiple dispatch, even if it's not necessary
if 'options' not in declaration:
declaration['options'] = [{'arguments': declaration['arguments']}]
del declaration['arguments']
# Parse arguments (some of them can be strings)
for option in declaration['options']:
option['arguments'] = parse_arguments(option['arguments'])
# Propagate defaults from declaration to options
for option in declaration['options']:
for k, v in declaration.items():
# TODO(zach): why does cwrap not propagate 'name'? I need it
            # propagated for ATen
if k != 'options':
option.setdefault(k, v)
# TODO(zach): added option to remove keyword handling for C++ which cannot
# support it.
def filter_unique_options(options, allow_kwarg, type_to_signature, remove_self):
def exclude_arg(arg):
return arg.get('ignore_check') or arg['type'] == 'CONSTANT'
def exclude_arg_with_self_check(arg):
return exclude_arg(arg) or (remove_self and arg['name'] == 'self')
def signature(option, kwarg_only_count):
if kwarg_only_count == 0:
kwarg_only_count = None
else:
kwarg_only_count = -kwarg_only_count
arg_signature = '#'.join(
type_to_signature.get(arg['type'], arg['type'])
for arg in option['arguments'][:kwarg_only_count]
if not exclude_arg_with_self_check(arg))
if kwarg_only_count is None:
return arg_signature
kwarg_only_signature = '#'.join(
arg['name'] + '#' + arg['type']
for arg in option['arguments'][kwarg_only_count:]
if not exclude_arg(arg))
return arg_signature + "#-#" + kwarg_only_signature
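    # Illustrative example (not in the original source): an option whose
    # non-excluded argument types are THTensor* and int64_t, with no
    # kwarg-only arguments, yields the signature string 'THTensor*#int64_t'
    # (after any type_to_signature mapping is applied).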
seen_signatures = set()
unique = []
for option in options:
        # only check num_kwarg_only == 0 if allow_kwarg == False
limit = len(option['arguments']) if allow_kwarg else 0
for num_kwarg_only in range(0, limit + 1):
sig = signature(option, num_kwarg_only)
if sig not in seen_signatures:
if num_kwarg_only > 0:
for arg in option['arguments'][-num_kwarg_only:]:
arg['kwarg_only'] = True
unique.append(option)
seen_signatures.add(sig)
break
return unique
def enumerate_options_due_to_default(declaration,
        allow_kwarg=True, type_to_signature={}, remove_self=True):
# Checks to see if an argument with a default keyword is a Tensor that
# by default can be NULL. In this case, instead of generating another
# option that excludes this argument, we will instead generate a single
# function call that allows for the Tensor to be NULL
def is_nullable_tensor_arg(arg):
return arg['type'] == 'THTensor*' and arg['default'] == 'nullptr'
# TODO(zach): in cwrap this is shared among all declarations
# but seems to assume that all declarations will have the same
new_options = []
for option in declaration['options']:
optional_args = []
for i, arg in enumerate(option['arguments']):
if 'default' in arg:
optional_args.append(i)
for permutation in product((True, False), repeat=len(optional_args)):
option_copy = deepcopy(option)
option_copy['has_full_argument_list'] = sum(permutation) == len(optional_args)
for i, bit in zip(optional_args, permutation):
arg = option_copy['arguments'][i]
# PyYAML interprets NULL as None...
arg['default'] = 'NULL' if arg['default'] is None else arg['default']
if not bit:
arg['declared_type'] = arg['type']
arg['type'] = 'CONSTANT'
arg['ignore_check'] = True
new_options.append(option_copy)
declaration['options'] = filter_unique_options(new_options,
allow_kwarg, type_to_signature, remove_self)
def sort_by_number_of_options(declaration, reverse=True):
def num_checked_args(option):
return sum(map(lambda a: not a.get('ignore_check', False), option['arguments']))
declaration['options'].sort(key=num_checked_args, reverse=reverse)
class Function(object):
def __init__(self, name):
self.name = name
self.arguments = []
def add_argument(self, arg):
assert isinstance(arg, Argument)
self.arguments.append(arg)
def __repr__(self):
return self.name + '(' + ', '.join(map(lambda a: a.__repr__(), self.arguments)) + ')'
class Argument(object):
def __init__(self, _type, name, is_optional):
self.type = _type
self.name = name
self.is_optional = is_optional
def __repr__(self):
return self.type + ' ' + self.name
def parse_header(path):
with open(path, 'r') as f:
lines = f.read().split('\n')
    # Remove empty lines and preprocessor directives
lines = filter(lambda l: l and not l.startswith('#'), lines)
# Remove line comments
lines = map(lambda l: l.partition('//'), lines)
# Select line and comment part
lines = map(lambda l: (l[0].strip(), l[2].strip()), lines)
# Remove trailing special signs
lines = map(lambda l: (l[0].rstrip(');').rstrip(','), l[1]), lines)
# Split arguments
lines = map(lambda l: (l[0].split(','), l[1]), lines)
# Flatten lines
new_lines = []
for l, c in lines:
for split in l:
new_lines.append((split, c))
lines = new_lines
del new_lines
# Remove unnecessary whitespace
lines = map(lambda l: (l[0].strip(), l[1]), lines)
# Remove empty lines
lines = filter(lambda l: l[0], lines)
generic_functions = []
for l, c in lines:
if l.startswith('TH_API void THNN_'):
            # slice off exactly the prefix (lstrip removes a character set, not a prefix)
            fn_name = l[len('TH_API void THNN_'):]
if fn_name[0] == '(' and fn_name[-2] == ')':
fn_name = fn_name[1:-2]
else:
fn_name = fn_name[:-1]
generic_functions.append(Function(fn_name))
elif l:
t, name = l.split()
if '*' in name:
t = t + '*'
name = name[1:]
generic_functions[-1].add_argument(
Argument(t, name, '[OPTIONAL]' in c))
return generic_functions
|
import re
from copy import deepcopy
from function_wrapper import TYPE_FORMAL_GENERIC
import common_with_cwrap
type_map = {
'floating_point': [
'Float',
'Double',
'Half',
],
'integral': [
'Byte',
'Char',
'Short',
'Int',
'Long'
],
}
all_types = type_map['floating_point'] + type_map['integral']
type_map['all'] = all_types
all_backends = ['CPU', 'CUDA', 'SparseCPU', 'SparseCUDA']
default_backends = ['CPU', 'CUDA']
sparse_map = {
'CPU': 'SparseCPU',
'CUDA': 'SparseCUDA',
}
def process_types_and_backends(option):
# if specific pairs were not listed, then enumerate them
# based on the backend and type attributes
# if backend or type is not defined, it is assumed to be all of them
if 'backend_type_pairs' not in option:
backends = option.get('backends', default_backends)
if option.get('aten_sparse', False):
backends.extend([sparse_map[p] for p in backends if p in sparse_map])
backends = set(backends)
types = option.get('types', all_types)
pairs = [[p, t] for p in backends for t in types]
else:
pairs = option['backend_type_pairs']
# expand type alias (integral, floating_point, all)
def expand(pair):
p, t = pair
assert(p in all_backends)
if t in type_map:
return [(p, tt) for tt in type_map[t]]
assert(t in all_types)
return [(p, t)]
pairs = set(p for pair in pairs for p in expand(pair))
# disable CUDA Half if there is a Sparse argument
for arg in option.get('arguments', []):
if arg['type'] == 'THSTensor*':
pairs.discard(('CUDA', 'Half'))
# special case remove Half for cpu unless it is explicitly enabled,
if not option.get('cpu_half', False):
pairs.discard(('CPU', 'Half'))
# sort the result for easy reading
option['backend_type_pairs'] = sorted([p for p in pairs])
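# Illustrative example (not in the original source): an option with
# backends=['CPU', 'CUDA'] and types=['floating_point'] expands to pairs such
# as ('CPU', 'Float'), ('CPU', 'Double') and ('CUDA', 'Half'); ('CPU', 'Half')
# is then discarded unless the option sets cpu_half.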
def exclude(declaration):
return 'only_register' in declaration or declaration.get('python_name') == 'ndimension'
def add_variants(option):
option.setdefault('variants', ['method'])
# if we have 'output' arguments, generate a variant where
# we mark outputs as allocate = True, and where the method variant
# is disabled...
def handle_outputs_taken_as_arguments(options):
new_options = []
def is_nullable(arg):
return (arg['type'] in {'THIntegerTensor*', 'THTensor*'} and
arg.get('default', '') in {None, 'NULL', 'nullptr'})
def should_generate_out_variant(option):
if 'function' in option['variants'] and option['mode'] != 'native':
# don't generate _out variants for in-place functions
return re.search('(^__i|[^_]_$)', option['api_name']) is None
return False
for option in options:
for arg in option['arguments']:
# mark arguments which can be null
if is_nullable(arg):
arg['is_nullable'] = True
if any('output' in arg for arg in option['arguments']):
allocate_option = deepcopy(option)
# the allocating option needs to be marked
for arg in allocate_option['arguments']:
if 'output' in arg:
arg['allocate'] = True
# the original option, which takes arguments for the results,
            # is no longer a method, and has _out added to indicate it takes
# output arguments
if should_generate_out_variant(option):
if 'method' in option['variants']:
option['variants'].remove('method')
option['api_name'] += '_out'
new_options.append(option)
new_options.append(allocate_option)
else:
new_options.append(option)
return new_options
def sanitize_return(option):
ret = option['return']
    m = re.match(r'argument (\d+(,\d+)*)', ret)
if m is not None:
arguments = [int(x) for x in m.group(1).split(',')]
option['return'] = {'kind': 'arguments', 'arguments': arguments}
elif ret == 'self':
option['return'] = {'kind': 'arguments', 'arguments': []}
for i, x in enumerate(option['arguments']):
if x['name'] == 'self':
option['return']['arguments'].append(i)
break
else:
option['return'] = {'kind': 'type', 'type': option['return']}
def set_mode(option):
option['mode'] = option.get('mode', 'TH')
# To enable 0-dim support in TH operations
# we find all places where a single Scalar replaced with a Tensor
# as an argument is still a valid function
# we then mark the tensor variant with a key zero_dim_dispatch_when_scalar: name
# where 'name' is the name of the argument that should be a scalar
# during dispatch, if that argument is marked internally as holding a scalar
# then the method will dispatch to that function.
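# Illustrative example (not in the original source): given both
# add(Tensor self, Tensor other) and add(Tensor self, real other), the Tensor
# variant is annotated with zero_dim_dispatch_when_scalar: other, so a 0-dim
# tensor passed as `other` dispatches to the scalar overload.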
def discover_zero_dim_tensor_operations(declaration):
def exclude(arg):
return arg.get('ignore_check')
def signature(option, i=None, value=None):
elements = [TYPE_FORMAL_GENERIC.get(arg['type'], arg['type'])
if i is None or j != i else value
for j, arg in enumerate(option['arguments'])
if not exclude(arg)]
return '#'.join(elements)
signature_to_option = {signature(option): option
for option in declaration['options']}
for option in declaration['options']:
for i, arg in enumerate(option['arguments']):
if arg['type'] == 'real':
signature_of_tensor_version = signature(option, i, 'Tensor &')
if signature_of_tensor_version in signature_to_option:
tensor_version = \
signature_to_option[signature_of_tensor_version]
names = [arg['name'] for arg in tensor_version['arguments']
if not exclude(arg)]
tensor_version['zero_dim_dispatch_when_scalar'] = names[i]
# print("FOUND "+str(i) )
# print("Scalar Version ===== ")
# print(yaml.dump(option))
# print("Tensor Version ===== ")
# print(yaml.dump(tensor_version))
# print("SHARED "+names[i])
def discover_sparse_tensor_operations(declaration):
def exclude(arg):
return arg.get('ignore_check')
def signature(option, i=None, value=None):
elements = [TYPE_FORMAL_GENERIC.get(arg['type'], arg['type'])
if i is None or j != i else value
for j, arg in enumerate(option['arguments'])
if not exclude(arg)]
return '#'.join(elements)
# Determine if any options have the 'aten_dense_sparse' flag
dense_sparse_options = [option
for option in declaration['options']
if option.get('aten_dense_sparse', False)]
if len(dense_sparse_options) > 0:
signature_to_option = {signature(option): option
for option in declaration['options']}
for option in declaration['options']:
for i, arg in enumerate(option['arguments']):
if (arg['type'] == 'THSTensor*' and
option.get('aten_dense_sparse', False)):
signature_of_tensor_version = signature(
option, i, 'Tensor &')
if signature_of_tensor_version in signature_to_option:
tensor_version = \
signature_to_option[signature_of_tensor_version]
raw_args = len(tensor_version['arguments'])
names = [arg['name'] for arg in tensor_version['arguments']
if not exclude(arg)]
filtered_args = len(names)
tensor_version['when_sparse_dispatch'] = names[i -
(raw_args - filtered_args)]
def run(declarations):
declarations = [d for d in declarations if not exclude(d)]
for declaration in declarations:
common_with_cwrap.set_declaration_defaults(declaration)
declaration['options'] = [deepcopy(o) for o in declaration['options']]
declaration['options'] = common_with_cwrap.filter_unique_options(
declaration['options'],
allow_kwarg=False,
type_to_signature=TYPE_FORMAL_GENERIC,
remove_self=True)
common_with_cwrap.sort_by_number_of_options(declaration)
discover_zero_dim_tensor_operations(declaration)
discover_sparse_tensor_operations(declaration)
for option in declaration['options']:
set_mode(option)
if option['mode'] != 'native':
sanitize_return(option)
process_types_and_backends(option)
add_variants(option)
declaration['options'] = handle_outputs_taken_as_arguments(
declaration['options'])
return declarations
|
import re
# match $identifier or ${identifier} and replace with value in env
# If this identifier is at the beginning of whitespace on a line
# and its value is a list, then it is treated as
# block substitution by indenting to that depth and putting each element
# of the list on its own line.
# If the identifier is on a line starting with non-whitespace and its value is
# a list, then it is comma separated; ${,foo} will insert a comma before the
# list if the list is not empty, and ${foo,} will insert one after.
class CodeTemplate(object):
substitution_str = '(^[^\n\S]*)?\$([^\d\W]\w*|\{,?[^\d\W]\w*\,?})'
# older versions of Python have a bug where \w* does not work,
# so we need to replace with the non-shortened version [a-zA-Z0-9_]*
# https://bugs.python.org/issue18647
substitution_str = substitution_str.replace('\w', '[a-zA-Z0-9_]')
    substitution = re.compile(substitution_str, re.MULTILINE)
@staticmethod
def from_file(filename):
with open(filename, 'r') as f:
return CodeTemplate(f.read())
def __init__(self, pattern):
self.pattern = pattern
def substitute(self, env={}, **kwargs):
def lookup(v):
return kwargs[v] if v in kwargs else env[v]
def indent_lines(indent, v):
return "".join([indent + l + "\n" for e in v for l in str(e).splitlines()]).rstrip()
def replace(match):
indent = match.group(1)
key = match.group(2)
comma_before = ''
comma_after = ''
if key[0] == "{":
key = key[1:-1]
if key[0] == ",":
comma_before = ', '
key = key[1:]
if key[-1] == ',':
comma_after = ', '
key = key[:-1]
v = lookup(key)
if indent is not None and isinstance(v, list):
return indent_lines(indent, v)
elif isinstance(v, list):
middle = ', '.join([str(x) for x in v])
if len(v) == 0:
return middle
return comma_before + middle + comma_after
else:
return (indent or '') + str(v)
        return self.substitution.sub(replace, self.pattern)
if __name__ == "__main__":
c = CodeTemplate("""\
int foo($args) {
$bar
$bar
$a+$b
}
int commatest(int a${,stuff})
int notest(int a${,empty,})
""")
print(c.substitute(args=["hi", 8], bar=["what", 7],
a=3, b=4, stuff=["things...", "others"], empty=[]))
|
import copy
import re
import common_with_cwrap
import yaml
from collections import OrderedDict, defaultdict
try:
# use faster C loader if available
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
# matches `name`, `params` in `name(params)`
NAME_PARAM_REGEX = r'(\w+)\((.*)\)'
def argument_to_declaration(param, func=None):
arg = {}
arg['type'], name = param.split(' ')
if arg['type'] == 'Tensor':
arg['type'] = 'THTensor*'
elif arg['type'] == 'LongTensor':
arg['type'] = 'THIndexTensor*'
elif arg['type'] == 'Scalar':
arg['type'] = 'accreal'
elif arg['type'] == 'Generator*':
arg['type'] = 'THGenerator*'
match = re.match(r'IntList\[(\d+)\]', arg['type'])
if match:
arg['type'] = 'IntList'
arg['size'] = int(match.group(1))
if '=' in name:
name, default = name.split('=')
arg['optional'] = True
arg['default'] = default
arg['name'] = name
if func is not None:
default_inits = func.get('default_init', {})
wrap_dims = func.get('wrap_dim', {})
if name in default_inits:
# non constexpr defaults
arg['default_init'] = default_inits[name]
if name in wrap_dims:
arg['wrap_dim'] = wrap_dims[name]
return arg
def output_arguments(thnn_function):
cname = thnn_function.name
output_args = []
# function_wrapper expects everything in a declaration to be in
# the base type (i.e. THTensor*), but if we pull a THCUNN only
# implementation, it will have THCTensor* as the arg type. So we
# strip the THC here before returning
def map_to_th_type(t):
if t.startswith('THC'):
t = t.replace('THC', 'TH')
return t
def is_output_arg(arg_name, func_name):
if arg_name == 'output' and 'updateOutput' in cname:
return True
        if arg_name in {'gradInput', 'gradWeight', 'gradBias'}:
return True
if arg_name == 'indices' and 'updateOutput' in cname and 'Unpool' not in cname:
# indices is an output argument in pooling and an input in unpooling
return True
return False
for arg in thnn_function.arguments:
name = arg.name
if is_output_arg(name, cname):
desc = {
'type': map_to_th_type(arg.type),
'name': camel_to_snake(name),
'output': True,
}
if name.startswith('grad_'):
desc['is_nullable'] = True
output_args.append(desc)
return output_args
def get_return(args):
indices = [str(idx) for idx, arg in enumerate(args) if arg.get('output')]
return 'argument {}'.format(','.join(indices))
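# Illustrative example (not in the original source): with output arguments at
# positions 0 and 1, get_return produces 'argument 0,1', which
# preprocess_declarations.sanitize_return later parses back into indices.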
ARGUMENT_MAPPINGS = {
'k': 'kernel_size',
'd': 'stride',
'pad': 'padding',
'p': 'padding',
'o': 'output_size',
'osize': 'output_size',
'output': 'output_size', # as a prefix e.g. outputW
'isize': 'input_size',
'dilation': 'dilation',
'adj': 'output_padding',
'a': 'output_padding',
}
DIMENSION_OFFSET = {
'width': -1,
'height': -2,
'B': 0,
'C': 1,
'W': -1,
'H': -2,
'T': -3,
'left': 0,
'right': 1,
'top': 2,
'bottom': 3,
'front': 4,
'back': 5,
}
SUBSTITUTIONS = {
'input': 'self',
'weights': 'weight',
'train': 'training',
'val': 'value',
'lambda': 'lambd',
'negval': 'negative_slope',
}
def camel_to_snake(name):
# from https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
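# Illustrative examples (not in the original source):
#   camel_to_snake('gradWeight')   -> 'grad_weight'
#   camel_to_snake('updateOutput') -> 'update_output'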
def get_thnn_args(thnn_function, params, inplace):
params_by_name = {p['name']: p for p in params}
def arg_expr(prefix, suffix):
# e.g kW, kH
name = ARGUMENT_MAPPINGS[prefix]
if name not in params_by_name:
raise RuntimeError('missing arg "{}" in {}'.format(name, thnn_function.name))
param = params_by_name[name]
if param['type'] == 'IntList' and 'size' in param:
name = name + '_'
index = DIMENSION_OFFSET[suffix]
if index < 0:
index += param['size']
expr = '{}[{}]'.format(name, index)
return {'type': 'EXPRESSION', 'name': expr}
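    # Illustrative example (not in the original source): arg_expr('k', 'W') with
    # an IntList kernel_size of size 2 yields
    # {'type': 'EXPRESSION', 'name': 'kernel_size_[1]'}.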
thnn_args = []
for arg in thnn_function.arguments:
name = arg.name
if name == 'state':
continue
if inplace and name == 'output':
name = 'self'
aten_name = camel_to_snake(SUBSTITUTIONS.get(name, name))
parts = aten_name.split('_')
if aten_name in params_by_name:
param = params_by_name[aten_name]
if arg.is_optional:
param['is_nullable'] = True
thnn_args.append(copy.deepcopy(param))
elif len(parts) == 2 and parts[0] in ARGUMENT_MAPPINGS and parts[1] in DIMENSION_OFFSET:
# e.g. pad_left
thnn_args.append(arg_expr(parts[0], parts[1]))
elif name[-1] in DIMENSION_OFFSET and name[:-1] in ARGUMENT_MAPPINGS:
# e.g kW, kH
thnn_args.append(arg_expr(name[:-1], name[-1]))
elif name == 'owidth' or name == 'oheight':
thnn_args.append(arg_expr(name[0], name[1:]))
elif name == 'scale':
thnn_args.append({'type': 'EXPRESSION', 'name': '1'})
elif name == 'inplace':
thnn_args.append({'type': 'EXPRESSION', 'name': str(inplace).lower()})
else:
raise RuntimeError("{}: can't find binding for '{}'"
.format(thnn_function.name, name))
return thnn_args
def remove_unused_args(args, thnn_args):
"""Returns the subset of args whose name appears in thnn_args"""
def clean_name(name):
name = name[:name.index('[')] if '[' in name else name
if name.endswith('_'):
name = name[:-1]
return name
uses = set([clean_name(arg['name']) for arg in thnn_args])
uses.add('output_mask')
args = [arg for arg in args if arg['name'] in uses]
for arg in args:
if 'default' in arg:
del arg['default']
return args
def unique_args(argslist):
result = []
seen = set()
for args in argslist:
for arg in args:
if arg['name'] in seen:
continue
seen.add(arg['name'])
result.append(arg)
return result
def function_info(name, arguments, cimpls, buffers, backends, inplace, scalar_check):
"""
    cimpls contains information used to call into THNN:
cname: THNN function name
arguments: arguments to functional call
condition: [optional] guard around call
"""
return {
'mode': 'NN',
'name': name,
'types': ['Float', 'Double', 'Half'], # Half will be stripped for CPU backend
'arguments': arguments,
'return': 'argument 0' if inplace else get_return(arguments),
'buffers': buffers,
'backends': backends,
'cimpls': cimpls,
'scalar_check': scalar_check,
'variants': ['function'],
}
def base_declaration(func, thnn_function, backends, inplace=False):
"""Creates the NN function without any buffers in it's signature"""
name, params = re.match(NAME_PARAM_REGEX, func['name']).groups()
if inplace:
name += '_'
params = params.split(', ')
arguments = [argument_to_declaration(a, func) for a in params]
if not inplace:
arguments += output_arguments(thnn_function)
buffers = [argument_to_declaration('Tensor ' + buf)
for buf in func.get('buffers', [])]
return function_info(name, arguments, None, buffers, backends, inplace, func.get('scalar_check'))
def forward_declaration(base, thnn_function, inplace=False):
name = '{}_forward'.format(base['name'])
if inplace:
name += '_'
arguments = [copy.deepcopy(arg) for arg in base['arguments']
if not arg.get('output')]
arguments += output_arguments(thnn_function)
for buffer in base['buffers']:
buffer = copy.deepcopy(buffer)
buffer['output'] = True
arguments.append(buffer)
thnn_args = get_thnn_args(thnn_function, arguments, inplace)
arguments = remove_unused_args(arguments, thnn_args)
cimpl = {'cname': thnn_function.name, 'arguments': thnn_args}
scalar_check = base['scalar_check']
if scalar_check is not None:
output_arg_names = [arg['name'] for arg in arguments if arg.get('output', False)]
scalar_check = {k: v for (k, v) in scalar_check.items() if k in output_arg_names}
return function_info(name, arguments, [cimpl], [], base['backends'], inplace, scalar_check)
def backward_declaration(base, thnn_functions):
name = '{}_backward'.format(base['name'])
arguments = []
arguments.append({'type': 'THTensor*', 'name': 'grad_output'})
arguments += [copy.deepcopy(arg) for arg in base['arguments']
if arg['name'] != 'inplace']
arguments += base['buffers']
if 'upsample' in base['name']:
# Add input_size as parameter to upsample backwards functions
# Note that input_size is 4-dim for upsample_xxx2d
size = 2 + int(re.search(r'(\d+)d', base['name']).group(1))
input_size_arg = {'type': 'IntList', 'name': 'input_size', 'size': size}
for output_size_idx, arg in enumerate(arguments):
if arg['name'] == 'output_size':
break
arguments.insert(output_size_idx + 1, input_size_arg)
# outputs from the forward may be inputs to the backwards
for arg in arguments:
if 'output' in arg:
del arg['output']
arguments += unique_args([output_arguments(f) for f in thnn_functions])
def initialize_output_arg(arg):
# the mask array<bool, N> specifies which return values to compute
arg['mask'] = True
arg['is_nullable'] = True
# grad_weight and grad_bias need to be resized and zeroed
if arg['name'] == 'grad_weight':
arg['resize'] = 'weight'
arg['zero'] = True
if arg['name'] == 'grad_bias':
dim = 1 if 'transpose' in name else 0
arg['resize'] = [('weight', dim)]
arg['zero'] = True
is_batch_norm_backward = '_backward' in thnn_functions[0].name
grad_params = []
if len(thnn_functions) > 1 or is_batch_norm_backward:
for arg in arguments:
if arg.get('output', False):
initialize_output_arg(arg)
if 'Tensor' in arg['type'] and arg['name'].startswith('grad_') and \
'input' not in arg['name'] and 'output' not in arg['name']:
grad_params.append(arg['name'])
thnn_args = [get_thnn_args(f, arguments, False) for f in thnn_functions]
arguments = remove_unused_args(arguments, unique_args(thnn_args))
cimpls = []
def get_condition(func):
# only call into the THNN functions if the output args are not null
if '_updateGradInput' in func.name:
return 'grad_input_'
if '_accGradParameters' in func.name:
return ' || '.join(p + '_' for p in grad_params)
return None
for func, args in zip(thnn_functions, thnn_args):
cimpl = {'cname': func.name, 'arguments': args}
if len(thnn_functions) > 1:
cimpl['condition'] = get_condition(func)
cimpls.append(cimpl)
output_args = [arg for arg in arguments if arg.get('output', False)]
scalar_check_arg = base['scalar_check'] if base['scalar_check'] is not None else dict()
scalar_check = {k: v for (k, v) in scalar_check_arg.items() if k in [a['name'] for a in output_args]}
for arg in output_args:
# resize automatically sets scalar_check
if scalar_check.get(arg['name']) is not None or arg.get('resize', False):
pass
else:
base_name = arg['name'][len('grad_'):] if arg['name'] != 'grad_input' else 'self'
if base_name in [a['name'] for a in arguments]:
scalar_check[arg['name']] = base_name + '_->isScalar()'
else:
raise ValueError(("Could not infer scalar_check for {} argument of func {} because {} "
"does not exist. Please explicitly specify scalar_check."
.format(arg['name'], name, base_name)))
return function_info(name, arguments, cimpls, [], base['backends'], False, scalar_check)
def parse_nn_yaml(filename):
with open(filename, 'r') as f:
return yaml.load(f, Loader=Loader)
include_only = '(updateOutput|updateGradInput|accGradParameters|backward)$'
exclude = 'LookupTable'
def run(paths):
function_backends = defaultdict(list)
header_functions = OrderedDict()
headers = [p for p in paths if p.endswith('.h')]
yamls = [p for p in paths if p.endswith('.yaml')]
for path in headers:
backend = 'CUDA' if re.search('THCU', path) else 'CPU'
for func in common_with_cwrap.parse_header(path):
if re.search(include_only, func.name) is None or re.search(exclude, func.name) is not None:
continue
function_backends[func.name].append(backend)
if func.name not in header_functions:
header_functions[func.name] = func
bwd_suffixes = ['_updateGradInput', '_accGradParameters', '_backward']
declarations = []
for path in yamls:
for func in parse_nn_yaml(path):
cname = func['cname']
backends = function_backends[cname + '_updateOutput']
fwd_function = header_functions[cname + '_updateOutput']
bwd_functions = []
for suffix in bwd_suffixes:
if cname + suffix in header_functions:
bwd_functions.append(header_functions[cname + suffix])
base = base_declaration(func, fwd_function, backends)
declarations.append(base)
declarations.append(forward_declaration(base, fwd_function))
declarations.append(backward_declaration(base, bwd_functions))
if func.get('has_inplace', False):
declarations.append(base_declaration(func, fwd_function, backends, True))
declarations.append(forward_declaration(base, fwd_function, True))
return declarations
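# A hedged usage sketch; the paths are hypothetical. Headers whose path matches 'THCU'
# are registered as CUDA backends, everything else as CPU, and the .yaml entries drive
# which declarations are emitted:
#
#   declarations = run(['THNN/generic/THNN.h', 'THCUNN/generic/THCUNN.h', 'nn.yaml'])
#
# Each element of `declarations` has the dict shape produced by function_info() above.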
|
# HEY! Trying to understand what this file does? Read
# "what has to be done to add a Operation ..." first!
import re
from code_template import CodeTemplate
try:
import typing # noqa: F401
except ImportError:
raise RuntimeError(
'Missing build dependency: Unable to import the `typing` module. '
'Please install it via `conda install typing` or `pip install typing`')
# flake8 doesn't take into account usages in type annotations.
from typing import Union, Set # noqa: F401
from typing import Any, Dict, List, Optional, Tuple, NamedTuple
try:
from mypy_extensions import TypedDict
except ImportError:
# Avoid the dependency on the mypy_extensions package.
# It is required, however, for type checking.
def TypedDict(name, attrs, total=True): # type: ignore
return Dict[Any, Any]
import sys
if sys.version_info[0] == 3:
string_type = str
else:
string_type = basestring
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# what has to be done to add an Operation ...
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# 1. if the function broadcasts its arguments or can be called without the full
#    list of arguments, add a non-virtual declaration under Type.h (right now we
#    call this template BROADCAST, but it also handles default arguments)
TYPE_METHOD_DECLARATION_BROADCAST = CodeTemplate("""\
${return_type} ${api_name}(${type_method_formals_with_defaults}) const;
""")
# 2. broadcasting functions are implemented in Type.cpp
TYPE_METHOD_DEFINITION_BROADCAST = CodeTemplate("""\
${return_type} Type::${api_name}(${type_method_formals}) const {
Tensor ${broadcast_returns};
std::tie(${broadcast_returns}) = ${broadcast_function}(${broadcast_actuals}, "${api_name}");
return ${method_prefix_derived}${api_name}(${broadcast_modified_actuals});
}
""")
# 3. add virtual dispatch declaration to Type.h and impl to Type.cpp; method_prefix_derived
# is present for providing a base-class definition for a derived-type method with a prefix.
#
# If the declaration is abstract, then the actual implementation will
# be in a derived type; we put in a simple default "not implemented"
# stub. However, if the declaration is concrete, we dispatch to the
# actual implementation. At the moment, this situation *only* occurs
# for 'native' declarations (so the native dispatch is hardcoded into
# the template here.)
TYPE_METHOD_DECLARATION_ABSTRACT = CodeTemplate("""\
virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals_with_defaults}) const;
""")
TYPE_METHOD_DEFINITION_ABSTRACT = CodeTemplate("""\
${return_type} Type::${method_prefix_derived}${api_name}(${type_method_formals}) const {
AT_ERROR("${method_prefix_derived}${api_name} is not implemented for type %s", toString());
}
""")
TYPE_METHOD_DECLARATION_CONCRETE = CodeTemplate("""\
virtual ${return_type} ${api_name}(${type_method_formals_with_defaults}) const;
""")
TYPE_METHOD_DEFINITION_CONCRETE = CodeTemplate("""\
${return_type} Type::${api_name}(${type_method_formals}) const {
${type_definition_body}
}
""")
# 4. add virtual override to TypeDerived.h
TYPE_DERIVED_DECLARATION = CodeTemplate("""\
virtual ${return_type} ${method_prefix_derived}${api_name}(${type_method_formals}) const override;
""")
# 5. add override definition to TypeDerived.cpp
TYPE_DERIVED_DEFINITION = CodeTemplate("""\
${return_type} ${Type}::${method_prefix_derived}${api_name}(${type_method_formals}) const {
${type_definition_body}
}
""")
# NB: As far as ezyang can tell, we don't *have* to codegen this,
# because we will inherit it from the TYPE_METHOD_DEFINITION_CONCRETE in
# the superclass. But it doesn't seem to be harmful.
TYPE_DERIVED_DEFINITION_NATIVE = CodeTemplate("""\
${return_type} ${Type}::${api_name}(${type_method_formals}) const {
${return_call} at::native::${native_type_method_dispatch}(${actuals});
}
""")
TYPE_DEFINITION_BODY_NATIVE = CodeTemplate("""\
${return_call} at::native::${native_type_method_dispatch}(${native_actuals});
""")
# 6. add non-virtual declaration to Tensor.h
TENSOR_METHOD_DECLARATION = CodeTemplate("""\
${return_type} ${api_name}(${method_formals_with_defaults})${const_mark};
""")
# 7. add non-virtual declaration to Tensor.cpp
TENSOR_METHOD_DEFINITION = CodeTemplate("""\
inline ${return_type} Tensor::${api_name}(${method_formals})${const_mark} {
return type().${api_name}(${method_actuals});
}
""")
# 8. add a method declaration in Functions.h
FUNCTION_DECLARATION = CodeTemplate("""\
static inline ${return_type} ${api_name}(${formals_with_defaults});
""")
# 9. add method definition in Functions.h
FUNCTION_DEFINITION = CodeTemplate("""\
static inline ${return_type} ${api_name}(${formals}) {
return ${inferred_type}.${api_name}(${type_method_actuals});
}
""")
# 10. add a native declaration for a native function
NATIVE_DECLARATION = CodeTemplate("""\
${return_type} ${native_type_method_dispatch}(${formals_with_defaults});
""")
# We need to cast to the base type because C++ may hide the base class
# implementation of ${api_name} if we have overloaded a function with
# the same name (but different signature) already
ZERO_DIM_CHECK = CodeTemplate("""\
if (${check_name}.dim() == 0) {
return static_cast<const Type*>(this)->${api_name}(${zero_dim_actuals});
}""")
ZERO_DIM_ONLY = CodeTemplate("""\
AT_ERROR("${api_name} only supports a 0-dimensional ${check_name} tensor, but got tensor "
"with %" PRId64 " dimension(s)", ${check_name}.dim());
""")
SPARSE_CHECK = CodeTemplate("""\
if(${check_name}.type().is_sparse()) {
return static_cast<const Type*>(this)->${api_name}(${sparse_actuals});
}""")
BUFFER_DEFINITION = CodeTemplate("""\
auto ${name}_ = new ${Tensor}(context);
auto ${name} = Tensor(${name}_, false);""")
CONDITIONAL_INITIALIZER = CodeTemplate("""\
if (${name}.defined()) {
${initializer}
}""")
CALL_TEMPLATE = CodeTemplate("${cname}(${actuals})")
HALF_CONVERSION = CodeTemplate("convert<half>(${value})")
class NYIError(Exception):
"""Indicates we don't support this declaration yet"""
def __init__(self, reason):
self.reason = reason
TYPE_FORMAL_GENERIC = {
'THTensor*': 'Tensor &',
'THSTensor*': 'SparseTensor',
'THBoolTensor*': 'Tensor &',
'THIndexTensor*': 'Tensor &',
'THIntegerTensor*': 'Tensor &',
'THDenseTensor*': 'Tensor &',
'THDenseIndexTensor*': 'Tensor &',
'THStorage*': 'Storage &',
'THGenerator*': 'Generator *',
'THSize*': 'IntList',
'THStride*': 'IntList',
'accreal': 'Scalar',
'real': 'Scalar',
'long': 'int64_t',
}
DYNAMIC_TYPE = {
'THTensor*': 'Tensor',
'THSTensor*': 'SparseTensor',
'THBoolTensor*': 'BoolTensor',
'THIndexTensor*': 'IndexTensor',
'THIntegerTensor*': 'IntegerTensor',
'THDenseTensor*': 'Tensor',
'THDenseIndexTensor*': 'IndexTensor',
'THStorage*': 'Storage',
'THGenerator*': 'Generator*',
'THSize*': 'IntList',
'THStride*': 'IntList',
'accreal': 'accreal',
'real': 'real',
'long': 'int64_t',
}
TYPE_RETURN = {
'THTensor*': 'Tensor',
'THIndexTensor*': 'Tensor',
'THBoolTensor*': 'Tensor',
'THIntegerTensor*': 'Tensor',
'THSTensor*': 'Tensor',
'THDenseTensor*': 'Tensor',
'THDenseIndexTensor*': 'Tensor',
'real': 'Tensor',
'accreal': 'Tensor',
'long': 'int64_t',
}
CHECKED_CAST = {
'THTensor*':
CodeTemplate(
'checked_cast_tensor<${Tensor}>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THSTensor*':
CodeTemplate(
'checked_cast_tensor<Sparse${Tensor}>(${arg_name}.tref.pImpl,"${arg_name}",${arg_pos},false)'),
'THBoolTensor*':
CodeTemplate(
'checked_cast_tensor<${Backend}ByteTensor>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THIndexTensor*':
CodeTemplate(
'checked_cast_tensor<${Backend}LongTensor>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THIntegerTensor*':
CodeTemplate(
'checked_cast_tensor<${Backend}IntTensor>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THDenseTensor*':
CodeTemplate(
'checked_cast_tensor<${DenseTensor}>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THDenseIndexTensor*':
CodeTemplate(
'checked_cast_tensor<${DenseBackend}LongTensor>(${arg_name}.pImpl,"${arg_name}",${arg_pos}, ${null_okay})'),
'THStorage*': CodeTemplate('checked_cast_storage<${Storage}>(&${arg_name},"${arg_name}",${arg_pos})'),
'THGenerator*':
CodeTemplate(
'check_generator<${Backend}Generator>(${arg_name}, &context->defaultGenerator(backend()))'),
'THSize*': CodeTemplate('THLongStorageView::makeFromSize(${arg_name})'),
'THStride*': CodeTemplate('THLongStorageView::makeFromStride(${arg_name}, ${noelem_to_empty})'),
'real': CodeTemplate('${arg_name}.to${ScalarName}()'),
'accreal': CodeTemplate('${arg_name}.to${AccScalarName}()'),
'TensorList': CodeTemplate('tensor_list_checked_cast<${Tensor}, Tensor, '
'${THTensor}>(${arg_name},"${arg_name}",${arg_pos})'),
'IntList': CodeTemplate('check_intlist<${size}>(${arg_name}, "${arg_name}", ${arg_pos}${,default_init})')
}
CHECKED_USE = {
'THTensor*': '{}_->tensor',
'THSTensor*': '{}_->tensor',
'THIndexTensor*': '{}_->tensor',
'THBoolTensor*': '{}_->tensor',
'THIntegerTensor*': '{}_->tensor',
'THDenseTensor*': '{}_->tensor',
'THDenseIndexTensor*': '{}_->tensor',
'THStorage*': '{}_->storage',
'THGenerator*': '{}_->generator',
'TensorList': "{0}_.data(), {0}_.size()",
}
CHECKED_USE_NULLABLE = CodeTemplate('${arg_name}_ ? ${usage} : NULL')
ALLOC_WRAP = {
'THTensor*': 'new ${Tensor}(context${,arguments})',
'THBoolTensor*': 'new ${Backend}ByteTensor(context${,arguments})',
'THIndexTensor*': 'new ${Backend}LongTensor(context${,arguments})',
'THIntegerTensor*': 'new ${Backend}IntTensor(context${,arguments})',
'THSTensor*': 'new Sparse${Tensor}(context${,arguments})',
'THDenseTensor*': 'new ${DenseTensor}(context${,arguments})',
'THDenseIndexTensor*': 'new ${DenseBackend}LongTensor(context${,arguments})',
}
# Replacements for constants when calling into TH
CONSTANT_REPLACEMENTS = [
('AS_REAL', '${AS_REAL}'),
('__storage_size.get\\(\\)',
'THLongStorageView::makeFromLength(static_cast<int64_t>(storage.size()))'),
('__last_dim', 'self.ndimension()-1'),
]
# Replacements for constants in header file function definitions
HEADER_CONSTANT_REPLACEMENTS = [
(r'AS_REAL\((.*)\)', r'\1'),
('__last_dim', '-1'),
]
class nested_dict(object):
def __init__(self, base, parent):
self.base, self.parent = base, parent
def __getitem__(self, x):
r = self.base.get(x)
if r is not None:
return r
return self.parent[x]
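# Illustrative only (hypothetical values): lookups fall back to the parent environment
# when a key is missing from the per-option dict.
def _example_nested_dict():
    env = nested_dict({'api_name': 'add_out'}, {'Backend': 'CPU', 'api_name': 'ignored'})
    assert env['api_name'] == 'add_out'  # found in the per-option dict
    assert env['Backend'] == 'CPU'  # falls back to the parent environment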
Environment = TypedDict('Environment', {
'ScalarName': str,
'THTensor': str,
'THType': str,
'Backend': str,
'AccScalarName': str,
})
TopEnvironment = TypedDict('TopEnvironment', {
'type_registrations': List[str],
'type_headers': List[str],
'type_method_declarations': List[str],
'type_method_definitions': List[str],
'type_method_inline_definitions': List[str],
'tensor_method_declarations': List[str],
'tensor_method_definitions': List[str],
'function_declarations': List[str],
'function_definitions': List[str],
'type_ids': List[str],
'native_function_declarations': List[str],
})
# A Declarations.cwrap formal argument
# type can contain THTensor* types
THFormal = TypedDict('THFormal', {
'name': str,
'type': str,
'dynamic_type': str,
'kwarg_only': bool,
'is_nullable': bool,
'default': str,
'default_init': str,
'python_default_init': str,
'output': bool,
'size': int,
'declared_type': str,
'ignore_check': bool,
'allocate': bool,
'mask': bool,
'if_true': bool,
'if_false': bool,
'wrap_dim': str,
# Broadcast is originally a str but gets unwrapped to a List or Dict in-place
'broadcast': Any,
'resize': str,
'cpu_zero': bool,
'zero': bool,
'is_type_dispatched': bool,
}, total=False)
# Generic ATen formal or native_functions.yaml formal argument.
# type can contain Tensor& reference types.
AtFormal = TypedDict('AtFormal', {
'name': str,
'type': str,
'dynamic_type': str,
'kwarg_only': bool,
'is_nullable': bool,
'default': str,
'default_init': str,
'python_default_init': str,
'output': bool,
'size': int,
'is_type_dispatched': bool,
}, total=False)
ReturnType = TypedDict('ReturnType', {
'name': str,
'type': str,
'dynamic_type': str,
}, total=False)
ReturnDecl = TypedDict('ReturnDecl', {
'kind': str,
'type': str,
'arguments': List[int],
}, total=False)
# Represents a buffer in nn.yaml
NNBuffer = TypedDict('NNBuffer', {
'name': str,
})
FunctionOption = TypedDict('FunctionOption', {
'arguments': List[THFormal],
'mode': str,
'name': str,
'return': ReturnDecl,
'variants': str,
'type_method_definition_dispatch': str,
'type_method_formals': List[str],
'type_method_formals_with_defaults': List[str],
'type_method_actuals': List[str],
'cname': str,
'backends': List[str],
'api_name': str,
'backend_type_pairs': List[Tuple[str, str]],
'inplace': bool,
'aten_dense_sparse': bool,
'sparse': bool,
'scalar_check': str,
'aten_custom_call': str,
'type_definition_body': List[str],
# cimpls is really a List[FunctionOption]
'cimpls': List[Any],
'actuals': List[str],
'buffers': List[NNBuffer],
'zero_dim_dispatch_when_scalar': str,
'zero_dim_tensor_only': bool,
'when_sparse_dispatch': str,
'formals_list': List[AtFormal],
'condition': str,
'auto_gpu': bool,
'cpu_half': bool,
# options should be List[FunctionOption]
'options': Any,
'formals': List[str],
'formals_with_defaults': List[str],
'returns': List[ReturnType],
'return_type': str,
'return_call': str,
'method_formals': List[str],
'method_formals_with_defaults': List[str],
'method_actuals': List[str],
'const_mark': str,
'method_prefix_derived': str,
'broadcast_actuals': List[str],
'broadcast_returns': List[str],
'inferred_type': str,
'broadcast_function': str,
'broadcast_modified_actuals': List[str],
'native_type_method_dispatch': str,
'native_actuals': List[str],
})
OutputDeclaration = NamedTuple('OutputDeclaration', [
('name', str),
('method_prefix_derived', str),
('arguments', List[AtFormal]),
('method_of', List[str]),
('mode', str),
('buffers', Optional[List[str]]),
('returns', List[ReturnType]),
('inplace', bool),
('abstract', bool),
])
def is_real_argument_to_wrapper(argument):
# type: (THFormal) -> bool
return not argument.get('output', False) and\
argument['type'] != 'CONSTANT' and\
argument['type'] != 'argument'
def is_mutable_formal_argument(argument, option):
# type: (THFormal, FunctionOption) -> bool
return argument.get('output') or option['inplace'] and argument['name'] == 'self'
def to_return_type(arg, option):
# type: (THFormal, FunctionOption) -> ReturnType
t = arg['type']
rt = TYPE_RETURN.get(t, t)
if rt == 'Tensor' and not arg.get('allocate'):
rt = rt + ' &'
if not is_mutable_formal_argument(arg, option):
rt = 'const ' + rt
return {
'name': arg['name'],
'type': rt,
'dynamic_type': DYNAMIC_TYPE.get(arg['type'], arg['type']),
}
def create_generic(top_env, declarations):
# type: (TopEnvironment, List[FunctionOption]) -> List[OutputDeclaration]
# translates defaults from cwrap types to C++ values
def translate_default(argument, type_str, default):
# type: (THFormal, str, Any) -> Any
if default is None:
# cause the default constructor for the object to run
return '{}'
if 'if_true' in argument:
return argument['default'] == argument['if_true']
for pattern, replacement in HEADER_CONSTANT_REPLACEMENTS:
default = re.sub(pattern, replacement, str(default))
if type_str in {'Scalar', 'int64_t', 'double'}:
return float(default) if '.' in default else int(default)
elif type_str == 'bool':
assert default.lower() in ['true', 'false']
return default.lower() == 'true'
else:
return default
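# For illustration (hypothetical cwrap values): a missing default becomes the C++
# default-constructor spelling '{}', 'true'/'false' become Python bools, and numeric
# strings such as '1' or '0.5' become int/float for Scalar, int64_t and double formals.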
# change from THTensor* to Tensor & so we get how it will appear
# in the aten argument list...
def translate_formal(argument, option):
# type: (THFormal, FunctionOption) -> AtFormal
type_str = TYPE_FORMAL_GENERIC.get(argument['type'], argument['type'])
if type_str == 'Tensor &' and not is_mutable_formal_argument(argument, option):
type_str = 'const ' + type_str
translated = {
'name': argument['name'],
'type': type_str,
'dynamic_type': DYNAMIC_TYPE.get(argument['type'], argument['type']),
} # type: AtFormal
if 'kwarg_only' in argument:
translated['kwarg_only'] = argument['kwarg_only']
if 'default' in argument:
default = translate_default(argument, type_str, argument['default'])
translated['default'] = default
translated['default_init'] = argument.get('default_init', default)
if 'python_default_init' in argument:
assert 'default' not in argument
default = translate_default(argument, type_str, argument['python_default_init'])
translated['python_default_init'] = default
if argument.get('output'):
translated['output'] = True
if argument.get('size'):
translated['size'] = argument['size']
if argument.get('is_nullable') is not None:
translated['is_nullable'] = argument['is_nullable']
return translated
def get_formals(option, include_constants=False):
# type: (FunctionOption, bool) -> List[AtFormal]
seen = set() # type: Set[str]
pos_args = [] # type: List[THFormal]
kwd_args = [] # type: List[THFormal]
def insert(argument):
# type: (THFormal) -> None
if argument['name'] not in seen:
seen.add(argument['name'])
if argument.get('kwarg_only', False):
kwd_args.append(argument)
else:
pos_args.append(argument)
def has_output_mask(argument):
# type: (THFormal) -> bool
return argument.get('allocate', False) and argument.get('mask', False)
for argument in option['arguments']:
if argument.get('output') and not argument.get('allocate', False):
insert(argument)
for argument in option['arguments']:
if argument['type'] == 'THSTensor*':
# only enable for a subset of Dense/Sparse ops
if not (option.get('aten_dense_sparse', False)):
raise NYIError("Sparse Tensor")
if include_constants and argument['type'] == 'CONSTANT':
insert(argument)
elif is_real_argument_to_wrapper(argument):
insert(argument)
if any(has_output_mask(arg) for arg in option['arguments']):
mask_size = sum(has_output_mask(arg) for arg in option['arguments'])
insert({
'name': 'output_mask',
                # NB: The lack of a space after the comma works around a parsing
                # problem in gen_variable_type.py
'type': 'std::array<bool,{}>'.format(mask_size),
'default': '{{' + ', '.join(['true'] * mask_size) + '}}',
})
result = pos_args + kwd_args
return [translate_formal(argument, option) for argument in result]
def get_return_types(option):
# type: (FunctionOption) -> List[ReturnType]
ret = option['return']
if ret['kind'] == 'arguments':
argument_indices = ret['arguments']
if len(argument_indices) == 1:
the_arg = option['arguments'][argument_indices[0]]
return [to_return_type(the_arg, option)]
else:
return [to_return_type(option['arguments'][idx], option)
for idx in argument_indices]
elif ret['kind'] == 'type':
return [{
'type': TYPE_RETURN.get(ret['type'], ret['type']),
'dynamic_type': DYNAMIC_TYPE.get(ret['type'], ret['type']),
}]
else:
raise Exception("format_return_type")
def format_return_type(return_types):
# type: (List[ReturnType]) -> str
if len(return_types) == 1:
return return_types[0]['type']
return "std::tuple<{}>".format(','.join(r['type'] for r in return_types))
def find_dispatch_tensor(formals):
# type: (List[AtFormal]) -> Optional[str]
# dispatch to self if it's a parameter
for formal in formals:
if formal['name'] == 'self' and formal['dynamic_type'] == 'Tensor':
return formal['name']
# otherwise dispatch to the first Tensor or TensorList
for formal in formals:
if 'TensorList' == formal['dynamic_type'] or formal['dynamic_type'] == 'Tensor':
return formal['name']
return None
def format_formal(f):
# type: (AtFormal) -> str
return '{} {}'.format(f['type'], f['name'])
def formal_with_default(f):
# type: (AtFormal) -> str
s = format_formal(f)
v = f.get('default')
if v is None:
return s
if isinstance(v, bool):
v = str(v).lower()
return '{}={}'.format(s, v)
def get_broadcast_argument(option):
# type: (FunctionOption) -> Optional[THFormal]
for argument in option['arguments']:
if argument.get('broadcast'):
return argument
return None
def get_broadcast_actuals(broadcast_arg, broadcast_inplace, broadcast_dims):
# type: (THFormal, bool, bool) -> List[str]
# Note: broadcast_dims can change type...
# return the actuals that will be passed to the broadcast function.
# 1) in the common case, this is the broadcasted argument (e.g. "self") followed by the tensors
# that it is broadcasted against (comma-separated) (e.g. "self, tensor1, tensor2").
# 2) in the broadcast_dims case, this is the broadcasted argument (e.g. "self") followed by the sizes
# it is broadcasted to (as an initializer list), so e.g. the specification
# "mat1.dim0,mat2.dim1" gets transformed to "self, {mat1.size(0),mat2.size(1)}"
if not broadcast_dims:
broadcast_actuals = [broadcast_arg['name']] + broadcast_arg['broadcast'].split()[0].split(",")
else:
broadcast_dims_spec = broadcast_arg['broadcast'].split()[1].split(':')[1].split(',')
# generate size call for each dimension
broadcast_dims = ([x.split('.')[0] + '.size(' + x.split('.')[1].replace('dim', '') + ')' # type: ignore
for x in broadcast_dims_spec])
broadcast_dims_init_list = '{' + ','.join(broadcast_dims) + '}' # type: ignore
broadcast_actuals = [broadcast_arg['name'], broadcast_dims_init_list]
return broadcast_actuals
def emit_nn_body(option):
# type: (FunctionOption) -> Union[str, List[str]]
# Concrete definition in Type.cpp for NN functions. Delegates to the
# xxx_forward variant after creating any necessary buffers.
actuals = option['actuals']
base_name = option['name'][:-1] if option['inplace'] else option['name']
fwd_name = option['api_name'].replace(base_name, base_name + '_forward')
if len(option['buffers']) == 0:
return 'return {}({});'.format(fwd_name, ', '.join(actuals))
body = [] # type: List[str]
if option['api_name'].endswith('_out'):
# _out variants must create buffers and insert them in the
# arguments list between output and input arguments
for buffer in option['buffers']:
body.append('Tensor {} = tensor();'.format(buffer['name']))
actuals = [arg['name'] for arg in option['arguments'] if arg.get('output')]
actuals += [buffer['name'] for buffer in option['buffers']]
actuals += [arg['name'] for arg in option['arguments'] if not arg.get('output')]
body.append('return std::get<0>({}({}));'.format(fwd_name, ', '.join(actuals)))
return body
def process_option(option, output_options):
# type: (FunctionOption, List[OutputDeclaration]) -> None
option['inplace'] = re.search(
'(^__i|[^_]_$)', option['api_name']) is not None
# print(yaml.dump(option))
formals = get_formals(option)
option['formals_list'] = formals
option['formals'] = [format_formal(f) for f in formals]
option['formals_with_defaults'] = [formal_with_default(f) for f in formals]
option['returns'] = get_return_types(option)
option['return_type'] = format_return_type(option['returns'])
option['return_call'] = 'return ' if option['return_type'] != 'void' else ''
option['actuals'] = [f['name'] for f in formals]
option['method_formals'] = [format_formal(f) for f in formals
if f['name'] != 'self']
option['method_formals_with_defaults'] = (
[formal_with_default(f) for f in formals if f['name'] != 'self'])
option['method_actuals'] = [
f['name'] if f['name'] != 'self' else '*this' for f in formals]
# There are no cases where these differ, but they do in native_functions
option['type_method_formals'] = option['formals']
option['type_method_formals_with_defaults'] = option['formals_with_defaults']
option['type_method_actuals'] = option['actuals']
option['const_mark'] = '' if option['inplace'] else ' const'
is_method = 'method' in option['variants']
is_function = 'function' in option['variants']
dispatch_tensor = find_dispatch_tensor(formals)
is_namespace_function = is_function and dispatch_tensor is not None
broadcast_arg = get_broadcast_argument(option)
# "s_" for "same size".
option['method_prefix_derived'] = '' if broadcast_arg is None else 's_'
env = nested_dict(option, top_env)
mode = option['mode']
abstract = True
if mode == 'NN' and option.get('cimpls') is None:
            # NN functions with no _forward/_backward suffix don't have cimpls.
            # They call the _forward function and discard any buffer returns
abstract = False
top_env['type_method_declarations'].append(
TYPE_METHOD_DECLARATION_CONCRETE.substitute(env))
body = emit_nn_body(option)
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_CONCRETE.substitute(
env, type_definition_body=body))
elif broadcast_arg is None:
top_env['type_method_declarations'].append(
TYPE_METHOD_DECLARATION_ABSTRACT.substitute(env))
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_ABSTRACT.substitute(env))
else:
top_env['type_method_declarations'].append(
TYPE_METHOD_DECLARATION_BROADCAST.substitute(env))
top_env['type_method_declarations'].append(
TYPE_METHOD_DECLARATION_ABSTRACT.substitute(env))
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_ABSTRACT.substitute(env))
broadcast_inplace = 'inplace' in broadcast_arg['broadcast']
broadcast_dims = 'dims:' in broadcast_arg['broadcast']
option['broadcast_actuals'] = get_broadcast_actuals(broadcast_arg, broadcast_inplace, broadcast_dims)
if not broadcast_dims:
option['broadcast_returns'] = (["b_" + x for x in option['broadcast_actuals']
if x != broadcast_arg['name'] or not broadcast_inplace])
else:
option['broadcast_returns'] = ["b_" + broadcast_arg['name']]
option['broadcast_function'] = 'expand_' + ('inplace' if broadcast_inplace
else 'size' if broadcast_dims else 'outplace')
option['broadcast_modified_actuals'] = ['b_' + y if 'b_' + y in option['broadcast_returns'] else y
for y in option['actuals']]
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_BROADCAST.substitute(env))
method_of = ['Type']
if is_method:
top_env['tensor_method_declarations'].append(
TENSOR_METHOD_DECLARATION.substitute(env))
top_env['tensor_method_definitions'].append(
TENSOR_METHOD_DEFINITION.substitute(env))
method_of.append('Tensor')
if is_namespace_function:
option['inferred_type'] = 'infer_type({})'.format(dispatch_tensor)
top_env['function_declarations'].append(
FUNCTION_DECLARATION.substitute(env))
top_env['function_definitions'].append(
FUNCTION_DEFINITION.substitute(env))
method_of.append('namespace')
buffer_names = [buffer['name'] for buffer in option.get('buffers', [])]
output_options.append(OutputDeclaration(
name=option['api_name'],
method_prefix_derived=option['method_prefix_derived'],
arguments=formals,
method_of=method_of,
mode=mode,
buffers=buffer_names,
returns=option['returns'],
inplace=option['inplace'],
# See Note [Abstract ATen methods]
abstract=abstract,
))
def native_get_formals(option, include_constants=False):
# type: (FunctionOption, bool) -> List[AtFormal]
seen = set() # type: Set[str]
pos_args = []
kwd_args = []
def insert(argument):
# type: (AtFormal) -> None
if argument['name'] not in seen:
seen.add(argument['name'])
if argument.get('kwarg_only', False):
kwd_args.append(argument)
else:
pos_args.append(argument)
for argument in option['arguments']:
insert(argument)
# not clear we need dynamic_type translation as we can specify the correct type
# directly in native functions
def add_type_as_dynamic_type(argument, option):
# type: (AtFormal, FunctionOption) -> AtFormal
argument['dynamic_type'] = argument['type']
return argument
result = pos_args + kwd_args
result = [add_type_as_dynamic_type(argument, option) for argument in result]
# ensure we get reference-type formals when appropriate
def native_translate_formals(argument, option):
# type: (AtFormal, FunctionOption) -> AtFormal
def translate_map(const):
# type: (bool) -> Dict[str, str]
return {
'Tensor': 'const Tensor &' if const else 'Tensor &',
'BoolTensor': 'const Tensor &' if const else 'Tensor &',
'IndexTensor': 'const Tensor &' if const else 'Tensor &',
'Type': 'const Type &' if const else 'Type &',
}
if (option['inplace'] and argument['name'] == 'self') or argument.get('output', False):
argument['type'] = translate_map(False).get(argument['type'], argument['type'])
else:
argument['type'] = translate_map(True).get(argument['type'], argument['type'])
return argument
result = [native_translate_formals(argument, option) for argument in result]
return result
# this can return multiple return types in a list, e.g. ['Tensor', 'Tensor']
def native_get_return_types(option):
# type: (FunctionOption) -> List[ReturnType]
ret = option['return']
return_types = [] # List[ReturnType]
for t_raw in ret:
if isinstance(t_raw, string_type):
t = t_raw
name = None
else:
t = t_raw['type']
name = t_raw['name']
# can't actually return a TensorList (since it's a reference object)
actual_return_type = {'TensorList': 'std::vector<Tensor>'}.get(t, t)
if actual_return_type == 'Tensor' and (option['inplace'] or option['api_name'].endswith('_out')):
# follow normal ATen convention of returning Tensor & for inplace functions.
actual_return_type = 'Tensor &'
rtype = {
'type': actual_return_type,
'dynamic_type': t,
} # type: ReturnType
if name is not None:
rtype['name'] = name
return_types.append(rtype)
return return_types
def process_native(option, output_options):
# type: (FunctionOption, List[OutputDeclaration]) -> None
option['inplace'] = re.search(
'(^__i|[^_]_$)', option['api_name']) is not None
formals = native_get_formals(option)
option['formals_list'] = formals
option['formals'] = [format_formal(f) for f in formals]
option['formals_with_defaults'] = [formal_with_default(f) for f in formals]
option['returns'] = native_get_return_types(option)
option['return_type'] = format_return_type(option['returns'])
option['return_call'] = 'return ' if option['return_type'] != 'void' else ''
option['actuals'] = [f['name'] for f in formals]
option['method_formals'] = [format_formal(f) for f in formals
if f['name'] != 'self']
option['method_formals_with_defaults'] = (
[formal_with_default(f) for f in formals if f['name'] != 'self'])
option['method_actuals'] = [
f['name'] if f['name'] != 'self' else '*this' for f in formals]
def find_dispatch_type(formals):
for formal in formals:
if 'Type' == formal['dynamic_type']:
return formal
return None
dispatch_tensor = find_dispatch_tensor(formals)
dispatch_type = None if dispatch_tensor else find_dispatch_type(formals)
if dispatch_type:
dispatch_type['is_type_dispatched'] = True
option['type_method_formals'] = [format_formal(f) for f in formals if f != dispatch_type]
option['type_method_formals_with_defaults'] = [formal_with_default(f) for f in formals if f != dispatch_type]
option['type_method_actuals'] = [f['name'] for f in formals if f != dispatch_type]
option['native_actuals'] = [f['name'] if f != dispatch_type else '*this' for f in formals]
option['const_mark'] = '' if option['inplace'] else ' const'
is_method = 'method' in option['variants']
is_function = 'function' in option['variants']
is_namespace_function = is_function and (dispatch_tensor or dispatch_type)
option['method_prefix_derived'] = ''
env = nested_dict(option, top_env)
broadcast_arg = get_broadcast_argument(option)
if broadcast_arg is not None:
raise Exception("broadcasting is not yet supported for native functions, "
"but specified for function {}", option['name'])
top_env['type_method_declarations'].append(
TYPE_METHOD_DECLARATION_CONCRETE.substitute(env))
dispatch = option['type_method_definition_dispatch']
option['native_type_method_dispatch'] = dispatch
# Note [Abstract ATen methods]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# An abstract ATen method is one whose dispatch differs between
# types. These are implemented in derived types (with a
# standard (throwing) definition in Type). A concrete ATen
# method is one which has the same dispatch for all types;
# we just implement it in the base Type. This is exposed
# in Declarations.yaml via a field named 'abstract'.
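# For example (hypothetical native_functions.yaml entries): a plain string dispatch
#   dispatch: my_op
# yields a concrete method implemented once on Type, while a per-backend dict
#   dispatch: {CPU: my_op_cpu, CUDA: my_op_cuda}
# yields an abstract method with a throwing base definition and per-Type overrides.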
if isinstance(dispatch, dict):
abstract = True
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_ABSTRACT.substitute(env))
else:
abstract = False
body = TYPE_DEFINITION_BODY_NATIVE.substitute(env)
top_env['type_method_definitions'].append(
TYPE_METHOD_DEFINITION_CONCRETE.substitute(
env, type_definition_body=body))
# generate the at::native function declarations (i.e. what the user will implement)
if isinstance(dispatch, dict):
generated_native_functions = [] # type: List[str]
for key in sorted(dispatch.keys()):
value = dispatch[key]
if value not in generated_native_functions:
option['native_type_method_dispatch'] = value
top_env['native_function_declarations'].append(
NATIVE_DECLARATION.substitute(env))
generated_native_functions.append(value)
else:
top_env['native_function_declarations'].append(
NATIVE_DECLARATION.substitute(env))
method_of = ['Type']
if is_method:
top_env['tensor_method_declarations'].append(
TENSOR_METHOD_DECLARATION.substitute(env))
top_env['tensor_method_definitions'].append(
TENSOR_METHOD_DEFINITION.substitute(env))
method_of.append('Tensor')
if is_namespace_function:
if dispatch_type:
option['inferred_type'] = dispatch_type['name']
else:
option['inferred_type'] = 'infer_type({})'.format(dispatch_tensor)
top_env['function_declarations'].append(
FUNCTION_DECLARATION.substitute(env))
top_env['function_definitions'].append(
FUNCTION_DEFINITION.substitute(env))
method_of.append('namespace')
output_options.append(OutputDeclaration(
name=option['api_name'],
method_prefix_derived=option['method_prefix_derived'],
arguments=formals,
method_of=method_of,
mode=option['mode'],
buffers=None,
returns=option['returns'],
inplace=option['inplace'],
# See Note [Abstract ATen methods]
abstract=abstract,
))
output_declarations = [] # type: List[OutputDeclaration]
for declaration in declarations:
output_options = [] # type: List[OutputDeclaration]
for option in declaration['options']:
try:
if option['mode'] != 'native':
process_option(option, output_options)
else:
process_native(option, output_options)
except NYIError:
option['skip'] = True
output_declarations.extend(output_options)
return output_declarations
def create_derived(backend_type_env, declarations):
# type: (Environment, List[FunctionOption]) -> Tuple[List[str], List[str]]
type_object_declarations = []
type_object_definitions = []
is_cuda = 'CUDA' in backend_type_env['Backend']
real_is_half = backend_type_env['ScalarName'] == 'Half'
def replace_with_null(argument):
# type: (THFormal) -> bool
return (argument['type'] == 'THGenerator*' and
backend_type_env['Backend'] == 'CUDA')
def requires_checked_cast(argument):
# type: (THFormal) -> bool
if argument['type'] == 'IntList':
return 'size' in argument
return argument['type'] in CHECKED_CAST
def nullable_argument(argument):
# type: (THFormal) -> bool
return argument.get('is_nullable', False)
def bool_option_is_string(argument):
# type: (THFormal) -> bool
return 'if_true' in argument and isinstance(argument['if_true'], string_type)
def get_argument(argument, option):
# type: (THFormal, FunctionOption) -> str
if replace_with_null(argument):
return 'NULL'
elif requires_checked_cast(argument):
checked_use = CHECKED_USE.get(
argument['type'], '{}_').format(argument['name'])
if real_is_half and argument['type'] == 'real':
checked_use = HALF_CONVERSION.substitute(value=checked_use)
if nullable_argument(argument):
checked_use = CHECKED_USE_NULLABLE.substitute(
env={}, arg_name=argument['name'], usage=checked_use)
return checked_use
elif argument['type'] == 'bool' and 'if_true' in argument:
if bool_option_is_string(argument):
tpl = '({}) ? "{}" : "{}"'
else:
tpl = '({}) ? {} : {}'
return tpl.format(argument['name'],
argument['if_true'], argument['if_false'])
elif argument['type'] == 'CONSTANT':
# this is a bool that is actually a string...
if bool_option_is_string(argument):
return '"{}"'.format(argument['name'])
v = str(argument.get('default', argument['name']))
for pattern, replacement in CONSTANT_REPLACEMENTS:
v = re.sub(pattern, replacement, v)
return CodeTemplate(v).substitute(backend_type_env)
# e.g. argument 0, i.e. repeat the 0th argument in this position...
elif argument['type'] == 'argument':
index = int(argument['name'])
return get_argument(option['arguments'][index], option)
else:
return argument['name']
def drop_argument(argument, option):
# type: (THFormal, FunctionOption) -> bool
return 'CUDA' in backend_type_env['Backend'] and (
option['mode'] == 'TH' and argument['type'] == 'THGenerator*')
def get_arguments(arguments, option):
# type: (List[THFormal], FunctionOption) -> List[str]
return [get_argument(argument, option)
for argument in arguments if not drop_argument(argument, option)]
def is_actual_return_long(ret):
# type: (ReturnDecl) -> bool
if ret['type'] == 'long':
return True
if ret['type'] == 'real':
return backend_type_env['ScalarName'] == 'Long'
if ret['type'] == 'accreal':
return backend_type_env['AccScalarName'] == 'Long'
return False
def handle_zero_dim(env, option):
# type: (Environment, FunctionOption) -> List[str]
zero_dim_dispatch = option.get('zero_dim_dispatch_when_scalar', '')
if not zero_dim_dispatch:
return []
broadcasts_arg = zero_dim_dispatch in option.get('broadcast_actuals', '')
zero_dim_only = option.get('zero_dim_tensor_only', False)
# this combination doesn't seem to make sense
assert not (broadcasts_arg and zero_dim_only)
# if the argument broadcasts, then this would only affect cases where all broadcasted
# tensors were zero-dim, which is inconsistent with the scalar handling.
if broadcasts_arg:
return []
zero_dim_actuals = [arg['name']
if arg['name'] != zero_dim_dispatch else "Scalar({})".format(arg['name'])
for arg in option['formals_list']]
return [ZERO_DIM_CHECK.substitute(env, check_name=zero_dim_dispatch, zero_dim_actuals=zero_dim_actuals)]
def handle_only_zero_dim(env, option):
# type: (Environment, FunctionOption) -> Optional[List[str]]
if option.get('zero_dim_tensor_only', False):
check_name = option['zero_dim_dispatch_when_scalar']
return [ZERO_DIM_ONLY.substitute(env, check_name=check_name)]
else:
return None
def handle_sparse(env, option):
# type: (Environment, FunctionOption) -> List[str]
if 'when_sparse_dispatch' not in option or 'Sparse' in backend_type_env['Backend']:
return []
check_name = option['when_sparse_dispatch']
sparse_actuals = [arg['name']
if arg['name'] != check_name else "SparseTensor({})".format(arg['name'])
for arg in option['formals_list']]
return [SPARSE_CHECK.substitute(env, check_name=check_name, sparse_actuals=sparse_actuals)]
def allocate_arg(env, arg, output_count):
# type: (Environment, THFormal, int) -> List[str]
name = arg['name']
allocation = CodeTemplate(ALLOC_WRAP[arg['type']]).substitute(env, arguments=[])
tensor_arg = '{}_'.format(name)
if arg.get('mask', False):
allocation = 'output_mask[{}] ? {} : nullptr'.format(output_count, allocation)
tensor_arg = ('{}_ == nullptr ? (TensorImpl*)UndefinedTensor::singleton() : (TensorImpl*){}_'
.format(name, name))
return [
'auto {}_ = {};'.format(name, allocation),
'auto {} = Tensor({}, false);'.format(name, tensor_arg),
]
def resize_arg(arg):
# type: (THFormal) -> str
resize = arg['resize']
if isinstance(resize, str):
return "{}.resize_({}.sizes());".format(arg['name'], resize)
else:
resize_scalar = arg.get('resize_scalar', False)
if resize_scalar:
dims = ['{}.dim() == 0 ? 1 : {}.size({})'.format(name, name, dim) for name, dim in resize]
else:
dims = ['{}.size({})'.format(name, dim) for name, dim in resize]
return "{}.resize_({{ {} }});".format(arg['name'], ','.join(dims))
def handle_call(env, option, cimpl):
# type: (Environment, FunctionOption, FunctionOption) -> str
is_nn = option['mode'] == 'NN'
actuals = get_arguments(cimpl['arguments'], option)
if is_cuda or is_nn:
actuals = ['context->thc_state'] + actuals
cname = cimpl['cname']
if option.get('sparse', False):
if is_cuda:
cname = 'THCS' + env['ScalarName'] + "Tensor_" + cname
else:
cname = env['THTensor'].replace('TH', 'THS') + '_' + cname
elif is_nn:
cname = 'THNN_{}'.format(env['THType']) + cname
else:
cname = env['THTensor'] + '_' + cname
call = CALL_TEMPLATE.substitute(actuals=actuals, cname=cname)
if cimpl.get('condition') is not None:
call = 'if ({}) {}'.format(cimpl['condition'], call)
return call
def emit_body(env, option):
# type: (Environment, FunctionOption) -> List[str]
body = [] # type: List[str]
body += handle_sparse(env, option)
body += handle_zero_dim(env, option)
only_zero_dim_check = handle_only_zero_dim(env, option)
if only_zero_dim_check is not None:
# code below only_zero_dim_check is unreachable so we do not need to generate the rest.
body += only_zero_dim_check
return body
# arguments are potentially duplicated because of one argument
# referencing another
seen_names = set() # type: Set[str]
seen_tensorlists = set() # type: Set[str]
count = 0
output_count = 0
# scalar_check is the heuristic condition under which a result may be a scalar.
# If there is a THSize* argument, then its dimensions are used to determine scalar-ness;
# otherwise, it is true if all the input tensors are scalars.
scalar_check_is_from_size = False
scalar_check_is_from_option = False
scalar_check = None
scalar_check_opt = option.get('scalar_check')
if scalar_check_opt is not None:
if isinstance(scalar_check_opt, bool):
scalar_check = str(scalar_check_opt).lower()
else:
scalar_check = scalar_check_opt
scalar_check_is_from_option = True
for arg in option['arguments']:
if is_real_argument_to_wrapper(arg):
count += 1
if arg['type'] == 'THSize*' and not scalar_check_is_from_option:
scalar_check_is_from_size = True
scalar_check = '{}.size() == 0'.format(arg['name'])
if arg['type'] == 'TensorList':
seen_tensorlists.add(arg['name'])
wrap_dim_target = arg.get('wrap_dim', None)
if wrap_dim_target is not None:
# for Tensors, "name_" is the TensorImpl, but for TensorLists, it is an
# std::vector of TH*s. Since TH*s have different dimension rules, we used
# "name" instead, but keep "name_" for tensor to avoid an extra function call.
if wrap_dim_target not in seen_tensorlists:
wrap_dim_target = wrap_dim_target + "_"
body.append("{} = maybe_wrap_dim({}, {});"
.format(arg['name'], arg['name'], wrap_dim_target))
# only generate checked casts the first time we see it
if arg['name'] not in seen_names and requires_checked_cast(arg):
seen_names.add(arg['name'])
# make a new allocation of TensorImpl, then wrap a Tensor around it.
if arg.get('allocate', False):
body += allocate_arg(env, arg, output_count)
output_count += 1
# extract the TensorImpl from an existing tensor (or Storage, etc.)
else:
# special case where we allow undefined Tensors, and thus
# the checked cast succeeds even if the Tensor is not
# defined
null_okay = 'true' if nullable_argument(arg) else 'false'
default_init = []
if 'default_init' in arg:
default_init.append(arg['default_init'])
noelem_to_empty = 'is_noelem_tensor_size(size)' if 'size' in seen_names else 'false'
check_cast = CHECKED_CAST[arg['type']].substitute(
env, arg_name=arg['name'], arg_pos=count,
null_okay=null_okay, default_init=default_init,
size=arg.get('size'),
noelem_to_empty=noelem_to_empty)
body.append("auto {}_ = {};".format(
arg['name'], check_cast))
if drop_argument(arg, option) or replace_with_null(arg):
body.append(
"(void) {}_; //silence unused warning".format(arg['name']))
initializers = []
# resize tensors for special ops that require it
if 'resize' in arg:
initializers.append(resize_arg(arg))
# also special handling where we zero some outputs.
if arg.get('zero', False) or (arg.get('cpu_zero', False) and not is_cuda):
initializers.append("{}.zero_();".format(arg['name']))
# only initialize non-null arguments
if nullable_argument(arg) and len(initializers) > 0:
body.append(CONDITIONAL_INITIALIZER.substitute({
'name': arg['name'],
'initializer': initializers
}))
else:
body += initializers
# for out-of-place: isScalar() for all input tensors is and'd to form
# the test for whether the output is also a scalar
# for in-place: isScalar() shouldn't change as a result of the operation
if (not arg.get('output') and 'Tensor' in arg['type'] and
'TensorList' not in arg['type'] and
'THS' not in arg['type'] and
not scalar_check_is_from_size and
not scalar_check_is_from_option and
not option['inplace']):
check = '{}->isScalar()'.format(arg['name'] + '_')
if nullable_argument(arg):
check = '(!{} || {})'.format(arg['name'] + '_', check)
scalar_check = (check if scalar_check is None
else scalar_check + ' && ' + check)
# cimpls, if it exists, contains the underlying C function names and
# arguments. Otherwise use option
cimpls = option.get('cimpls', [option])
calls = [handle_call(env, option, cimpl) for cimpl in cimpls]
ret = option['return']
if ret['kind'] == 'arguments':
if 'aten_custom_call' in option:
# all aten_custom_call bodies handle settings on their own.
scalar_check = None
body.append(CodeTemplate(
option['aten_custom_call']).substitute(env))
else:
body.extend([call + ';' for call in calls])
arguments_indices = ret['arguments']
arguments = [option['arguments'][argi]
for argi in arguments_indices]
if scalar_check is not None:
if not isinstance(scalar_check, dict):
if len(arguments) > 1:
body.append("bool maybe_scalar = {};".format(scalar_check))
scalar_check = 'maybe_scalar'
for arg in arguments:
scalar_check_arg = (scalar_check if not isinstance(scalar_check, dict)
else scalar_check.get(arg['name'])) # type: ignore
if scalar_check_arg is not None:
stmt = "{}_->maybeScalar({});".format(arg['name'], scalar_check_arg)
if nullable_argument(arg):
stmt = "if ({}_) {}".format(arg['name'], stmt)
body.append(stmt)
if len(arguments_indices) == 1:
arg = arguments[0]
body.append("return {};".format(arg['name']))
else:
types = [to_return_type(arg, option)['type']
for arg in arguments]
# TODO: check for move semantics...
names = [arg['name'] for arg in arguments]
body.append(CodeTemplate("return std::tuple<${types}>(${names});").substitute(
types=types, names=names))
elif ret['kind'] == 'type':
assert len(calls) == 1
call = calls[0]
if 'aten_custom_call' in option:
# all aten_custom_call bodies handle settings on their own.
scalar_check = None
body.append(CodeTemplate(
option['aten_custom_call']).substitute(env))
if ret['type'] in ALLOC_WRAP.keys():
maybe_scalar = "->maybeScalar({})".format(scalar_check) \
if scalar_check is not None \
else ""
wrapped_tensor = CodeTemplate(ALLOC_WRAP[ret['type']]).substitute(
env, arguments=[call])
return_tensor = "return Tensor((${wrapped_tensor})${maybe_scalar},false);"
body.append(CodeTemplate(return_tensor).substitute(
env, wrapped_tensor=wrapped_tensor, maybe_scalar=maybe_scalar))
# return the same underlying Tensor type for both real and accreal; this ensures
# e.g. x.sum(0) and x.sum() return the same type. We explicitly cast to the
# ScalarType before constructing the scalarTensor to avoid overflow checking.
elif ret['type'] == 'accreal' or ret['type'] == 'real':
return_scalar = 'return scalarTensor(convert<${ScalarType}>(${call}));'
body.append(CodeTemplate(return_scalar).substitute(env, call=call))
else:
                # we use int64_t for long in the API, so correct it here...
if is_actual_return_long(ret):
call = "static_cast<int64_t>({})".format(call)
body.append("return {};".format(call))
else:
raise Exception("NYI - return handling")
return body
def process_option(option):
# type: (FunctionOption) -> None
pair = (backend_type_env['Backend'],
backend_type_env['ScalarName'])
if pair in option['backend_type_pairs']:
env = nested_dict(option, backend_type_env)
body = emit_body(env, option) # type: ignore
option['type_definition_body'] = body
type_object_declarations.append(
TYPE_DERIVED_DECLARATION.substitute(env))
type_object_definitions.append(
TYPE_DERIVED_DEFINITION.substitute(env))
def process_native(option):
# type: (FunctionOption) -> None
dispatch = option['type_method_definition_dispatch']
env = nested_dict(option, backend_type_env)
if isinstance(dispatch, dict):
pair = (backend_type_env['Backend'],
backend_type_env['ScalarName'])
if pair in option['backend_type_pairs']:
native_dispatch = dispatch.get(pair[0])
if native_dispatch is None:
raise Exception('could not find backend {} in native function dispatch specification {}'
.format(pair[0], dispatch))
option['native_type_method_dispatch'] = native_dispatch
type_object_declarations.append(
TYPE_DERIVED_DECLARATION.substitute(env))
type_object_definitions.append(
TYPE_DERIVED_DEFINITION_NATIVE.substitute(env))
for declaration in declarations:
for option in declaration['options']:
if not option.get('skip', False):
try:
if option['mode'] == 'NN' and option.get('cimpls') is None:
continue
if option['mode'] != 'native':
process_option(option)
else:
process_native(option)
except NYIError:
pass
return type_object_declarations, type_object_definitions
|
import yaml
# follows similar logic to cwrap, ignores !inc, and just looks for [[]]
def parse(filename):
with open(filename, 'r') as file:
declaration_lines = []
declarations = []
in_declaration = False
for line in file.readlines():
line = line.rstrip()
if line == '[[':
declaration_lines = []
in_declaration = True
elif line == ']]':
in_declaration = False
declaration = yaml.load('\n'.join(declaration_lines))
declarations.append(declaration)
elif in_declaration:
declaration_lines.append(line)
return declarations
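# For reference, a hypothetical block in the format parse() expects: everything between
# the '[[' and ']]' delimiter lines is handed to yaml.load as a single document.
#
#   [[
#     name: zero_
#     return: self
#     arguments:
#       - THTensor* self
#   ]]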
|
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-o', '--output', help='where to write the result file.',
action='store', default='.')
options, _ = parser.parse_args()
files = [
# '../../csrc/cudnn/cuDNN.cwrap',
'../../csrc/generic/TensorMethods.cwrap',
# '../../csrc/generic/methods/SparseTensor.cwrap',
'../../csrc/generic/methods/Tensor.cwrap',
'../../csrc/generic/methods/TensorApply.cwrap',
'../../csrc/generic/methods/TensorCompare.cwrap',
'../../csrc/generic/methods/TensorCuda.cwrap',
'../../csrc/generic/methods/TensorMath.cwrap',
'../../csrc/generic/methods/TensorRandom.cwrap',
# '../../csrc/generic/methods/TensorSerialization.cwrap',
]
declaration_lines = []
for filename in files:
with open(filename, 'r') as file:
in_declaration = False
for line in file.readlines():
line = line.rstrip()
if line == '[[':
in_declaration = True
declaration_lines.append(line)
elif line == ']]':
in_declaration = False
declaration_lines.append(line)
elif in_declaration:
declaration_lines.append(line)
with open(options.output, 'w') as output:
output.write('\n'.join(declaration_lines) + '\n')
|
import re
import yaml
try:
# use faster C loader if available
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
def parse_default(s):
if s.lower() == 'true':
return True
elif s.lower() == 'false':
return False
elif s == 'nullptr':
return s
elif s == '{}':
return '{}'
elif s == 'nullopt':
return s
try:
return int(s)
except Exception:
return float(s)
def sanitize_types(typ):
# split tuples into constituent list
if typ[0] == '(' and typ[-1] == ')':
return [x.strip() for x in typ[1:-1].split(',')]
elif typ == 'Generator*':
return ['Generator *']
return [typ]
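# Illustrative only (not called anywhere): how the helpers above normalize raw
# schema strings.
def _example_parsing_helpers():
    assert parse_default('true') is True and parse_default('1e-5') == 1e-5
    assert sanitize_types('(Tensor, Tensor)') == ['Tensor', 'Tensor']
    assert sanitize_types('Generator*') == ['Generator *']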
def parse_arguments(args, func_decl, func_name, func_return):
arguments = []
python_default_inits = func_decl.get('python_default_init', {})
is_out_fn = func_name.endswith('_out')
kwarg_only = False
# TODO: Use a real parser here; this will get bamboozled
# by signatures that contain things like std::array<bool, 2> (note the space)
for arg_idx, arg in enumerate(args.split(', ')):
type_and_name = [a.strip() for a in arg.rsplit(' ', 1)]
if type_and_name == ['*']:
assert not kwarg_only
kwarg_only = True
continue
t, name = type_and_name
default = None
python_default_init = None
if '=' in name:
ns = name.split('=', 1)
name, default = ns[0], parse_default(ns[1])
if name in python_default_inits:
assert default is None
python_default_init = python_default_inits[name]
typ = sanitize_types(t)
assert len(typ) == 1
argument_dict = {'type': typ[0].rstrip('?'), 'name': name, 'is_nullable': typ[0].endswith('?')}
match = re.match(r'IntList\[(\d+)\]', argument_dict['type'])
if match:
argument_dict['type'] = 'IntList'
argument_dict['size'] = int(match.group(1))
if default is not None:
argument_dict['default'] = default
if python_default_init is not None:
argument_dict['python_default_init'] = python_default_init
# TODO: convention is that the i-th argument corresponds to the i-th return, but it would
# be better if we just named everything and matched by name.
if is_out_fn and arg_idx < len(func_return):
argument_dict['output'] = True
if kwarg_only:
argument_dict['kwarg_only'] = True
arguments.append(argument_dict)
return arguments
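# Illustrative only; the signature below is hypothetical.
def _example_parse_arguments():
    parsed = parse_arguments(
        'Tensor self, IntList[2] output_size, *, bool align_corners=false',
        {}, 'upsample_bilinear2d', [])
    assert parsed[1] == {'type': 'IntList', 'name': 'output_size',
                         'is_nullable': False, 'size': 2}
    # everything after the bare '*' marker is keyword-only
    assert parsed[2]['kwarg_only'] and parsed[2]['default'] is False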
def has_sparse_dispatches(dispatches):
for dispatch in dispatches:
if 'Sparse' in dispatch:
return True
return False
def parse_native_yaml(path):
with open(path, 'r') as f:
return yaml.load(f, Loader=Loader)
def run(paths):
declarations = []
for path in paths:
for func in parse_native_yaml(path):
declaration = {'mode': 'native'}
if '->' in func['func']:
func_decl, return_type = [x.strip() for x in func['func'].split('->')]
return_type = sanitize_types(return_type)
else:
func_decl = func['func']
return_type = None
fn_name, arguments = func_decl.split('(')
arguments = arguments.split(')')[0]
declaration['name'] = func.get('name', fn_name)
declaration['return'] = list(func.get('return', return_type))
declaration['variants'] = func.get('variants', ['method', 'function'])
declaration['arguments'] = func.get('arguments', parse_arguments(arguments, func,
declaration['name'], declaration['return']))
declaration['type_method_definition_dispatch'] = func.get('dispatch', declaration['name'])
declaration['aten_sparse'] = has_sparse_dispatches(
declaration['type_method_definition_dispatch'])
declarations.append(declaration)
return declarations
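# For reference, a hypothetical native_functions.yaml entry in the shape run() expects:
#
#   - func: my_op(Tensor self, IntList[2] size) -> Tensor
#     dispatch:
#       CPU: my_op_cpu
#       CUDA: my_op_cuda
#
# run() splits the signature on '->', parses the arguments with parse_arguments, and
# records the per-backend table under 'type_method_definition_dispatch'.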
|
from code_template import CodeTemplate
from function_wrapper import nested_dict
FILE = CodeTemplate("""\
#include "ATen/Config.h"
#include "TH/TH.h"
#if AT_CUDA_ENABLED()
#undef THNN_
#include "THC/THC.h"
#endif
#include "ATen/Utils.h"
${copy_includes}
namespace at {
${copy_functions}
}
""")
COPY = CodeTemplate("""\
${THTensor}_copy${cuda}${src_scalar_name}(${state,}self_->tensor, static_cast<${src_tensor}*>(src.pImpl)->tensor);
""")
COPY_ASYNC_CPU = CodeTemplate("""\
if (non_blocking) {
${THTensor}_copyAsyncCPU(${state,}self_->tensor, static_cast<${src_tensor}*>(src.pImpl)->tensor);
break;
}
""")
COPY_ASYNC_CUDA = CodeTemplate("""\
if (non_blocking) {
${THTensor}_copyAsyncCuda(${state,}self_->tensor, static_cast<${src_tensor}*>(src.pImpl)->tensor);
break;
}
""")
CASE = CodeTemplate("""\
case ${src_id}:
${copies}
break;
""")
FUNCTION = CodeTemplate("""\
Tensor & ${Type}::s_copy_(Tensor & self, const Tensor & src, bool non_blocking) const {
// code generated by function_wrapper
auto self_ = checked_cast_tensor<${Tensor}>(self.pImpl, "self", 0,false);
(void) self_; //silence unused warning
switch (src.type().ID()) {
${copy_body}
default:
AT_ERROR("copy does not support %s to %s copy.", src.type().toString(), toString());
break;
}
self.pImpl->setScalar(src.pImpl->isScalar());
return self;
}
""")
def create_one(env, all_types):
copy_body = []
for src_type in all_types:
if env['Density'] == 'Sparse' or src_type['Density'] == 'Sparse':
# skip sparse copies, which are not yet implemented
continue
state = []
cuda = ''
if src_type['Backend'] == 'CUDA':
cuda = 'Cuda'
if env['Backend'] == 'CUDA' or src_type['Backend'] == 'CUDA':
state.append('context->thc_state')
combined = nested_dict({
'src_scalar_name': src_type['ScalarName'],
'src_id': src_type['TypeID'],
'src_tensor': src_type['Tensor'],
'cuda': cuda,
'state': state,
}, env)
copies = []
if env['ScalarType'] == src_type['ScalarType']:
if env['Backend'] == 'CUDA' and src_type['Backend'] == 'CPU':
copies.append(COPY_ASYNC_CPU.substitute(combined))
if env['Backend'] == 'CPU' and src_type['Backend'] == 'CUDA':
copies.append(COPY_ASYNC_CUDA.substitute(combined))
copies.append(COPY.substitute(combined))
copy_body.append(CASE.substitute(combined, copies=copies))
return FUNCTION.substitute(env, copy_body=copy_body)
def create(all_types):
top_env = {
'copy_includes': [],
'copy_functions': [],
}
for dst_type in all_types:
top_env['copy_includes'].append(
'#include "ATen/{}.h"'.format(dst_type['Type']))
top_env['copy_includes'].append(
'#include "ATen/{}.h"'.format(dst_type['Tensor']))
top_env['copy_functions'].append(create_one(dst_type, all_types))
return FILE.substitute(top_env)
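# Hedged usage sketch (the per-type environments are built elsewhere in the generator):
#
#   source = create(all_types)
#
# emits one s_copy_ switch per destination type; async fast paths are generated only for
# CPU<->CUDA copies with matching scalar types, and sparse copies are skipped entirely.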
|