"""ImageNet-v2 tf.data input pipeline.
Uses TFDS https://www.tensorflow.org/datasets/catalog/imagenet_v2.
"""
import functools
from typing import Dict, Iterator, Tuple
import tensorflow_datasets as tfds
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \
input_pipeline
def get_imagenet_v2_iter(data_dir: str,
global_batch_size: int,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
image_size: int,
resize_size: int) -> Iterator[Dict[str, spec.Tensor]]:
"""Always caches and repeats indefinitely."""
ds = tfds.load(
'imagenet_v2/matched-frequency:3.0.0',
split='test',
data_dir=data_dir,
decoders={
'image': tfds.decode.SkipDecoding(),
})
def _decode_example(example: Dict[str, float]) -> Dict[str, float]:
image = input_pipeline.preprocess_for_eval(example['image'],
mean_rgb,
stddev_rgb,
image_size,
resize_size)
return {'inputs': image, 'targets': example['label']}
ds = ds.map(_decode_example, num_parallel_calls=16)
ds = ds.batch(global_batch_size)
shard_pad_fn = functools.partial(
data_utils.shard_and_maybe_pad_np, global_batch_size=global_batch_size)
it = map(shard_pad_fn, iter(ds))
return it
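# A minimal usage sketch (illustrative only, not part of this module; the
# data_dir below is a placeholder): each yielded element is a dict of numpy
# arrays that data_utils.shard_and_maybe_pad_np has already reshaped to
# [num_local_devices, per_device_batch_size, ...].
#
#   it = get_imagenet_v2_iter('/path/to/tfds', 128,
#                             mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255),
#                             stddev_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255),
#                             image_size=224, resize_size=256)
#   batch = next(it)
#   # batch['inputs'].shape == (num_local_devices,
#   #                           128 // num_local_devices, 224, 224, 3)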
"""PyTorch implementation of ResNet.
Adapted from torchvision:
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py.
"""
import collections
from typing import Any, Callable, List, Optional, Type, Union
import torch
from torch import nn
from torch import Tensor
from algorithmic_efficiency import spec
from algorithmic_efficiency.init_utils import pytorch_default_init
def conv3x3(in_planes: int,
out_planes: int,
stride: int = 1,
groups: int = 1,
dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution."""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
"""ResNet block."""
expansion: int = 1
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample
# the input when stride != 1.
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: spec.Tensor) -> spec.Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck ResNet block."""
expansion: int = 4
def __init__(self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample
# the input when stride != 1.
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = True,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# Each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead.
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
'replace_stride_with_dilation should be None '
f'or a 3-element tuple, got {replace_stride_with_dilation}')
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
pytorch_default_init(m)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.fc.weight, std=1e-2)
nn.init.constant_(self.fc.bias, 0.)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros,
# and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to
# https://arxiv.org/abs/1706.02677.
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = torch.nn.Sequential(
collections.OrderedDict([
("conv", conv1x1(self.inplanes, planes * block.expansion,
stride)),
("bn", norm_layer(planes * block.expansion)),
]))
layers = []
layers.append(
block(self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x: spec.Tensor) -> spec.Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet18(**kwargs: Any) -> ResNet:
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet50(**kwargs: Any) -> ResNet:
return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
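# A minimal sanity-check sketch (an illustration, not part of the original
# file; it assumes the algorithmic_efficiency package and torch are
# importable): build the ResNet-50 defined above and confirm the
# [batch, num_classes] output shape.
if __name__ == '__main__':
  model = resnet50(num_classes=1000)
  model.eval()
  with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))
  print(logits.shape)  # torch.Size([2, 1000])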
"""ImageNet workload implemented in PyTorch."""
import contextlib
import functools
import itertools
import math
import os
import random
from typing import Dict, Iterator, Optional, Tuple
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision import transforms
from torchvision.datasets.folder import ImageFolder
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
import algorithmic_efficiency.random_utils as prng
from algorithmic_efficiency.workloads.imagenet_resnet import imagenet_v2
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch import \
randaugment
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
resnet50
from algorithmic_efficiency.workloads.imagenet_resnet.workload import \
BaseImagenetResNetWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
def imagenet_v2_to_torch(
batch: Dict[str, spec.Tensor]) -> Dict[str, spec.Tensor]:
# Slice off the part of the batch for this device and then transpose from
# [N, H, W, C] to [N, C, H, W]. Only transfer the inputs to GPU.
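# Illustrative shape note (an assumption about the sharded numpy batches):
# without DDP, 'inputs' arrives as [num_devices, per_device_bs, H, W, C] and
# is flattened to [num_devices * per_device_bs, H, W, C] before the NHWC ->
# NCHW transpose; under DDP each process keeps only its own shard v[RANK].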
new_batch = {}
for k, v in batch.items():
if USE_PYTORCH_DDP:
new_v = v[RANK]
else:
new_v = v.reshape(-1, *v.shape[2:])
if k == 'inputs':
new_v = np.transpose(new_v, (0, 3, 1, 2))
dtype = torch.long if k == 'targets' else torch.float
new_batch[k] = torch.as_tensor(new_v, dtype=dtype, device=DEVICE)
return new_batch
class ImagenetResNetWorkload(BaseImagenetResNetWorkload):
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
use_mixup: bool = False,
use_randaug: bool = False) -> Iterator[Dict[str, spec.Tensor]]:
del cache
del repeat_final_dataset
if split == 'test':
np_iter = imagenet_v2.get_imagenet_v2_iter(
data_dir,
global_batch_size,
mean_rgb=self.train_mean,
stddev_rgb=self.train_stddev,
image_size=self.center_crop_size,
resize_size=self.resize_size)
return map(imagenet_v2_to_torch, itertools.cycle(np_iter))
is_train = split == 'train'
normalize = transforms.Normalize(
mean=[i / 255. for i in self.train_mean],
std=[i / 255. for i in self.train_stddev])
if is_train:
transform_config = [
transforms.RandomResizedCrop(
self.center_crop_size,
scale=self.scale_ratio_range,
ratio=self.aspect_ratio_range),
transforms.RandomHorizontalFlip(),
]
if use_randaug:
transform_config.append(randaugment.RandAugment())
transform_config.extend([transforms.ToTensor(), normalize])
transform_config = transforms.Compose(transform_config)
else:
transform_config = transforms.Compose([
transforms.Resize(self.resize_size),
transforms.CenterCrop(self.center_crop_size),
transforms.ToTensor(),
normalize,
])
folder = 'train' if 'train' in split else 'val'
dataset = ImageFolder(
os.path.join(data_dir, folder), transform=transform_config)
if split == 'eval_train':
indices = list(range(self.num_train_examples))
random.Random(data_rng[0]).shuffle(indices)
dataset = torch.utils.data.Subset(dataset,
indices[:self.num_eval_train_examples])
sampler = None
if USE_PYTORCH_DDP:
per_device_batch_size = global_batch_size // N_GPUS
ds_iter_batch_size = per_device_batch_size
else:
ds_iter_batch_size = global_batch_size
if USE_PYTORCH_DDP:
if is_train:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=N_GPUS, rank=RANK, shuffle=True)
else:
sampler = data_utils.DistributedEvalSampler(
dataset, num_replicas=N_GPUS, rank=RANK, shuffle=False)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=ds_iter_batch_size,
shuffle=not USE_PYTORCH_DDP and is_train,
sampler=sampler,
num_workers=4 if is_train else 0,
pin_memory=True,
drop_last=is_train,
persistent_workers=is_train)
dataloader = data_utils.PrefetchedWrapper(dataloader, DEVICE)
dataloader = data_utils.cycle(
dataloader,
custom_sampler=USE_PYTORCH_DDP,
use_mixup=use_mixup,
mixup_alpha=0.2)
return dataloader
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
torch.random.manual_seed(rng[0])
model = resnet50()
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['fc.weight', 'fc.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
model = params
if mode == spec.ForwardPassMode.EVAL:
if update_batch_norm:
raise ValueError(
'Batch norm statistics cannot be updated during evaluation.')
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
model.apply(
functools.partial(
pytorch_utils.update_batch_norm_fn,
update_batch_norm=update_batch_norm))
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(augmented_and_preprocessed_input_batch['inputs'])
return logits_batch, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
per_example_losses = F.cross_entropy(
logits_batch,
label_batch,
reduction='none',
label_smoothing=label_smoothing)
# `mask_batch` is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def _compute_metrics(self,
logits: spec.Tensor,
labels: spec.Tensor,
weights: spec.Tensor) -> Dict[str, spec.Tensor]:
"""Return the mean accuracy and loss as a dict."""
if weights is None:
weights = torch.ones(len(logits), device=DEVICE)
predicted = torch.argmax(logits, 1)
# Not accuracy, but the number of correct predictions.
accuracy = ((predicted == labels) * weights).sum()
summed_loss = self.loss_fn(labels, logits, weights)['summed']
return {'accuracy': accuracy, 'loss': summed_loss}
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
data_rng, model_rng = prng.split(rng, 2)
if split not in self._eval_iters:
is_test = split == 'test'
# These iterators repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
data_rng,
split=split,
global_batch_size=global_batch_size,
data_dir=data_dir,
cache=is_test,
repeat_final_dataset=is_test)
total_metrics = {
'accuracy': torch.tensor(0., device=DEVICE),
'loss': torch.tensor(0., device=DEVICE),
}
num_batches = int(math.ceil(num_examples / global_batch_size))
for _ in range(num_batches):
batch = next(self._eval_iters[split])
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
model_rng,
update_batch_norm=False)
weights = batch.get('weights')
batch_metrics = self._compute_metrics(logits, batch['targets'], weights)
total_metrics = {
k: v + batch_metrics[k] for k, v in total_metrics.items()
}
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
return {k: float(v.item() / num_examples) for k, v in total_metrics.items()}
"""PyTorch implementation of RandAugmentation.
Adapted from:
https://pytorch.org/vision/stable/_modules/torchvision/transforms/autoaugment.html.
"""
import math
from typing import Dict, List, Optional, Tuple
import numpy as np
import PIL
import PIL.Image
import PIL.ImageDraw
import torch
from torch import Tensor
from torchvision.transforms import functional as F
from torchvision.transforms import InterpolationMode
from algorithmic_efficiency import spec
def cutout(img: spec.Tensor, pad_size: int) -> spec.Tensor:
image_width, image_height = img.size
x0 = np.random.uniform(image_width)
y0 = np.random.uniform(image_height)
# Double the pad size to match Jax implementation.
pad_size = pad_size * 2
x0 = int(max(0, x0 - pad_size / 2.))
y0 = int(max(0, y0 - pad_size / 2.))
x1 = int(min(image_width, x0 + pad_size))
y1 = int(min(image_height, y0 + pad_size))
xy = (x0, y0, x1, y1)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, (128, 128, 128))
return img
def solarize(img: spec.Tensor, threshold: float) -> spec.Tensor:
img = np.array(img)
new_img = np.where(img < threshold, img, 255. - img)
return PIL.Image.fromarray(new_img.astype(np.uint8))
def solarize_add(img: spec.Tensor, addition: int = 0) -> spec.Tensor:
threshold = 128
img = np.array(img)
added_img = img.astype(np.int64) + addition
added_img = np.clip(added_img, 0, 255).astype(np.uint8)
new_img = np.where(img < threshold, added_img, img)
return PIL.Image.fromarray(new_img)
def _apply_op(img: spec.Tensor,
op_name: str,
magnitude: float,
interpolation: InterpolationMode,
fill: Optional[List[float]]) -> spec.Tensor:
if op_name == 'ShearX':
# Magnitude should be arctan(magnitude).
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[math.degrees(math.atan(magnitude)), 0.0],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == 'ShearY':
# Magnitude should be arctan(magnitude).
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[0.0, math.degrees(math.atan(magnitude))],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == 'TranslateX':
img = F.affine(
img,
angle=0.0,
translate=[int(magnitude), 0],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == 'TranslateY':
img = F.affine(
img,
angle=0.0,
translate=[0, int(magnitude)],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == 'Rotate':
img = F.rotate(img, magnitude, interpolation=interpolation, fill=fill)
elif op_name == 'Brightness':
img = F.adjust_brightness(img, magnitude)
elif op_name == 'Color':
img = F.adjust_saturation(img, magnitude)
elif op_name == 'Contrast':
img = F.adjust_contrast(img, magnitude)
elif op_name == 'Sharpness':
img = F.adjust_sharpness(img, magnitude)
elif op_name == 'Posterize':
img = F.posterize(img, int(magnitude))
elif op_name == 'Cutout':
img = cutout(img, int(magnitude))
elif op_name == 'SolarizeAdd':
img = solarize_add(img, int(magnitude))
elif op_name == 'Solarize':
img = solarize(img, magnitude)
elif op_name == 'AutoContrast':
img = F.autocontrast(img)
elif op_name == 'Equalize':
img = F.equalize(img)
elif op_name == 'Invert':
img = F.invert(img)
elif op_name == 'Identity':
pass
else:
raise ValueError(f'The provided operator {op_name} is not recognized.')
return img
def ops_space() -> Dict[str, Tuple[spec.Tensor, bool]]:
return {
# op_name: (magnitudes, signed)
'ShearX': (torch.tensor(0.3), True),
'ShearY': (torch.tensor(0.3), True),
'TranslateX': (torch.tensor(100), True),
'TranslateY': (torch.tensor(100), True),
'Rotate': (torch.tensor(30), True),
'Brightness': (torch.tensor(1.9), False),
'Color': (torch.tensor(1.9), False),
'Contrast': (torch.tensor(1.9), False),
'Sharpness': (torch.tensor(1.9), False),
'Posterize': (torch.tensor(4), False),
'Solarize': (torch.tensor(256), False),
'SolarizeAdd': (torch.tensor(110), False),
'AutoContrast': (torch.tensor(0.0), False),
'Equalize': (torch.tensor(0.0), False),
'Invert': (torch.tensor(0.0), False),
'Cutout': (torch.tensor(40.0), False),
}
class RandAugment(torch.nn.Module):
def __init__(
self,
num_ops: int = 2,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
) -> None:
super().__init__()
self.num_ops = num_ops
self.interpolation = interpolation
self.fill = fill
def forward(self, img: spec.Tensor) -> spec.Tensor:
fill = self.fill if self.fill is not None else 128
channels, _, _ = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
op_meta = ops_space()
for _ in range(self.num_ops):
op_index = int(torch.randint(len(op_meta), (1,)).item())
op_name = list(op_meta.keys())[op_index]
magnitude, signed = op_meta[op_name]
magnitude = float(magnitude)
if signed and torch.randint(2, (1,)):
# With 50% prob turn the magnitude negative.
magnitude *= -1.0
img = _apply_op(
img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
return img
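# A minimal usage sketch (illustrative; the pipeline below is an assumption,
# though it mirrors how the ImageNet workload appends RandAugment() when
# `use_randaug` is set): the transform is applied to PIL images, so it goes
# before ToTensor().
#
#   from torchvision import transforms
#   train_transform = transforms.Compose([
#       transforms.RandomResizedCrop(224),
#       transforms.RandomHorizontalFlip(),
#       RandAugment(num_ops=2),
#       transforms.ToTensor(),
#   ])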
"""ImageNet input pipeline.
Forked from Flax example which can be found here:
https://github.com/google/flax/blob/main/examples/imagenet/input_pipeline.py.
"""
import functools
from typing import Dict, Iterator, Tuple
from flax import jax_utils
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \
randaugment
TFDS_SPLIT_NAME = {
'train': 'train', 'eval_train': 'train', 'validation': 'validation'
}
def _distorted_bounding_box_crop(image_bytes: spec.Tensor,
rng: spec.RandomState,
bbox: spec.Tensor,
min_object_covered: float = 0.1,
aspect_ratio_range: Tuple[float, float] = (0.75, 1.33),
area_range: Tuple[float, float] = (0.05, 1.0),
max_attempts: int = 100) -> spec.Tensor:
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image_bytes: `Tensor` of binary image data.
rng: a per-example, per-step unique RNG seed.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
Returns:
cropped image `Tensor`
"""
shape = tf.io.extract_jpeg_shape(image_bytes)
bbox_begin, bbox_size, _ = tf.image.stateless_sample_distorted_bounding_box(
shape,
seed=rng,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
return image
def resize(image: spec.Tensor, image_size: int) -> spec.Tensor:
"""Resizes the image given the image size.
Args:
image: `Tensor` of image data.
image_size: A size of the image to be reshaped.
Returns:
Resized image 'Tensor'.
"""
return tf.image.resize([image], [image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC)[0]
def _at_least_x_are_equal(a: spec.Tensor, b: spec.Tensor, x: float) -> bool:
"""At least `x` of `a` and `b` `Tensors` are equal."""
match = tf.equal(a, b)
match = tf.cast(match, tf.int32)
return tf.greater_equal(tf.reduce_sum(match), x)
def _decode_and_random_crop(image_bytes: spec.Tensor,
rng: spec.RandomState,
image_size: int,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
resize_size: int) -> spec.Tensor:
"""Make a random crop of image_size."""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
image = _distorted_bounding_box_crop(
image_bytes,
rng,
bbox,
min_object_covered=0.1,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=10)
original_shape = tf.io.extract_jpeg_shape(image_bytes)
bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)
image = tf.cond(
bad,
lambda: _decode_and_center_crop(image_bytes, image_size, resize_size),
lambda: resize(image, image_size))
return image
def _decode_and_center_crop(image_bytes: spec.Tensor,
image_size: int,
resize_size: int) -> spec.Tensor:
"""Crops to center of image with padding then scales image_size."""
shape = tf.io.extract_jpeg_shape(image_bytes)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / resize_size) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([
offset_height,
offset_width,
padded_center_crop_size,
padded_center_crop_size,
])
image = tf.io.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = resize(image, image_size)
return image
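# Worked example (illustrative numbers): with image_size=224 and
# resize_size=256, a 384x512 JPEG has min(h, w) = 384, so
# padded_center_crop_size = int((224 / 256) * 384) = 336; the 336x336 center
# crop is then resized to 224x224, matching the usual "resize the short side
# to 256, center-crop 224" recipe.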
def normalize_image(image: spec.Tensor,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float]) -> spec.Tensor:
image -= tf.constant(mean_rgb, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(stddev_rgb, shape=[1, 1, 3], dtype=image.dtype)
return image
def preprocess_for_train(image_bytes: spec.Tensor,
rng: spec.RandomState,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
image_size: int,
resize_size: int,
dtype: tf.DType = tf.float32,
use_randaug: bool = False,
randaug_num_layers: int = 2,
randaug_magnitude: int = 10) -> spec.Tensor:
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
rng: a per-example, per-step unique RNG seed.
dtype: data type of the image.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
rngs = tf.random.experimental.stateless_split(rng, 3)
image = _decode_and_random_crop(image_bytes,
rngs[0],
image_size,
aspect_ratio_range,
area_range,
resize_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = tf.image.stateless_random_flip_left_right(image, seed=rngs[1])
if use_randaug:
image = tf.cast(tf.clip_by_value(image, 0, 255), tf.uint8)
image = randaugment.distort_image_with_randaugment(image,
randaug_num_layers,
randaug_magnitude,
rngs[2])
image = tf.cast(image, tf.float32)
image = normalize_image(image, mean_rgb, stddev_rgb)
image = tf.image.convert_image_dtype(image, dtype=dtype)
return image
def preprocess_for_eval(image_bytes: spec.Tensor,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
image_size: int,
resize_size: int,
dtype: tf.DType = tf.float32) -> spec.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
dtype: data type of the image.
image_size: image size.
Returns:
A preprocessed image `Tensor`.
"""
image = _decode_and_center_crop(image_bytes, image_size, resize_size)
image = tf.reshape(image, [image_size, image_size, 3])
image = normalize_image(image, mean_rgb, stddev_rgb)
image = tf.image.convert_image_dtype(image, dtype=dtype)
return image
# Modified from
# github.com/google/init2winit/blob/master/init2winit/dataset_lib/image_preprocessing.py.
def mixup_tf(key: spec.RandomState,
inputs: spec.Tensor,
targets: spec.Tensor,
alpha: float = 0.2) -> Tuple[spec.Tensor, spec.Tensor]:
"""Perform mixup https://arxiv.org/abs/1710.09412.
NOTE: Code taken from https://github.com/google/big_vision with variables
renamed to match `mixup` in this file and logic to synchronize globally.
Args:
key: The random key to use.
inputs: inputs to mix.
targets: targets to mix.
alpha: the beta/dirichlet concentration parameter, typically 0.1 or 0.2.
Returns:
Mixed inputs and targets.
"""
key_a = tf.random.experimental.stateless_fold_in(key, 0)
key_b = tf.random.experimental.stateless_fold_in(key_a, 0)
gamma_a = tf.random.stateless_gamma((1,), key_a, alpha)
gamma_b = tf.random.stateless_gamma((1,), key_b, alpha)
weight = tf.squeeze(gamma_a / (gamma_a + gamma_b))
# Transform to one-hot targets.
targets = tf.one_hot(targets, 1000)
inputs = weight * inputs + (1.0 - weight) * tf.roll(inputs, 1, axis=0)
targets = weight * targets + (1.0 - weight) * tf.roll(targets, 1, axis=0)
return inputs, targets
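# A small illustrative sketch (assumed, not part of the pipeline): because
# gamma_a / (gamma_a + gamma_b) with both samples drawn at concentration
# `alpha` is a Beta(alpha, alpha) draw, every example is blended with the
# next example in the batch (via tf.roll) using one shared weight.
#
#   key = tf.constant([0, 1], dtype=tf.int64)
#   x = tf.reshape(tf.range(8, dtype=tf.float32), (2, 2, 2, 1))
#   y = tf.constant([3, 7])
#   mixed_x, mixed_y = mixup_tf(key, x, y, alpha=0.2)
#   # mixed_x[0] == w * x[0] + (1 - w) * x[1], and mixed_y has shape
#   # (2, 1000) after the one-hot encoding, blended with the same w.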
def create_split(split,
dataset_builder,
rng,
global_batch_size,
train,
image_size,
resize_size,
mean_rgb,
stddev_rgb,
cache=False,
repeat_final_dataset=False,
aspect_ratio_range=(0.75, 4.0 / 3.0),
area_range=(0.08, 1.0),
use_mixup=False,
mixup_alpha=0.1,
use_randaug=False,
randaug_num_layers=2,
randaug_magnitude=10) -> Iterator[Dict[str, spec.Tensor]]:
"""Creates a split from the ImageNet dataset using TensorFlow Datasets."""
shuffle_rng, preprocess_rng, mixup_rng = jax.random.split(rng, 3)
def decode_example(example_index, example):
dtype = tf.float32
if train:
per_step_preprocess_rng = tf.random.experimental.stateless_fold_in(
tf.cast(preprocess_rng, tf.int64), example_index)
image = preprocess_for_train(example['image'],
per_step_preprocess_rng,
mean_rgb,
stddev_rgb,
aspect_ratio_range,
area_range,
image_size,
resize_size,
dtype,
use_randaug,
randaug_num_layers,
randaug_magnitude)
else:
image = preprocess_for_eval(example['image'],
mean_rgb,
stddev_rgb,
image_size,
resize_size,
dtype)
return {'inputs': image, 'targets': example['label']}
ds = dataset_builder.as_dataset(
split=TFDS_SPLIT_NAME[split],
decoders={
'image': tfds.decode.SkipDecoding(),
})
options = tf.data.Options()
options.threading.private_threadpool_size = 48
ds = ds.with_options(options)
if cache:
ds = ds.cache()
if train or split == 'eval_train':
ds = ds.repeat()
ds = ds.shuffle(16 * global_batch_size, seed=shuffle_rng[0])
# We call ds.enumerate() to get a globally unique per-example, per-step
# index that we can fold into the RNG seed.
ds = ds.enumerate()
ds = ds.map(decode_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.batch(global_batch_size, drop_remainder=train)
if use_mixup:
if train:
def mixup_batch(batch_index, batch):
per_batch_mixup_rng = tf.random.experimental.stateless_fold_in(
mixup_rng, batch_index)
(inputs, targets) = mixup_tf(
per_batch_mixup_rng,
batch['inputs'],
batch['targets'],
alpha=mixup_alpha)
batch['inputs'] = inputs
batch['targets'] = targets
return batch
ds = ds.enumerate().map(
mixup_batch, num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
raise ValueError('Mixup can only be used for the training split.')
if repeat_final_dataset:
ds = ds.repeat()
ds = ds.prefetch(10)
return ds
def create_input_iter(split: str,
dataset_builder: tfds.core.dataset_builder.DatasetBuilder,
rng: spec.RandomState,
global_batch_size: int,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
image_size: int,
resize_size: int,
aspect_ratio_range: Tuple[float, float],
area_range: Tuple[float, float],
train: bool,
cache: bool,
repeat_final_dataset: bool,
use_mixup: bool,
mixup_alpha: float,
use_randaug: bool) -> Iterator[Dict[str, spec.Tensor]]:
ds = create_split(
split,
dataset_builder,
rng,
global_batch_size,
train=train,
image_size=image_size,
resize_size=resize_size,
mean_rgb=mean_rgb,
stddev_rgb=stddev_rgb,
cache=cache,
repeat_final_dataset=repeat_final_dataset,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
use_mixup=use_mixup,
mixup_alpha=mixup_alpha,
use_randaug=use_randaug)
it = map(
functools.partial(
data_utils.shard_and_maybe_pad_np,
global_batch_size=global_batch_size),
ds)
# Note(Dan S): On an Nvidia 2080 Ti GPU, this increased GPU utilization by 10%.
it = jax_utils.prefetch_to_device(it, 2)
return iter(it)
"""Jax implementation of ResNet V1.
Adapted from Flax example:
https://github.com/google/flax/blob/main/examples/imagenet/models.py.
"""
import functools
from typing import Any, Callable, Tuple
from flax import linen as nn
import jax.numpy as jnp
from algorithmic_efficiency import spec
ModuleDef = nn.Module
class ResNetBlock(nn.Module):
"""ResNet block."""
filters: int
conv: ModuleDef
norm: ModuleDef
act: Callable
strides: Tuple[int, int] = (1, 1)
@nn.compact
def __call__(self, x: spec.Tensor) -> spec.Tensor:
residual = x
y = self.conv(self.filters, (3, 3), self.strides)(x)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters, (3, 3))(y)
y = self.norm(scale_init=nn.initializers.zeros)(y)
if residual.shape != y.shape or self.strides != (1, 1):
residual = self.conv(
self.filters, (1, 1), self.strides, name='Conv_proj')(
residual)
residual = self.norm(name='BatchNorm_proj')(residual)
return self.act(residual + y)
class BottleneckResNetBlock(nn.Module):
"""Bottleneck ResNet block."""
filters: int
conv: ModuleDef
norm: ModuleDef
act: Callable
strides: Tuple[int, int] = (1, 1)
@nn.compact
def __call__(self, x: spec.Tensor) -> spec.Tensor:
residual = x
y = self.conv(self.filters, (1, 1))(x)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters, (3, 3), self.strides)(y)
y = self.norm()(y)
y = self.act(y)
y = self.conv(self.filters * 4, (1, 1))(y)
y = self.norm(scale_init=nn.initializers.zeros)(y)
if residual.shape != y.shape or self.strides != (1, 1):
residual = self.conv(
self.filters * 4, (1, 1), self.strides, name='Conv_proj')(
residual)
residual = self.norm(name='BatchNorm_proj')(residual)
return self.act(residual + y)
class ResNet(nn.Module):
stage_sizes: Tuple[int]
block_cls: ModuleDef
num_classes: int
num_filters: int = 64
dtype: Any = jnp.float32
act: Callable = nn.relu
@nn.compact
def __call__(self,
x: spec.Tensor,
update_batch_norm: bool = True) -> spec.Tensor:
conv = functools.partial(nn.Conv, use_bias=False, dtype=self.dtype)
norm = functools.partial(
nn.BatchNorm,
use_running_average=not update_batch_norm,
momentum=0.9,
epsilon=1e-5,
dtype=self.dtype)
x = conv(
self.num_filters, (7, 7), (2, 2),
padding=[(3, 3), (3, 3)],
name='Conv_init')(
x)
x = norm(name='BatchNorm_init')(x)
x = nn.relu(x)
x = nn.max_pool(x, (3, 3), strides=(2, 2), padding=((1, 1), (1, 1)))
for i, block_size in enumerate(self.stage_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = self.block_cls(
self.num_filters * 2**i,
strides=strides,
conv=conv,
norm=norm,
act=self.act)(
x)
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(
self.num_classes,
kernel_init=nn.initializers.normal(),
dtype=self.dtype)(
x)
return x
ResNet18 = functools.partial(
ResNet, stage_sizes=(2, 2, 2, 2), block_cls=ResNetBlock)
ResNet50 = functools.partial(
ResNet, stage_sizes=(3, 4, 6, 3), block_cls=BottleneckResNetBlock)
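# A minimal shape-check sketch (illustrative, not part of the original file;
# assumes jax is installed): initialize ResNet50 on a dummy batch and run an
# eval-mode forward pass, mirroring how the Jax workload's init_model_fn and
# model_fn use it.
if __name__ == '__main__':
  import jax
  model = ResNet50(num_classes=1000)
  variables = model.init(jax.random.PRNGKey(0), jnp.ones((1, 224, 224, 3)))
  logits = model.apply(
      variables, jnp.ones((2, 224, 224, 3)), update_batch_norm=False)
  print(logits.shape)  # (2, 1000)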
"""ImageNet workload implemented in Jax.
Forked from the Flax ImageNet Example v0.3.3
https://github.com/google/flax/tree/v0.3.3/examples/imagenet.
"""
import functools
import itertools
import math
from typing import Dict, Iterator, Optional, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
import tensorflow_datasets as tfds
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import random_utils as prng
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet import imagenet_v2
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \
input_pipeline
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax import \
models
from algorithmic_efficiency.workloads.imagenet_resnet.workload import \
BaseImagenetResNetWorkload
class ImagenetResNetWorkload(BaseImagenetResNetWorkload):
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
use_mixup: bool = False,
use_randaug: bool = False) -> Iterator[Dict[str, spec.Tensor]]:
if split == 'test':
np_iter = imagenet_v2.get_imagenet_v2_iter(
data_dir,
global_batch_size,
mean_rgb=self.train_mean,
stddev_rgb=self.train_stddev,
image_size=self.center_crop_size,
resize_size=self.resize_size)
return itertools.cycle(np_iter)
ds_builder = tfds.builder('imagenet2012:5.1.0', data_dir=data_dir)
train = split == 'train'
ds = input_pipeline.create_input_iter(
split,
ds_builder,
data_rng,
global_batch_size,
self.train_mean,
self.train_stddev,
self.center_crop_size,
self.resize_size,
self.aspect_ratio_range,
self.scale_ratio_range,
train=train,
cache=not train if cache is None else cache,
repeat_final_dataset=repeat_final_dataset,
use_mixup=use_mixup,
mixup_alpha=0.2,
use_randaug=use_randaug)
return ds
def sync_batch_stats(
self, model_state: spec.ModelAuxiliaryState) -> spec.ModelAuxiliaryState:
"""Sync the batch statistics across replicas."""
# An axis_name is passed to pmap which can then be used by pmean.
# In this case each device has its own version of the batch statistics and
# we average them.
avg_fn = jax.pmap(lambda x: lax.pmean(x, 'x'), 'x')
new_model_state = model_state.copy(
{'batch_stats': avg_fn(model_state['batch_stats'])})
return new_model_state
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
model_cls = getattr(models, 'ResNet50')
model = model_cls(num_classes=self._num_classes, dtype=jnp.float32)
self._model = model
input_shape = (1, 224, 224, 3)
variables = jax.jit(model.init)({'params': rng},
jnp.ones(input_shape, model.dtype))
model_state, params = variables.pop('params')
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
model_state = jax_utils.replicate(model_state)
params = jax_utils.replicate(params)
return params, model_state
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_0'
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0, 0),
static_broadcasted_argnums=(0,))
def _eval_model(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng=rng,
update_batch_norm=False)
weights = batch.get('weights')
return self._compute_metrics(logits, batch['targets'], weights)
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del mode
del rng
variables = {'params': params, **model_state}
if update_batch_norm:
logits, new_model_state = self._model.apply(
variables,
augmented_and_preprocessed_input_batch['inputs'],
update_batch_norm=update_batch_norm,
mutable=['batch_stats'])
return logits, new_model_state
else:
logits = self._model.apply(
variables,
augmented_and_preprocessed_input_batch['inputs'],
update_batch_norm=update_batch_norm,
mutable=False)
return logits, model_state
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
if label_batch.shape[-1] != self._num_classes:
one_hot_labels = jax.nn.one_hot(
label_batch, num_classes=self._num_classes)
else:
one_hot_labels = label_batch
smoothed_labels = optax.smooth_labels(one_hot_labels, label_smoothing)
per_example_losses = -jnp.sum(
smoothed_labels * jax.nn.log_softmax(logits_batch, axis=-1), axis=-1)
# mask_batch is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
def _compute_metrics(self,
logits: spec.Tensor,
labels: spec.Tensor,
weights: spec.Tensor) -> Dict[str, spec.Tensor]:
if weights is None:
weights = jnp.ones(len(logits))
summed_loss = self.loss_fn(labels, logits, weights)['summed']
# Not accuracy, but the number of correct predictions.
accuracy = jnp.sum((jnp.argmax(logits, -1) == labels) * weights)
metrics = {
'loss': summed_loss,
'accuracy': accuracy,
}
metrics = lax.psum(metrics, axis_name='batch')
return metrics
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
del global_step
if model_state is not None:
# Sync batch statistics across replicas before evaluating.
model_state = self.sync_batch_stats(model_state)
num_batches = int(math.ceil(num_examples / global_batch_size))
data_rng, eval_rng = prng.split(rng, 2)
# We already repeat the dataset indefinitely in tf.data.
if split not in self._eval_iters:
self._eval_iters[split] = self._build_input_queue(
data_rng,
split=split,
global_batch_size=global_batch_size,
data_dir=data_dir,
cache=True,
repeat_final_dataset=True,
num_batches=num_batches)
eval_metrics = {}
for bi in range(num_batches):
eval_rng = prng.fold_in(eval_rng, bi)
step_eval_rngs = prng.split(eval_rng, jax.local_device_count())
batch = next(self._eval_iters[split])
# These metrics are already summed across devices inside _compute_metrics.
synced_metrics = self._eval_model(params,
batch,
model_state,
step_eval_rngs)
for metric_name, metric_value in synced_metrics.items():
if metric_name not in eval_metrics:
eval_metrics[metric_name] = 0.0
eval_metrics[metric_name] += metric_value
eval_metrics = jax.tree_map(lambda x: float(x[0] / num_examples),
eval_metrics)
return eval_metrics
"""Jax implementation of RandAugmentation.
Adapted from:
https://github.com/google/init2winit/blob/master/init2winit/dataset_lib/autoaugment.py.
"""
import inspect
import math
import tensorflow as tf
from tensorflow_addons import image as contrib_image
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
Factor can be above 0.0. A value of 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor of type uint8.
image2: An image Tensor of type uint8.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor of type uint8.
"""
if factor == 0.0:
return tf.convert_to_tensor(image1)
if factor == 1.0:
return tf.convert_to_tensor(image2)
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
difference = image2 - image1
scaled = factor * difference
# Do addition in float.
temp = tf.cast(image1, tf.float32) + scaled
# Interpolate.
if 0.0 < factor < 1.0:
# Interpolation means we always stay within 0 and 255.
return tf.cast(temp, tf.uint8)
# Extrapolate.
return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)
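# Worked example (illustrative pixel values): for image1 = 100 and
# image2 = 200, factor = 0.5 gives 100 + 0.5 * (200 - 100) = 150
# (interpolation); factor = 1.5 gives 250, and factor = 2.0 would give 300,
# which is clipped back to 255 (extrapolation).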
def cutout(image, pad_size, replace=0):
"""Apply cutout (https://arxiv.org/abs/1708.04552) to image.
This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
a random location within `image`. The pixel values filled in will be of the
value `replace`. The location where the mask is applied is chosen uniformly
at random over the whole image.
Args:
image: An image Tensor of type uint8.
pad_size: Specifies how big the zero mask applied to the image is; the mask
will be of size (2*pad_size x 2*pad_size).
replace: What pixel value to fill in the image in the area that has
the cutout mask applied to it.
Returns:
An image Tensor that is of type uint8.
"""
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
# Sample the center location in the image where the zero mask will be applied.
cutout_center_height = tf.random.uniform(
shape=[], minval=0, maxval=image_height, dtype=tf.int32)
cutout_center_width = tf.random.uniform(
shape=[], minval=0, maxval=image_width, dtype=tf.int32)
lower_pad = tf.maximum(0, cutout_center_height - pad_size)
upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
left_pad = tf.maximum(0, cutout_center_width - pad_size)
right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)
cutout_shape = [
image_height - (lower_pad + upper_pad),
image_width - (left_pad + right_pad),
]
padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
mask = tf.pad(
tf.zeros(cutout_shape, dtype=image.dtype),
padding_dims,
constant_values=1)
mask = tf.expand_dims(mask, -1)
mask = tf.tile(mask, [1, 1, 3])
image = tf.where(
tf.equal(mask, 0),
tf.ones_like(image, dtype=image.dtype) * replace,
image)
return image
def solarize(image, threshold=128):
"""Solarize the input image(s)."""
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
"""Additive solarize the input image(s)."""
added_image = tf.cast(image, tf.int64) + addition
added_image = tf.cast(tf.clip_by_value(added_image, 0, 255), tf.uint8)
return tf.where(image < threshold, added_image, image)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
degenerate = tf.image.rgb_to_grayscale(image)
# Cast before calling tf.histogram.
degenerate = tf.cast(degenerate, tf.int32)
# Compute the grayscale histogram, then compute the mean pixel value,
# and create a constant image size of that value. Use that as the
# blending degenerate target of the original image.
hist = tf.histogram_fixed_width(degenerate, [0, 255], nbins=256)
mean = tf.reduce_sum(tf.cast(hist, tf.float32)) / 256.0
degenerate = tf.ones_like(degenerate, dtype=tf.float32) * mean
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.image.grayscale_to_rgb(tf.cast(degenerate, tf.uint8))
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = 8 - bits
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees, replace):
"""Rotates the image by degrees either clockwise or counterclockwise.
Args:
image: An image Tensor of type uint8.
degrees: Float, a scalar angle in degrees to rotate all images by. If
degrees is positive the image will be rotated clockwise otherwise it will
be rotated counterclockwise.
replace: A one or three value 1D tensor to fill empty pixels caused by
the rotate operation.
Returns:
The rotated version of image.
"""
# Convert from degrees to radians.
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = contrib_image.rotate(wrap(image), radians)
return unwrap(image, replace)
def translate_x(image, pixels, replace):
"""Equivalent of PIL Translate in X dimension."""
image = contrib_image.translate(wrap(image), [-pixels, 0])
return unwrap(image, replace)
def translate_y(image, pixels, replace):
"""Equivalent of PIL Translate in Y dimension."""
image = contrib_image.translate(wrap(image), [0, -pixels])
return unwrap(image, replace)
def shear_x(image, level, replace):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1].
image = contrib_image.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image, replace)
def shear_y(image, level, replace):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1].
image = contrib_image.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image, replace)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops.
Args:
image: A 3D uint8 tensor.
Returns:
The image after it has had autocontrast applied to it and will be of type
uint8.
"""
def scale_channel(image):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(image), tf.float32)
hi = tf.cast(tf.reduce_max(image), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
im = tf.clip_by_value(im, 0.0, 255.0)
return tf.cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_image = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation.
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel.
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension.
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
with tf.device('/cpu:0'):
# Some augmentation that uses depth-wise conv will cause crashing when
# training on GPU.
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.clip_by_value(degenerate, 0.0, 255.0)
degenerate = tf.squeeze(tf.cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_image)
# Blend the final result.
return blend(result, orig_image, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the zeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0),
lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def invert(image):
"""Inverts the image pixels."""
image = tf.convert_to_tensor(image)
return 255 - image
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image, replace):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for a spatial position, the
remaining three channels at that position are filled with the `replace`
value (e.g. gray, 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
replace: A one or three value 1D tensor to fill empty pixels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[..., 3], axis=-1)
replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
return image
NAME_TO_FUNC = {
'AutoContrast': autocontrast,
'Equalize': equalize,
'Invert': invert,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Cutout': cutout,
}
def _randomly_negate_tensor(tensor):
"""With 50% prob turn the tensor negative."""
should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)
final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)
return final_tensor
def _rotate_level_to_arg(level):
level = (level / _MAX_LEVEL) * 30.
level = _randomly_negate_tensor(level)
return (level,)
def _enhance_level_to_arg(level):
return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _shear_level_to_arg(level):
level = (level / _MAX_LEVEL) * 0.3
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def _translate_level_to_arg(level, translate_const):
level = (level / _MAX_LEVEL) * float(translate_const)
# Flip level to negative with 50% chance.
level = _randomly_negate_tensor(level)
return (level,)
def level_to_arg(cutout_const, translate_const):
return {
'AutoContrast':
lambda level: (),
'Equalize':
lambda level: (),
'Invert':
lambda level: (),
'Rotate':
_rotate_level_to_arg,
'Posterize':
lambda level: (int((level / _MAX_LEVEL) * 4),),
'Solarize':
lambda level: (int((level / _MAX_LEVEL) * 256),),
'SolarizeAdd':
lambda level: (int((level / _MAX_LEVEL) * 110),),
'Color':
_enhance_level_to_arg,
'Contrast':
_enhance_level_to_arg,
'Brightness':
_enhance_level_to_arg,
'Sharpness':
_enhance_level_to_arg,
'ShearX':
_shear_level_to_arg,
'ShearY':
_shear_level_to_arg,
'Cutout':
lambda level: (int((level / _MAX_LEVEL) * cutout_const),),
'TranslateX':
lambda level: _translate_level_to_arg(level, translate_const),
'TranslateY':
lambda level: _translate_level_to_arg(level, translate_const),
}
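# Illustrative scale of these arguments (a sketch, assuming _MAX_LEVEL = 10 as
# defined earlier in this file): at the maximum level, Rotate receives up to
# 30 degrees, ShearX/ShearY up to 0.3, TranslateX/TranslateY up to
# translate_const pixels, the enhancement ops a factor of 1.9, and Cutout a
# pad size of cutout_const pixels; the geometric arguments are additionally
# negated with 50% probability by _randomly_negate_tensor.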
def _parse_policy_info(name,
prob,
level,
replace_value,
cutout_const,
translate_const):
"""Return the function that corresponds to `name` and update `level` param."""
func = NAME_TO_FUNC[name]
args = level_to_arg(cutout_const, translate_const)[name](level)
# Check to see if prob is passed into function. This is used for operations
# where we alter bboxes independently.
  if 'prob' in inspect.getfullargspec(func)[0]:
    args = tuple([prob] + list(args))
  # Add in replace arg if it is required for the function that is being called.
  if 'replace' in inspect.getfullargspec(func)[0]:
    # Make sure replace is the final argument.
    assert 'replace' == inspect.getfullargspec(func)[0][-1]
    args = tuple(list(args) + [replace_value])
return (func, prob, args)
def distort_image_with_randaugment(image, num_layers, magnitude, key):
"""Applies the RandAugment policy to `image`.
  RandAugment is from the paper https://arxiv.org/abs/1909.13719.
Args:
image: `Tensor` of shape [height, width, 3] representing an image.
num_layers: Integer, the number of augmentation transformations to apply
sequentially to an image. Represented as (N) in the paper. Usually best
values will be in the range [1, 3].
magnitude: Integer, shared magnitude across all augmentation operations.
Represented as (M) in the paper. Best values are usually in the range
[5, 30].
key: an rng key from tf.random.experimental.stateless_fold_in.
Returns:
The augmented version of `image`.
"""
replace_value = [128] * 3
available_ops = [
'AutoContrast',
'Equalize',
'Invert',
'Rotate',
'Posterize',
'Solarize',
'Color',
'Contrast',
'Brightness',
'Sharpness',
'ShearX',
'ShearY',
'TranslateX',
'TranslateY',
'Cutout',
'SolarizeAdd',
]
for layer_num in range(num_layers):
key = tf.random.experimental.stateless_fold_in(key, layer_num)
op_to_select = tf.random.stateless_uniform([],
seed=key,
maxval=len(available_ops),
dtype=tf.int32)
random_magnitude = float(magnitude)
with tf.name_scope('randaug_layer_{}'.format(layer_num)):
for (i, op_name) in enumerate(available_ops):
key = tf.random.experimental.stateless_fold_in(key, i)
prob = tf.random.stateless_uniform([],
seed=key,
minval=0.2,
maxval=0.8,
dtype=tf.float32)
func, _, args = _parse_policy_info(op_name, prob, random_magnitude,
replace_value, cutout_const=40,
translate_const=100)
image = tf.cond(
tf.equal(i, op_to_select),
lambda selected_func=func,
selected_args=args: selected_func(image, *selected_args),
lambda: image)
return image
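if __name__ == '__main__':
  # A minimal usage sketch (not part of the training pipeline): apply
  # RandAugment to a synthetic uint8 image. The image size, num_layers=2, and
  # magnitude=15 below are illustrative values only.
  example_image = tf.cast(
      tf.random.uniform([224, 224, 3], maxval=256, dtype=tf.int32), tf.uint8)
  example_key = tf.constant([0, 1], dtype=tf.int64)
  augmented = distort_image_with_randaugment(
      example_image, num_layers=2, magnitude=15, key=example_key)
  print(augmented.shape)  # (224, 224, 3)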
|
"""Data loader for pre-processed Criteo data.
Similar to how the NVIDIA example works, we split data from the last day into a
validation and test split (taking the first half for test and second half for
validation). See here for the NVIDIA example:
https://github.com/NVIDIA/DeepLearningExamples/blob/4e764dcd78732ebfe105fc05ea3dc359a54f6d5e/PyTorch/Recommendation/DLRM/preproc/run_spark_cpu.sh#L119.
"""
import functools
import os
from typing import Optional
import tensorflow as tf
from algorithmic_efficiency import data_utils
_NUM_DAY_23_FILES = 36
# Raw vocab sizes from
# https://cloud.google.com/tpu/docs/tutorials/dlrm-dcn-2.x#run-model.
_VOCAB_SIZES = [
39884406,
39043,
17289,
7420,
20263,
3,
7120,
1543,
63,
38532951,
2953546,
403346,
10,
2208,
11938,
155,
4,
976,
14,
39979771,
25641295,
39664984,
585935,
12972,
108,
36,
]
# Preprocessing is the same as
# https://github.com/mlcommons/inference/blob/master/recommendation/dlrm/tf/dataloader.py#L157
# and MAX_IND_RANGE used like
# https://github.com/facebookresearch/dlrm/blob/fbc37ebe21d4f88f18c6ae01333ada2d025e41cf/dlrm_data_pytorch.py#L298.
@tf.function
def _parse_example_fn(num_dense_features, example):
"""Parser function for pre-processed Criteo TSV records."""
label_defaults = [[0.0]]
int_defaults = [[0.0] for _ in range(num_dense_features)]
categorical_defaults = [['00000000'] for _ in range(len(_VOCAB_SIZES))]
record_defaults = label_defaults + int_defaults + categorical_defaults
fields = tf.io.decode_csv(
example, record_defaults, field_delim='\t', na_value='-1')
num_labels = 1
features = {}
features['targets'] = tf.reshape(fields[0], (-1,))
int_features = []
for idx in range(num_dense_features):
positive_val = tf.nn.relu(fields[idx + num_labels])
int_features.append(tf.math.log(positive_val + 1))
int_features = tf.stack(int_features, axis=1)
cat_features = []
for idx in range(len(_VOCAB_SIZES)):
field = fields[idx + num_dense_features + num_labels]
# We append the column index to the string to make the same id in different
# columns unique.
cat_features.append(
tf.strings.to_hash_bucket_fast(field + str(idx), _VOCAB_SIZES[idx]))
cat_features = tf.cast(
tf.stack(cat_features, axis=1), dtype=int_features.dtype)
features['inputs'] = tf.concat([int_features, cat_features], axis=1)
return features
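# A minimal parsing sketch with a synthetic record (not real Criteo data): one
# tab-separated line containing a label, 13 dense fields, and 26 categorical
# fields. The resulting 'inputs' tensor has 13 + 26 = 39 columns per example:
# log(1 + relu(x)) transformed dense values followed by per-column hash
# buckets of the categorical strings.
if __name__ == '__main__':
  example_line = '\t'.join(['1'] + ['3'] * 13 + ['a1b2c3d4'] * 26)
  example_features = _parse_example_fn(13, tf.constant([example_line]))
  print(example_features['inputs'].shape)   # (1, 39)
  print(example_features['targets'].shape)  # (1,)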
def get_criteo1tb_dataset(split: str,
shuffle_rng,
data_dir: str,
num_dense_features: int,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
"""Get the Criteo 1TB dataset for a given split."""
num_test_files = _NUM_DAY_23_FILES // 2 + 1
if split in ['train', 'eval_train']:
file_paths = [os.path.join(data_dir, f'day_{d}_*') for d in range(0, 23)]
elif split == 'validation':
# Assumes files are of the format day_23_04.
file_paths = [
os.path.join(data_dir, f'day_23_{str(s).zfill(2)}')
for s in range(num_test_files, _NUM_DAY_23_FILES)
]
else:
file_paths = [
os.path.join(data_dir, f'day_23_{str(s).zfill(2)}')
for s in range(0, num_test_files)
]
is_training = split == 'train'
shuffle = is_training or split == 'eval_train'
ds = tf.data.Dataset.list_files(
file_paths, shuffle=shuffle, seed=shuffle_rng[0])
if shuffle:
ds = ds.shuffle(buffer_size=1024)
ds = ds.flat_map(tf.data.TextLineDataset)
ds = ds.batch(global_batch_size, drop_remainder=is_training)
parse_fn = functools.partial(_parse_example_fn, num_dense_features)
ds = ds.map(parse_fn, num_parallel_calls=16)
if is_training:
ds = ds.repeat()
ds = ds.prefetch(10)
if num_batches is not None:
ds = ds.take(num_batches)
# We do not use ds.cache() because the dataset is so large that it would OOM.
if repeat_final_dataset:
ds = ds.repeat()
ds = map(
functools.partial(
data_utils.shard_and_maybe_pad_np,
global_batch_size=global_batch_size),
ds)
return ds
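# A minimal usage sketch (commented out because it requires the pre-processed
# Criteo TSV shards on disk; the directory and RNG below are illustrative):
#
#   ds = get_criteo1tb_dataset(
#       split='validation',
#       shuffle_rng=jax.random.PRNGKey(0),
#       data_dir='/data/criteo1tb',
#       num_dense_features=13,
#       global_batch_size=65_536)
#   batch = next(ds)  # dict of sharded 'inputs' and 'targets' NumPy arrays.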
|
"""Criteo1TB DLRM workload base class."""
import math
import os
from typing import Dict, Optional, Tuple
import jax
import torch.distributed as dist
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.criteo1tb import input_pipeline
USE_PYTORCH_DDP = 'LOCAL_RANK' in os.environ
class BaseCriteo1TbDlrmSmallWorkload(spec.Workload):
"""Criteo1tb workload."""
vocab_size: int = 32 * 128 * 1024 # 4_194_304
num_dense_features: int = 13
  mlp_bottom_dims: Tuple[int, int, int] = (512, 256, 128)
  mlp_top_dims: Tuple[int, int, int, int, int] = (1024, 1024, 512, 256, 1)
embed_dim: int = 128
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'loss'
def has_reached_validation_target(self, eval_result: float) -> bool:
return eval_result['validation/loss'] < self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.123649
def has_reached_test_target(self, eval_result: float) -> bool:
return eval_result['test/loss'] < self.test_target_value
@property
def test_target_value(self) -> float:
return 0.126060
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SIGMOID_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
return 4_195_197_692
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
return 89_000_000
@property
def num_test_examples(self) -> int:
return 89_274_637
@property
def train_mean(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def max_allowed_runtime_sec(self) -> int:
return 7703 # ~2 hours
@property
def eval_period_time_sec(self) -> int:
return 2 * 60
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
ds = input_pipeline.get_criteo1tb_dataset(
split=split,
shuffle_rng=data_rng,
data_dir=data_dir,
num_dense_features=self.num_dense_features,
global_batch_size=global_batch_size,
num_batches=num_batches,
repeat_final_dataset=repeat_final_dataset)
for batch in iter(ds):
yield batch
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 10_666
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del model_state
del global_step
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
# These iterators will repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
rng,
split,
data_dir,
global_batch_size,
num_batches,
repeat_final_dataset=True)
loss = 0.0
for _ in range(num_batches):
eval_batch = next(self._eval_iters[split])
loss += self._eval_batch(params, eval_batch)
if USE_PYTORCH_DDP:
dist.all_reduce(loss)
mean_loss = loss.item() / num_examples
return {'loss': mean_loss}
|
"""Pytorch implementation of DLRM-Small."""
import math
import torch
from torch import nn
def dot_interact(concat_features):
"""Performs feature interaction operation between dense or sparse features.
Input tensors represent dense or sparse features.
Pre-condition: The tensors have been stacked along dimension 1.
Args:
concat_features: Array of features with shape [B, n_features, feature_dim].
Returns:
activations: Array representing interacted features.
"""
batch_size = concat_features.shape[0]
# Interact features, select upper or lower-triangular portion, and re-shape.
xactions = torch.bmm(concat_features,
torch.permute(concat_features, (0, 2, 1)))
feature_dim = xactions.shape[-1]
indices = torch.triu_indices(feature_dim, feature_dim)
num_elems = indices.shape[1]
indices = torch.tile(indices, [1, batch_size])
indices0 = torch.reshape(
torch.tile(
torch.reshape(torch.arange(batch_size), [-1, 1]), [1, num_elems]),
[1, -1])
indices = tuple(torch.cat((indices0, indices), 0))
activations = xactions[indices]
activations = torch.reshape(activations, [batch_size, -1])
return activations
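# Worked size example: DLRM-Small stacks the bottom MLP output with 26
# embedding vectors, i.e. n_features = 27 vectors of dimension 128 per example.
# The interaction matrix is therefore 27 x 27, and the upper triangle selected
# above (diagonal included) contains 27 * 28 / 2 = 378 values, so `activations`
# has shape [B, 378].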
class DlrmSmall(nn.Module):
"""Define a DLRM-Small model.
Parameters:
vocab_size: vocab size of embedding table.
num_dense_features: number of dense features as the bottom mlp input.
mlp_bottom_dims: dimensions of dense layers of the bottom mlp.
mlp_top_dims: dimensions of dense layers of the top mlp.
embed_dim: embedding dimension.
"""
def __init__(self,
vocab_size,
num_dense_features=13,
num_sparse_features=26,
mlp_bottom_dims=(512, 256, 128),
mlp_top_dims=(1024, 1024, 512, 256, 1),
embed_dim=128,
dropout_rate=0.0):
super().__init__()
self.vocab_size = torch.tensor(vocab_size, dtype=torch.int32)
self.num_dense_features = num_dense_features
self.num_sparse_features = num_sparse_features
self.mlp_bottom_dims = mlp_bottom_dims
self.mlp_top_dims = mlp_top_dims
self.embed_dim = embed_dim
self.embedding_table = nn.Embedding(self.vocab_size, self.embed_dim)
self.embedding_table.weight.data.uniform_(0, 1)
# Scale the initialization to fan_in for each slice.
scale = 1.0 / torch.sqrt(self.vocab_size)
self.embedding_table.weight.data = scale * self.embedding_table.weight.data
# bottom mlp
bottom_mlp_layers = []
input_dim = self.num_dense_features
for dense_dim in self.mlp_bottom_dims:
bottom_mlp_layers.append(nn.Linear(input_dim, dense_dim))
bottom_mlp_layers.append(nn.ReLU(inplace=True))
input_dim = dense_dim
self.bot_mlp = nn.Sequential(*bottom_mlp_layers)
for module in self.bot_mlp.modules():
if isinstance(module, nn.Linear):
limit = math.sqrt(6. / (module.in_features + module.out_features))
nn.init.uniform_(module.weight.data, -limit, limit)
nn.init.normal_(module.bias.data,
0.,
math.sqrt(1. / module.out_features))
# top mlp
    # The top MLP input is the bottom MLP output (embed_dim = 128) concatenated
    # with the dot_interact output, i.e. the upper triangle (including the
    # diagonal) of the interaction matrix over 1 + num_sparse_features = 27
    # feature vectors: 128 + 27 * 28 // 2 = 506.
    input_dims = 506
top_mlp_layers = []
num_layers_top = len(self.mlp_top_dims)
for layer_idx, fan_out in enumerate(self.mlp_top_dims):
fan_in = input_dims if layer_idx == 0 \
else self.mlp_top_dims[layer_idx - 1]
top_mlp_layers.append(nn.Linear(fan_in, fan_out))
if layer_idx < (num_layers_top - 1):
top_mlp_layers.append(nn.ReLU(inplace=True))
if (dropout_rate is not None and dropout_rate > 0.0 and
layer_idx == num_layers_top - 2):
top_mlp_layers.append(nn.Dropout(p=dropout_rate))
self.top_mlp = nn.Sequential(*top_mlp_layers)
for module in self.top_mlp.modules():
if isinstance(module, nn.Linear):
nn.init.normal_(
module.weight.data,
0.,
math.sqrt(2. / (module.in_features + module.out_features)))
nn.init.normal_(module.bias.data,
0.,
math.sqrt(1. / module.out_features))
def forward(self, x):
bot_mlp_input, cat_features = torch.split(
x, [self.num_dense_features, self.num_sparse_features], 1)
cat_features = cat_features.to(dtype=torch.int32)
bot_mlp_output = self.bot_mlp(bot_mlp_input)
batch_size = bot_mlp_output.shape[0]
feature_stack = torch.reshape(bot_mlp_output,
[batch_size, -1, self.embed_dim])
idx_lookup = torch.reshape(cat_features, [-1]) % self.vocab_size
embed_features = self.embedding_table(idx_lookup)
embed_features = torch.reshape(embed_features,
[batch_size, -1, self.embed_dim])
feature_stack = torch.cat([feature_stack, embed_features], axis=1)
dot_interact_output = dot_interact(concat_features=feature_stack)
top_mlp_input = torch.cat([bot_mlp_output, dot_interact_output], axis=-1)
logits = self.top_mlp(top_mlp_input)
return logits
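if __name__ == '__main__':
  # A minimal forward-pass sketch with a tiny illustrative vocabulary; the real
  # workload uses vocab_size = 32 * 128 * 1024 and much larger batches.
  example_model = DlrmSmall(vocab_size=1024)
  example_dense = torch.rand(8, 13)
  example_sparse = torch.randint(0, 1024, (8, 26)).to(torch.float32)
  example_logits = example_model(
      torch.cat([example_dense, example_sparse], dim=1))
  print(example_logits.shape)  # torch.Size([8, 1])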
|
"""Criteo1TB workload implemented in PyTorch."""
import contextlib
from typing import Dict, Optional, Tuple
import jax
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.models import \
DlrmSmall
from algorithmic_efficiency.workloads.criteo1tb.workload import \
BaseCriteo1TbDlrmSmallWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_setup()
class Criteo1TbDlrmSmallWorkload(BaseCriteo1TbDlrmSmallWorkload):
@property
def eval_batch_size(self) -> int:
return 262_144
def _per_example_sigmoid_binary_cross_entropy(
self, logits: spec.Tensor, targets: spec.Tensor) -> spec.Tensor:
ls = torch.nn.LogSigmoid()
log_p = ls(logits)
log_not_p = ls(-logits)
per_example_losses = -1.0 * (targets * log_p + (1 - targets) * log_not_p)
per_example_losses = per_example_losses.reshape(len(per_example_losses), -1)
return per_example_losses.sum(1)
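  # For example, a logit of 0.0 gives log_p = log_not_p = log(0.5), so the
  # per-example loss is -log(0.5) ~= 0.693 regardless of the target value.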
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense (not one-hot) labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
batch_size = label_batch.shape[0]
label_batch = torch.reshape(label_batch, (batch_size,))
logits_batch = torch.reshape(logits_batch, (batch_size,))
per_example_losses = self._per_example_sigmoid_binary_cross_entropy(
logits=logits_batch, targets=label_batch)
if mask_batch is not None:
mask_batch = torch.reshape(mask_batch, (batch_size,))
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def _eval_metric(self, logits: spec.Tensor,
targets: spec.Tensor) -> Dict[str, int]:
summed_loss = self.loss_fn(logits, targets)['summed']
return {'loss': summed_loss}
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Only dropout is used."""
del aux_dropout_rate
torch.random.manual_seed(rng[0])
model = DlrmSmall(
vocab_size=self.vocab_size,
num_dense_features=self.num_dense_features,
mlp_bottom_dims=self.mlp_bottom_dims,
mlp_top_dims=self.mlp_top_dims,
embed_dim=self.embed_dim,
dropout_rate=dropout_rate)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['top_mlp.4.weight', 'top_mlp.4.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
model = params
inputs = augmented_and_preprocessed_input_batch['inputs']
if mode == spec.ForwardPassMode.EVAL:
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(inputs)
return logits_batch, None
def _build_input_queue(self,
data_rng: jax.random.PRNGKey,
split: str,
data_dir: str,
global_batch_size: int,
num_batches: Optional[int] = None,
repeat_final_dataset: bool = False):
not_train = split != 'train'
per_device_batch_size = int(global_batch_size / N_GPUS)
# Only create and iterate over tf input pipeline in one Python process to
# avoid creating too many threads.
if RANK == 0:
np_iter = super()._build_input_queue(data_rng,
split,
data_dir,
global_batch_size,
num_batches,
repeat_final_dataset)
weights = None
while True:
if RANK == 0:
batch = next(np_iter) # pylint: disable=stop-iteration-return
inputs = torch.as_tensor(
batch['inputs'], dtype=torch.float32, device=DEVICE)
targets = torch.as_tensor(
batch['targets'], dtype=torch.float32, device=DEVICE)
if not_train:
weights = batch.get('weights')
if weights is None:
weights = torch.ones((N_GPUS, per_device_batch_size, 1),
dtype=torch.float32,
device=DEVICE)
else:
weights = torch.as_tensor(
weights, dtype=torch.float32, device=DEVICE)
# Send batch to other devices when using DDP.
if USE_PYTORCH_DDP:
if not_train:
# During eval, the batch size of the remainder might be different.
per_device_batch_size = torch.tensor(
len(targets[0]), dtype=torch.int32, device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
dist.broadcast(weights, src=0)
weights = weights[0]
dist.broadcast(inputs, src=0)
inputs = inputs[0]
dist.broadcast(targets, src=0)
targets = targets[0]
else:
inputs = inputs.view(-1, *inputs.shape[2:])
targets = targets.view(-1, *targets.shape[2:])
if not_train:
weights = weights.view(-1, *weights.shape[2:])
else:
if not_train:
# During eval, the batch size of the remainder might be different.
per_device_batch_size = torch.empty((1,),
dtype=torch.int32,
device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
weights = torch.empty((N_GPUS, per_device_batch_size, 1),
dtype=torch.float32,
device=DEVICE)
dist.broadcast(weights, src=0)
weights = weights[RANK]
inputs = torch.empty((N_GPUS, per_device_batch_size, 39),
dtype=torch.float32,
device=DEVICE)
dist.broadcast(inputs, src=0)
inputs = inputs[RANK]
targets = torch.empty((N_GPUS, per_device_batch_size, 1),
dtype=torch.float32,
device=DEVICE)
dist.broadcast(targets, src=0)
targets = targets[RANK]
if weights is None:
weights = torch.ones(per_device_batch_size, device=DEVICE)
batch = {
'inputs': inputs,
'targets': targets,
'weights': weights,
}
yield batch
def _eval_batch(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> spec.Tensor:
logits, _ = self.model_fn(
params,
batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
weights = batch.get('weights')
if weights is None:
weights = torch.ones(len(logits), device=DEVICE)
summed_loss = self.loss_fn(
label_batch=batch['targets'], logits_batch=logits,
mask_batch=weights)['summed']
return summed_loss
class Criteo1TbDlrmSmallTestWorkload(Criteo1TbDlrmSmallWorkload):
vocab_size: int = 32 * 128 * 16
|
"""A JAX implementation of DLRM-Small."""
from typing import Sequence
import flax.linen as nn
from jax import nn as jnn
import jax.numpy as jnp
def dot_interact(concat_features):
"""Performs feature interaction operation between dense or sparse features.
Input tensors represent dense or sparse features.
Pre-condition: The tensors have been stacked along dimension 1.
Args:
concat_features: Array of features with shape [B, n_features, feature_dim].
Returns:
activations: Array representing interacted features.
"""
batch_size = concat_features.shape[0]
# Interact features, select upper or lower-triangular portion, and re-shape.
xactions = jnp.matmul(concat_features,
jnp.transpose(concat_features, [0, 2, 1]))
feature_dim = xactions.shape[-1]
indices = jnp.array(jnp.triu_indices(feature_dim))
num_elems = indices.shape[1]
indices = jnp.tile(indices, [1, batch_size])
indices0 = jnp.reshape(
jnp.tile(jnp.reshape(jnp.arange(batch_size), [-1, 1]), [1, num_elems]),
[1, -1])
indices = tuple(jnp.concatenate((indices0, indices), 0))
activations = xactions[indices]
activations = jnp.reshape(activations, [batch_size, -1])
return activations
class DlrmSmall(nn.Module):
"""Define a DLRM-Small model.
Parameters:
vocab_size: vocab size of embedding table.
num_dense_features: number of dense features as the bottom mlp input.
mlp_bottom_dims: dimensions of dense layers of the bottom mlp.
mlp_top_dims: dimensions of dense layers of the top mlp.
embed_dim: embedding dimension.
"""
vocab_size: int = 32 * 128 * 1024 # 4_194_304
num_dense_features: int = 13
mlp_bottom_dims: Sequence[int] = (512, 256, 128)
mlp_top_dims: Sequence[int] = (1024, 1024, 512, 256, 1)
embed_dim: int = 128
dropout_rate: float = 0.0
@nn.compact
def __call__(self, x, train):
bot_mlp_input, cat_features = jnp.split(x, [self.num_dense_features], 1)
cat_features = jnp.asarray(cat_features, dtype=jnp.int32)
# Bottom MLP.
for dense_dim in self.mlp_bottom_dims:
bot_mlp_input = nn.Dense(
dense_dim,
kernel_init=jnn.initializers.glorot_uniform(),
bias_init=jnn.initializers.normal(stddev=jnp.sqrt(1.0 / dense_dim)),
)(
bot_mlp_input)
bot_mlp_input = nn.relu(bot_mlp_input)
bot_mlp_output = bot_mlp_input
batch_size = bot_mlp_output.shape[0]
feature_stack = jnp.reshape(bot_mlp_output,
[batch_size, -1, self.embed_dim])
# Embedding table look-up.
idx_lookup = jnp.reshape(cat_features, [-1]) % self.vocab_size
def scaled_init(key, shape, dtype=jnp.float_):
return (jnn.initializers.uniform(scale=1.0)(key, shape, dtype) /
jnp.sqrt(self.vocab_size))
embedding_table = self.param('embedding_table',
scaled_init, [self.vocab_size, self.embed_dim])
idx_lookup = jnp.reshape(idx_lookup, [-1])
embed_features = embedding_table[idx_lookup]
embed_features = jnp.reshape(embed_features,
[batch_size, -1, self.embed_dim])
feature_stack = jnp.concatenate([feature_stack, embed_features], axis=1)
dot_interact_output = dot_interact(concat_features=feature_stack)
top_mlp_input = jnp.concatenate([bot_mlp_output, dot_interact_output],
axis=-1)
mlp_input_dim = top_mlp_input.shape[1]
mlp_top_dims = self.mlp_top_dims
num_layers_top = len(mlp_top_dims)
for layer_idx, fan_out in enumerate(mlp_top_dims):
fan_in = mlp_input_dim if layer_idx == 0 else mlp_top_dims[layer_idx - 1]
top_mlp_input = nn.Dense(
fan_out,
kernel_init=jnn.initializers.normal(
stddev=jnp.sqrt(2.0 / (fan_in + fan_out))),
bias_init=jnn.initializers.normal(stddev=jnp.sqrt(1.0 / fan_out)))(
top_mlp_input)
if layer_idx < (num_layers_top - 1):
top_mlp_input = nn.relu(top_mlp_input)
if (self.dropout_rate is not None and self.dropout_rate > 0.0 and
layer_idx == num_layers_top - 2):
top_mlp_input = nn.Dropout(
rate=self.dropout_rate, deterministic=not train)(
top_mlp_input)
logits = top_mlp_input
return logits
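if __name__ == '__main__':
  # A minimal init/apply sketch with a tiny illustrative vocabulary; the real
  # workload uses vocab_size = 32 * 128 * 1024 and initializes the model via
  # the workload's init_model_fn.
  import jax
  example_model = DlrmSmall(vocab_size=1024)
  example_inputs = jnp.ones((8, 13 + 26), jnp.float32)
  example_variables = example_model.init(
      {'params': jax.random.PRNGKey(0)}, example_inputs, train=False)
  example_logits = example_model.apply(
      example_variables, example_inputs, train=False)
  print(example_logits.shape)  # (8, 1)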
|
"""Criteo1TB workload implemented in Jax."""
import functools
from typing import Dict, Optional, Tuple
from flax import jax_utils
import jax
import jax.numpy as jnp
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax import models
from algorithmic_efficiency.workloads.criteo1tb.workload import \
BaseCriteo1TbDlrmSmallWorkload
class Criteo1TbDlrmSmallWorkload(BaseCriteo1TbDlrmSmallWorkload):
@property
def eval_batch_size(self) -> int:
return 524_288
def _per_example_sigmoid_binary_cross_entropy(
self, logits: spec.Tensor, targets: spec.Tensor) -> spec.Tensor:
"""Computes the sigmoid binary cross entropy per example.
Args:
logits: float array of shape (batch, output_shape).
targets: float array of shape (batch, output_shape).
Returns:
Sigmoid binary cross entropy computed per example, shape (batch,).
"""
log_p = jax.nn.log_sigmoid(logits)
log_not_p = jax.nn.log_sigmoid(-logits)
losses = -1.0 * (targets * log_p + (1 - targets) * log_not_p)
return losses
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense (not one-hot) labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
batch_size = label_batch.shape[0]
label_batch = jnp.reshape(label_batch, (batch_size,))
logits_batch = jnp.reshape(logits_batch, (batch_size,))
per_example_losses = self._per_example_sigmoid_binary_cross_entropy(
logits=logits_batch, targets=label_batch)
if mask_batch is not None:
mask_batch = jnp.reshape(mask_batch, (batch_size,))
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Only dropout is used."""
del aux_dropout_rate
self._model = models.DlrmSmall(
vocab_size=self.vocab_size,
num_dense_features=self.num_dense_features,
mlp_bottom_dims=self.mlp_bottom_dims,
mlp_top_dims=self.mlp_top_dims,
embed_dim=self.embed_dim,
dropout_rate=dropout_rate)
params_rng, dropout_rng = jax.random.split(rng)
init_fake_batch_size = 2
num_categorical_features = 26
input_size = self.num_dense_features + num_categorical_features
input_shape = (init_fake_batch_size, input_size)
init_fn = functools.partial(self._model.init, train=False)
initial_variables = jax.jit(init_fn)(
{'params': params_rng, 'dropout': dropout_rng},
jnp.ones(input_shape, jnp.float32))
initial_params = initial_variables['params']
self._param_shapes = param_utils.jax_param_shapes(initial_params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
return jax_utils.replicate(initial_params), None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_7'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del update_batch_norm
inputs = augmented_and_preprocessed_input_batch['inputs']
train = mode == spec.ForwardPassMode.TRAIN
apply_kwargs = {'train': train}
if train:
apply_kwargs['rngs'] = {'dropout': rng}
logits_batch = self._model.apply({'params': params}, inputs, **apply_kwargs)
return logits_batch, None
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0),
static_broadcasted_argnums=(0,))
def _eval_batch_pmapped(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> spec.Tensor:
logits, _ = self.model_fn(
params,
batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
weights = batch.get('weights')
if weights is None:
weights = jnp.ones(len(logits))
summed_loss = self.loss_fn(
label_batch=batch['targets'], logits_batch=logits,
mask_batch=weights)['summed']
return summed_loss
def _eval_batch(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor]) -> spec.Tensor:
# We do NOT psum inside of _eval_batch_pmapped, so the returned tensor of
# shape (local_device_count,) will all be different values.
return self._eval_batch_pmapped(params, batch).sum()
class Criteo1TbDlrmSmallTestWorkload(Criteo1TbDlrmSmallWorkload):
vocab_size: int = 32 * 128 * 16
|
"""FastMRI knee singlecoil input pipeline."""
import datetime
import functools
import glob
import os
import h5py
import jax
import tensorflow as tf
from algorithmic_efficiency import data_utils
_TRAIN_DIR = 'knee_singlecoil_train'
_VAL_DIR = 'knee_singlecoil_val'
_EVAL_SEED = 0
def _process_example(kspace,
kspace_shape,
target,
target_shape,
volume_max,
seed):
"""Generate a single example (slice from mri image).
Args:
kspace: raw mri data.
kspace_shape: shape of kspace. We pass this in because it is not constant.
target: target image.
target_shape: shape of target.
volume_max: max value over the entire volume that the example slice was
originally derived from.
seed: seed for stateless randomness.
Returns:
dictionary of processed image/target.
"""
# sample_mask
num_cols = kspace_shape[1]
num_cols_float = tf.cast(num_cols, dtype=tf.float32)
# choose_acceleration
center_fraction = tf.convert_to_tensor(0.08, dtype=tf.float32)
acceleration = tf.convert_to_tensor(4.0, dtype=tf.float32)
num_low_frequencies = tf.cast(
num_cols_float * center_fraction, dtype=tf.int32)
# calculate_center_mask
mask = tf.zeros(num_cols, dtype=tf.float32)
pad = (num_cols - num_low_frequencies + 1) // 2
mask = tf.tensor_scatter_nd_update(
mask,
tf.reshape(tf.range(pad, pad + num_low_frequencies), (-1, 1)),
tf.ones(num_low_frequencies))
# reshape_mask
center_mask = tf.reshape(mask, (1, num_cols))
# calculate_acceleration_mask
num_low_frequencies_float = tf.cast(num_low_frequencies, dtype=tf.float32)
prob = (num_cols_float / acceleration - num_low_frequencies_float) / (
num_cols_float - num_low_frequencies_float)
mask = tf.cast(
tf.random.stateless_uniform((num_cols,), seed) < prob, dtype=tf.float32)
acceleration_mask = tf.reshape(mask, (1, num_cols))
mask = tf.math.maximum(center_mask, acceleration_mask)
mask = tf.cast(mask, dtype=tf.complex64)
# apply_mask
masked_kspace = kspace * mask + 0.0
# ifft2c
shifted_kspace = tf.signal.ifftshift(masked_kspace, axes=(0, 1))
shifted_image = tf.signal.ifft2d(shifted_kspace)
image = tf.signal.fftshift(shifted_image, axes=(0, 1))
scaling_norm = tf.cast(
tf.math.sqrt(
tf.cast(tf.math.reduce_prod(tf.shape(kspace)[-2:]), 'float32')),
kspace.dtype)
image = image * scaling_norm
image = tf.stack((tf.math.real(image), tf.math.imag(image)), axis=-1)
# complex_center_crop
w_from = (kspace_shape[0] - target_shape[0]) // 2
h_from = (kspace_shape[1] - target_shape[1]) // 2
w_to = w_from + target_shape[0]
h_to = h_from + target_shape[1]
image = image[..., w_from:w_to, h_from:h_to, :]
# complex_abs
abs_image = tf.math.sqrt(tf.math.reduce_sum(image**2, axis=-1))
# normalize_instance
mean = tf.math.reduce_mean(abs_image)
std = tf.math.reduce_std(abs_image)
norm_image = (abs_image - mean) / std
# clip_image
image = tf.clip_by_value(norm_image, -6, 6)
# process target
norm_target = (target - mean) / std
target = tf.clip_by_value(norm_target, -6, 6)
return {
'inputs': image,
'targets': target,
'mean': mean,
'std': std,
'volume_max': volume_max,
}
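# Worked example of the masking above (illustrative numbers only): for a
# k-space slice with num_cols = 368, the center mask keeps
# int(368 * 0.08) = 29 low-frequency columns, and every remaining column is
# kept independently with probability (368 / 4 - 29) / (368 - 29) ~= 0.186,
# giving roughly 4x acceleration overall.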
def _h5_to_examples(path, log=False):
"""Yield MRI slices from an hdf5 file containing a single MRI volume."""
if log:
tf.print('fastmri_dataset._h5_to_examples call:',
path,
datetime.datetime.now().strftime('%H:%M:%S:%f'))
with open(path, 'rb') as gf:
with h5py.File(gf, 'r') as hf:
# NOTE(dsuo): logic taken from reference code
volume_max = hf.attrs.get('max', 0.0)
for i in range(hf['kspace'].shape[0]):
yield hf['kspace'][i], hf['kspace'][i].shape, hf['reconstruction_esc'][
i], hf['reconstruction_esc'][i].shape, volume_max
def _create_generator(filename):
signature = (
tf.TensorSpec(shape=(640, None), dtype=tf.complex64),
tf.TensorSpec(shape=(2,), dtype=tf.int32),
tf.TensorSpec(shape=(320, 320), dtype=tf.float32),
tf.TensorSpec(shape=(2,), dtype=tf.int32),
tf.TensorSpec(shape=(), dtype=tf.float32),
)
return tf.data.Dataset.from_generator(
_h5_to_examples, args=(filename,), output_signature=signature)
def load_fastmri_split(global_batch_size,
split,
data_dir,
shuffle_rng,
num_batches,
repeat_final_eval_dataset):
"""Creates a split from the FastMRI dataset using tf.data.
NOTE: only creates knee singlecoil datasets.
NOTE: fastMRI has fixed randomness for eval.
Args:
global_batch_size: The global batch size returned by the data pipeline.
    split: One of ['train', 'eval_train', 'validation', 'test'].
data_dir: The location of the data on disk.
shuffle_rng: The RNG used to shuffle the split.
    num_batches: Number of batches to iterate over.
    repeat_final_eval_dataset: Whether to repeat the eval dataset after the
      final batch so it can be iterated again.
  Returns:
    An iterator over sharded (and padded, if necessary) NumPy batches.
"""
if split not in ['train', 'eval_train', 'validation', 'test']:
raise ValueError('Unrecognized split {}'.format(split))
# Check if data directories exist because glob will not raise an error
if not os.path.exists(os.path.join(data_dir, _TRAIN_DIR)):
raise NotADirectoryError('Directory not found: {}'.format(
os.path.join(data_dir, _TRAIN_DIR)))
if not os.path.exists(os.path.join(data_dir, _VAL_DIR)):
raise NotADirectoryError('Directory not found: {}'.format(
os.path.join(data_dir, _VAL_DIR)))
if split in ['train', 'eval_train']:
file_pattern = os.path.join(data_dir, _TRAIN_DIR, '*.h5')
h5_paths = glob.glob(file_pattern)
elif split == 'validation':
file_pattern = os.path.join(data_dir, _VAL_DIR, '*.h5')
h5_paths = sorted(glob.glob(file_pattern))[:100]
elif split == 'test':
# The fastmri validation set is split into a validation and test set
file_pattern = os.path.join(data_dir, _VAL_DIR, '*.h5')
h5_paths = sorted(glob.glob(file_pattern))[100:]
is_train = split == 'train'
shuffle = is_train or split == 'eval_train'
ds = tf.data.Dataset.from_tensor_slices(h5_paths)
ds = ds.interleave(
_create_generator,
cycle_length=32,
block_length=64,
num_parallel_calls=16)
if is_train:
ds = ds.cache()
def process_example(example_index, example):
if shuffle:
process_rng = tf.cast(jax.random.fold_in(shuffle_rng, 0), tf.int64)
process_rng = tf.random.experimental.stateless_fold_in(
process_rng, example_index)
else:
# NOTE(dsuo): we use fixed randomness for eval.
process_rng = tf.cast(jax.random.PRNGKey(_EVAL_SEED), tf.int64)
return _process_example(*example, process_rng)
ds = ds.enumerate().map(process_example, num_parallel_calls=16)
if shuffle:
ds = ds.shuffle(
16 * global_batch_size,
seed=shuffle_rng[0],
reshuffle_each_iteration=True)
if is_train:
ds = ds.repeat()
ds = ds.batch(global_batch_size, drop_remainder=is_train)
if is_train:
ds = ds.prefetch(10)
iterator = map(data_utils.shard_and_maybe_pad_np, ds)
return iterator
else:
if num_batches:
ds = ds.take(num_batches)
ds = ds.cache()
if repeat_final_eval_dataset:
ds = ds.repeat()
ds = ds.prefetch(10)
return map(
functools.partial(
data_utils.shard_and_maybe_pad_np,
global_batch_size=global_batch_size),
ds)
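# A minimal usage sketch (commented out because it requires the
# knee_singlecoil_{train,val} h5 files on disk; the directory below is
# illustrative):
#
#   it = load_fastmri_split(
#       global_batch_size=32,
#       split='validation',
#       data_dir='/data/fastmri',
#       shuffle_rng=jax.random.PRNGKey(0),
#       num_batches=None,
#       repeat_final_eval_dataset=False)
#   batch = next(it)  # 'inputs', 'targets', 'mean', 'std', 'volume_max', ...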
|
"""FastMRI workload parent class."""
import math
from typing import Optional
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.fastmri import input_pipeline
class BaseFastMRIWorkload(spec.Workload):
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'ssim'
def has_reached_validation_target(self, eval_result: float) -> bool:
return eval_result['validation/ssim'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.7344
def has_reached_test_target(self, eval_result: float) -> bool:
return eval_result['test/ssim'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 0.741652
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.MEAN_ABSOLUTE_ERROR
@property
def num_train_examples(self) -> int:
return 34742
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
return 3554
@property
def num_test_examples(self) -> int:
return 3581
@property
def eval_batch_size(self) -> int:
return 256
@property
def train_mean(self):
raise NotImplementedError
@property
def train_stddev(self):
raise NotImplementedError
@property
def center_fractions(self):
return (0.08,)
@property
def accelerations(self):
return (4,)
@property
def max_allowed_runtime_sec(self) -> int:
return 8859 # ~2.5 hours
@property
def eval_period_time_sec(self) -> int:
return 80
@property
def step_hint(self) -> int:
"""Max num steps the baseline algo was given to reach the target."""
return 36_189
def _build_input_queue(self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None):
del cache
return input_pipeline.load_fastmri_split(global_batch_size,
split,
data_dir,
data_rng,
num_batches,
repeat_final_dataset)
|
"""U-Net Model.
Adapted from fastMRI:
https://github.com/facebookresearch/fastMRI/blob/main/fastmri/models/unet.py
"""
from typing import Optional
import torch
from torch import nn
from torch import Tensor
from torch.nn import functional as F
from algorithmic_efficiency import init_utils
class UNet(nn.Module):
r"""U-Net model from
`"U-net: Convolutional networks
for biomedical image segmentation"
  <https://arxiv.org/pdf/1505.04597.pdf>`_.
"""
def __init__(self,
in_chans: int = 1,
out_chans: int = 1,
chans: int = 32,
num_pool_layers: int = 4,
dropout_rate: Optional[float] = 0.0) -> None:
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.chans = chans
self.num_pool_layers = num_pool_layers
if dropout_rate is None:
dropout_rate = 0.0
self.down_sample_layers = nn.ModuleList(
[ConvBlock(in_chans, chans, dropout_rate)])
ch = chans
for _ in range(num_pool_layers - 1):
self.down_sample_layers.append(ConvBlock(ch, ch * 2, dropout_rate))
ch *= 2
self.conv = ConvBlock(ch, ch * 2, dropout_rate)
self.up_conv = nn.ModuleList()
self.up_transpose_conv = nn.ModuleList()
for _ in range(num_pool_layers - 1):
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(ConvBlock(ch * 2, ch, dropout_rate))
ch //= 2
self.up_transpose_conv.append(TransposeConvBlock(ch * 2, ch))
self.up_conv.append(
nn.Sequential(
ConvBlock(ch * 2, ch, dropout_rate),
nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
))
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
init_utils.pytorch_default_init(m)
def forward(self, x: Tensor) -> Tensor:
stack = []
output = x
# apply down-sampling layers
for layer in self.down_sample_layers:
output = layer(output)
stack.append(output)
output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
output = self.conv(output)
# apply up-sampling layers
for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
      # Reflect-pad on the right/bottom if needed to handle
      # odd input dimensions.
padding = [0, 0, 0, 0]
if output.shape[-1] != downsample_layer.shape[-1]:
padding[1] = 1 # padding right
if output.shape[-2] != downsample_layer.shape[-2]:
padding[3] = 1 # padding bottom
if torch.sum(torch.tensor(padding)) != 0:
output = F.pad(output, padding, "reflect")
output = torch.cat([output, downsample_layer], dim=1)
output = conv(output)
return output
class ConvBlock(nn.Module):
  # A Convolutional Block that consists of two convolution layers, each
  # followed by instance normalization, LeakyReLU activation and dropout.
def __init__(self,
in_chans: int,
out_chans: int,
dropout_rate: float = 0.0) -> None:
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.dropout_rate = dropout_rate
self.conv_layers = nn.Sequential(
nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(dropout_rate),
nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
nn.Dropout2d(dropout_rate),
)
def forward(self, x: Tensor) -> Tensor:
return self.conv_layers(x)
class TransposeConvBlock(nn.Module):
  # A Transpose Convolutional Block that consists of one transposed convolution
  # layer followed by instance normalization and LeakyReLU activation.
def __init__(self, in_chans: int, out_chans: int):
super().__init__()
self.in_chans = in_chans
self.out_chans = out_chans
self.layers = nn.Sequential(
nn.ConvTranspose2d(
in_chans, out_chans, kernel_size=2, stride=2, bias=False),
nn.InstanceNorm2d(out_chans),
nn.LeakyReLU(negative_slope=0.2, inplace=True),
)
def forward(self, x: Tensor) -> Tensor:
return self.layers(x)
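if __name__ == '__main__':
  # A minimal forward-pass sketch: the FastMRI workload feeds [N, 1, 320, 320]
  # slices (inputs unsqueezed along the channel dim) through the default U-Net,
  # which preserves the spatial shape.
  example_model = UNet()
  example_out = example_model(torch.randn(2, 1, 320, 320))
  print(example_out.shape)  # torch.Size([2, 1, 320, 320])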
|
"""Structural similarity index calculation in PyTorch, ported from Jax."""
import functools
import functorch
import torch
import torch.nn.functional as F
from torchvision.transforms.functional import pad as pad_fn
from algorithmic_efficiency.pytorch_utils import pytorch_setup
DEVICE = pytorch_setup()[2]
def ssim(logits, targets, mean=None, std=None, volume_max=None):
"""Computes example-wise structural similarity for a batch.
NOTE(dsuo): we use the same (default) arguments to `structural_similarity`
as in https://arxiv.org/abs/1811.08839.
Args:
logits: (batch,) + input.shape float tensor.
targets: (batch,) + input.shape float tensor.
mean: (batch,) mean of original images.
std: (batch,) std of original images.
volume_max: (batch,) of the volume max for the volumes each example came
from.
Returns:
    Structural similarity computed per example, shape (batch,).
"""
if volume_max is None:
volume_max = torch.ones(logits.shape[0], device=DEVICE)
# NOTE(dsuo): `volume_max` can be 0 if we have a padded batch, but this will
# lead to NaN values in `ssim`.
volume_max = torch.where(volume_max == 0,
torch.ones_like(volume_max),
volume_max)
if mean is None:
mean = torch.zeros(logits.shape[0], device=DEVICE)
if std is None:
std = torch.ones(logits.shape[0], device=DEVICE)
mean = mean.view((-1,) + (1,) * (len(logits.shape) - 1))
std = std.view((-1,) + (1,) * (len(logits.shape) - 1))
logits = logits * std + mean
targets = targets * std + mean
ssims = functorch.vmap(structural_similarity)(logits, targets, volume_max)
return ssims
def structural_similarity(im1,
im2,
data_range=1.0,
win_size=7,
k1=0.01,
k2=0.03):
"""Compute the mean structural similarity index between two images.
NOTE(dsuo): modified from skimage.metrics.structural_similarity.
Args:
im1: Image tensor. Any dimensionality with same shape.
im2: Image tensor. Any dimensionality with same shape.
    data_range: float. The data range of the input image (distance
      between minimum and maximum possible values). By default, this is
      estimated from the image data-type.
    win_size: int or None. The side-length of the sliding window used
      in comparison. Must be an odd value. If `gaussian_weights` is True, this
      is ignored and the window size will depend on `sigma`.
    k1: float. Algorithm parameter K1 (see [1]).
    k2: float. Algorithm parameter K2 (see [1]).
Returns:
mssim: Scalar float tensor.
The mean structural similarity index over the image.
References
[1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
(2004). Image quality assessment: From error visibility to
structural similarity. IEEE Transactions on Image Processing,
13, 600-612.
https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
:DOI:`10.1109/TIP.2003.819861`
"""
filter_func = functools.partial(_uniform_filter, size=win_size)
num_points = win_size**len(im1.shape)
# filter has already normalized by num_points
cov_norm = num_points / (num_points - 1) # sample covariance
# compute (weighted) means
ux = filter_func(im1)
uy = filter_func(im2)
# compute (weighted) variances and covariances
uxx = filter_func(im1 * im1)
uyy = filter_func(im2 * im2)
uxy = filter_func(im1 * im2)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
c1 = (k1 * data_range)**2
c2 = (k2 * data_range)**2
a1 = 2 * ux * uy + c1
a2 = 2 * vxy + c2
b1 = ux**2 + uy**2 + c1
b2 = vx + vy + c2
d = b1 * b2
s = (a1 * a2) / d
# to avoid edge effects will ignore filter radius strip around edges
pad = (win_size - 1) // 2
# compute (weighted) mean of ssim.
return torch.mean(s[pad:-pad, pad:-pad])
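# The quantities above implement the standard SSIM formula
#   SSIM(x, y) = ((2 * ux * uy + c1) * (2 * vxy + c2))
#                / ((ux ** 2 + uy ** 2 + c1) * (vx + vy + c2)),
# computed per win_size x win_size window and then averaged over the interior
# of the image (a border of (win_size - 1) // 2 pixels is discarded to avoid
# edge effects).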
def _uniform_filter(im, size=7):
pad_size = size // 2
def conv(im):
# This function does not seem to work with only two dimensions.
padded_im = pad_fn(im.unsqueeze(0), pad_size, padding_mode='symmetric')
# Remove the first dim and the padding from the second dim.
padded_im = padded_im[0, pad_size:-pad_size]
filters = torch.ones(1, 1, size, dtype=padded_im.dtype, device=DEVICE)
# Add additional dimension for the number of channels.
return F.conv1d(padded_im.unsqueeze(1), filters).squeeze(1) / size
im = conv(im)
im = conv(im.T)
return im.T
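# The two 1-D passes above realize a separable size x size uniform mean filter:
# a horizontal box filter followed by the same filter applied to the transposed
# result, equivalent to averaging each pixel over a size x size window with
# symmetric padding at the borders.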
|
"""FastMRI workload implemented in PyTorch."""
import contextlib
import math
from typing import Dict, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
import algorithmic_efficiency.random_utils as prng
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.models import \
UNet
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import ssim
from algorithmic_efficiency.workloads.fastmri.workload import \
BaseFastMRIWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
class FastMRIWorkload(BaseFastMRIWorkload):
def _build_input_queue(self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None):
per_device_batch_size = int(global_batch_size / N_GPUS)
# Only create and iterate over tf input pipeline in one Python process to
# avoid creating too many threads.
if RANK == 0:
data_rng = data_rng.astype('uint32')
np_iter = super()._build_input_queue(data_rng,
split,
data_dir,
global_batch_size,
cache,
repeat_final_dataset,
num_batches)
while True:
if RANK == 0:
batch = next(np_iter) # pylint: disable=stop-iteration-return
tensor_list, aux_tensor_list = [], []
for key, value in batch.items():
tensor = torch.as_tensor(value, device=DEVICE)
if key == 'weights':
weights = tensor.clone()
else:
if tensor.dim() == 4:
tensor_list.append(tensor)
else:
aux_tensor_list.append(tensor)
batch[key] = (
tensor[0] if USE_PYTORCH_DDP else tensor.view(
-1, *value.shape[2:]))
# Send batch to other devices when using DDP.
if USE_PYTORCH_DDP:
if split != 'train':
# During eval, the batch size of the remainder might be different.
per_device_batch_size = torch.tensor(
len(batch['inputs']), dtype=torch.int32, device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
weights = weights if 'weights' in batch else None
if weights is None:
weights = torch.ones((N_GPUS, per_device_batch_size),
dtype=torch.float64,
device=DEVICE)
# Has no effect, but without it `batch` has no `weights` key
# for RANK == 0, but has one for all others.
batch['weights'] = weights[0]
dist.broadcast(weights, src=0)
dist.broadcast(torch.stack(tensor_list), src=0)
dist.broadcast(torch.stack(aux_tensor_list), src=0)
else:
batch = {}
if split != 'train':
# During eval, the batch size of the remainder might be different.
per_device_batch_size = torch.empty((),
dtype=torch.int32,
device=DEVICE)
dist.broadcast(per_device_batch_size, src=0)
weights = torch.empty((N_GPUS, per_device_batch_size),
dtype=torch.float64,
device=DEVICE)
dist.broadcast(weights, src=0)
batch['weights'] = weights[RANK]
tensors = torch.empty((2, N_GPUS, per_device_batch_size, 320, 320),
device=DEVICE)
dist.broadcast(tensors, src=0)
aux_tensors = torch.empty((3, N_GPUS, per_device_batch_size),
device=DEVICE)
dist.broadcast(aux_tensors, src=0)
# Note that the batch dict in the RANK == 0 process is ordered.
batch['inputs'] = tensors[0][RANK]
batch['targets'] = tensors[1][RANK]
batch['mean'] = aux_tensors[0][RANK]
batch['std'] = aux_tensors[1][RANK]
batch['volume_max'] = aux_tensors[2][RANK]
yield batch
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
del aux_dropout_rate
torch.random.manual_seed(rng[0])
model = UNet(dropout_rate=dropout_rate)
self._param_shapes = param_utils.pytorch_param_shapes(model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
model = DDP(model, device_ids=[RANK], output_device=RANK)
else:
model = torch.nn.DataParallel(model)
return model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['up_conv.3.1.weight', 'up_conv.3.1.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
del update_batch_norm
model = params
if mode == spec.ForwardPassMode.EVAL:
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logit_batch = model(
augmented_and_preprocessed_input_batch['inputs'].unsqueeze(
1)).squeeze(1)
return logit_batch, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
per_example_losses = F.l1_loss(
logits_batch, label_batch, reduction='none').mean(dim=(1, 2))
# mask_batch is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def _eval_model(self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Return the SSIM and loss as a dict."""
outputs, _ = self.model_fn(
params,
batch,
None,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch.get('weights')
if weights is None:
weights = torch.ones(len(outputs), device=DEVICE)
weights_sum = weights.sum().to(torch.int)
ssim_sum = ssim(
outputs[:weights_sum],
targets[:weights_sum],
mean=batch['mean'][:weights_sum],
std=batch['std'][:weights_sum],
volume_max=batch['volume_max'][:weights_sum]).sum()
summed_loss = self.loss_fn(targets, outputs, weights)['summed']
return {'ssim': ssim_sum, 'loss': summed_loss}
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del model_state
del global_step
data_rng, model_rng = prng.split(rng, 2)
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
# These iterators repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
data_rng,
split,
data_dir,
global_batch_size=global_batch_size,
repeat_final_dataset=True,
num_batches=num_batches)
total_metrics = {
'ssim': torch.tensor(0., device=DEVICE),
'loss': torch.tensor(0., device=DEVICE),
}
for _ in range(num_batches):
batch = next(self._eval_iters[split])
batch_metrics = self._eval_model(params, batch, model_rng)
total_metrics = {
k: v + batch_metrics[k] for k, v in total_metrics.items()
}
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
return {k: float(v.item() / num_examples) for k, v in total_metrics.items()}
|
"""Jax / Flax implementation of FastMRI U-Net.
Forked from
https://github.com/google/init2winit/blob/master/init2winit/model_lib/unet.py
Original implementation:
github.com/facebookresearch/fastMRI/blob/main/fastmri/models/unet.py
Training:
github.com/facebookresearch/fastMRI/blob/main/fastmri/pl_modules/unet_module.py
Data:
github.com/facebookresearch/fastMRI/tree/main/fastmri/data
"""
from typing import Optional
import flax.linen as nn
import jax
import jax.numpy as jnp
def _compute_stats(x, axes):
# promote x to at least float32, this avoids half precision computation
# but preserves double or complex floating points
x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x)))
mean = jnp.mean(x, axes)
mean2 = jnp.mean(jnp.square(x), axes)
# mean2 - _abs_sq(mean) is not guaranteed to be non-negative due
# to floating point round-off errors.
var = jnp.maximum(0., mean2 - jnp.square(mean))
return mean, var
def _normalize(x, axes, mean, var, epsilon):
stats_shape = list(x.shape)
for axis in axes:
stats_shape[axis] = 1
mean = mean.reshape(stats_shape)
var = var.reshape(stats_shape)
y = x - mean
mul = jnp.sqrt(var + epsilon)
y /= mul
return y
def _simple_instance_norm2d(x, axes, epsilon=1e-5):
mean, var = _compute_stats(x, axes)
return _normalize(x, axes, mean, var, epsilon)
class UNet(nn.Module):
"""Jax / Flax implementation of a U-Net model.
O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
for biomedical image segmentation. In International Conference on Medical
image computing and computer-assisted intervention, pages 234–241.
Springer, 2015.
out_channels: Number of channels in the output to the U-Net model.
channels: Number of output channels of the first convolution layer.
num_pool_layers: Number of down-sampling and up-sampling layers.
dropout_rate: Dropout probability.
"""
out_channels: int = 1
channels: int = 32
num_pool_layers: int = 4
dropout_rate: Optional[float] = 0.0 # If None, defaults to 0.0.
@nn.compact
def __call__(self, x, train=True):
dropout_rate = self.dropout_rate
if dropout_rate is None:
dropout_rate = 0.0
down_sample_layers = [ConvBlock(self.channels, dropout_rate)]
ch = self.channels
for _ in range(self.num_pool_layers - 1):
down_sample_layers.append(ConvBlock(ch * 2, dropout_rate))
ch *= 2
conv = ConvBlock(ch * 2, dropout_rate)
up_conv = []
up_transpose_conv = []
for _ in range(self.num_pool_layers - 1):
up_transpose_conv.append(TransposeConvBlock(ch))
up_conv.append(ConvBlock(ch, dropout_rate))
ch //= 2
up_transpose_conv.append(TransposeConvBlock(ch))
up_conv.append(ConvBlock(ch, dropout_rate))
final_conv = nn.Conv(self.out_channels, kernel_size=(1, 1), strides=(1, 1))
stack = []
output = jnp.expand_dims(x, axis=-1)
# apply down-sampling layers
for layer in down_sample_layers:
output = layer(output, train)
stack.append(output)
output = nn.avg_pool(output, window_shape=(2, 2), strides=(2, 2))
output = conv(output, train)
# apply up-sampling layers
for transpose_conv, conv in zip(up_transpose_conv, up_conv):
downsample_layer = stack.pop()
output = transpose_conv(output)
# Reflect-pad on the right/bottom if needed to handle odd input dimensions.
padding_right = 0
padding_bottom = 0
if output.shape[-2] != downsample_layer.shape[-2]:
padding_right = 1 # padding right
if output.shape[-3] != downsample_layer.shape[-3]:
padding_bottom = 1 # padding bottom
if padding_right or padding_bottom:
padding = ((0, 0), (0, padding_bottom), (0, padding_right), (0, 0))
output = jnp.pad(output, padding, mode='reflect')
output = jnp.concatenate((output, downsample_layer), axis=-1)
output = conv(output, train)
output = final_conv(output)
return output.squeeze(-1)
class ConvBlock(nn.Module):
"""A Convolutional Block.
out_channels: Number of channels in the output.
dropout_rate: Dropout probability.
"""
out_channels: int
dropout_rate: float
@nn.compact
def __call__(self, x, train=True):
"""Forward function.
Note: Pytorch is NCHW and jax/flax is NHWC.
Args:
x: Input 4D tensor of shape `(N, H, W, in_channels)`.
train: deterministic or not (use init2winit naming).
Returns:
jnp.array: Output tensor of shape `(N, H, W, out_channels)`.
"""
x = nn.Conv(
features=self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
use_bias=False)(
x)
# InstanceNorm2d was run with no learnable params in the reference code,
# so this is a simple normalization over the spatial dims of each channel.
x = _simple_instance_norm2d(x, (1, 2))
x = jax.nn.leaky_relu(x, negative_slope=0.2)
# The reference code uses dropout2d, which applies the same mask to an entire
# channel. We replicate this by broadcasting the dropout mask over H and W.
x = nn.Dropout(
self.dropout_rate, broadcast_dims=(1, 2), deterministic=not train)(
x)
x = nn.Conv(
features=self.out_channels,
kernel_size=(3, 3),
strides=(1, 1),
use_bias=False)(
x)
x = _simple_instance_norm2d(x, (1, 2))
x = jax.nn.leaky_relu(x, negative_slope=0.2)
x = nn.Dropout(
self.dropout_rate, broadcast_dims=(1, 2), deterministic=not train)(
x)
return x
class TransposeConvBlock(nn.Module):
"""A Transpose Convolutional Block.
out_channels: Number of channels in the output.
"""
out_channels: int
@nn.compact
def __call__(self, x):
"""Forward function.
Args:
x: Input 4D tensor of shape `(N, H, W, in_channels)`.
Returns:
jnp.array: Output tensor of shape `(N, H*2, W*2, out_channels)`.
"""
x = nn.ConvTranspose(
self.out_channels, kernel_size=(2, 2), strides=(2, 2), use_bias=False)(
x)
x = _simple_instance_norm2d(x, (1, 2))
x = jax.nn.leaky_relu(x, negative_slope=0.2)
return x
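# Minimal usage sketch (illustrative only; the batch size is arbitrary and the
# 320x320 spatial shape mirrors the FastMRI workload's inputs):
#   model = UNet(dropout_rate=0.0)
#   fake_batch = jnp.zeros((2, 320, 320))
#   variables = jax.jit(model.init)({'params': jax.random.PRNGKey(0)},
#                                   fake_batch)
#   out = model.apply(variables, fake_batch, train=False)  # shape (2, 320, 320)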
|
"""Structural similarity index calculation in Jax."""
import functools
import jax
import jax.numpy as jnp
def ssim(logits, targets, mean=None, std=None, volume_max=None):
"""Computes example-wise structural similarity for a batch.
NOTE(dsuo): we use the same (default) arguments to `structural_similarity`
as in https://arxiv.org/abs/1811.08839.
Args:
logits: (batch,) + input.shape float array.
targets: (batch,) + input.shape float array.
mean: (batch,) mean of original images.
std: (batch,) std of original images.
volume_max: (batch,) of the volume max for the volumes each example came
from.
Returns:
Structural similarity computed per example, shape [batch].
"""
if volume_max is None:
volume_max = jnp.ones(logits.shape[0])
# NOTE(dsuo): `volume_max` can be 0 if we have a padded batch, but this will
# lead to NaN values in `ssim`.
volume_max = jnp.where(volume_max == 0, jnp.ones_like(volume_max), volume_max)
if mean is None:
mean = jnp.zeros(logits.shape[0])
if std is None:
std = jnp.ones(logits.shape[0])
mean = mean.reshape((-1,) + (1,) * (len(logits.shape) - 1))
std = std.reshape((-1,) + (1,) * (len(logits.shape) - 1))
logits = logits * std + mean
targets = targets * std + mean
ssims = jax.vmap(structural_similarity)(logits, targets, volume_max)
return ssims
def structural_similarity(im1,
im2,
data_range=1.0,
win_size=7,
k1=0.01,
k2=0.03):
"""Compute the mean structural similarity index between two images.
NOTE(dsuo): modified from skimage.metrics.structural_similarity.
Args:
im1: ndarray Images. Any dimensionality with same shape.
im2: ndarray Images. Any dimensionality with same shape.
data_range: float. The data range of the input image (distance
between minimum and maximum possible values). By default, this is
estimated from the image data-type.
win_size: int. The side-length of the sliding window used
in comparison. Must be an odd value.
k1: float. Algorithm parameter K1 (see [1]).
k2: float. Algorithm parameter K2 (see [1]).
Returns:
mssim: float
The mean structural similarity index over the image.
References
[1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P.
(2004). Image quality assessment: From error visibility to
structural similarity. IEEE Transactions on Image Processing,
13, 600-612.
https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf,
:DOI:`10.1109/TIP.2003.819861`
"""
filter_func = functools.partial(_uniform_filter, size=win_size)
num_points = win_size**len(im1.shape)
# filter has already normalized by num_points
cov_norm = num_points / (num_points - 1) # sample covariance
# compute (weighted) means
ux = filter_func(im1)
uy = filter_func(im2)
# compute (weighted) variances and covariances
uxx = filter_func(im1 * im1)
uyy = filter_func(im2 * im2)
uxy = filter_func(im1 * im2)
vx = cov_norm * (uxx - ux * ux)
vy = cov_norm * (uyy - uy * uy)
vxy = cov_norm * (uxy - ux * uy)
c1 = (k1 * data_range)**2
c2 = (k2 * data_range)**2
a1 = 2 * ux * uy + c1
a2 = 2 * vxy + c2
b1 = ux**2 + uy**2 + c1
b2 = vx + vy + c2
d = b1 * b2
s = (a1 * a2) / d
# To avoid edge effects, ignore a strip of width `pad` (the filter radius)
# around the edges.
pad = (win_size - 1) // 2
# compute (weighted) mean of ssim.
return jnp.mean(s.at[pad:-pad, pad:-pad].get())
def _uniform_filter(im, size=7):
def conv(im):
return jnp.convolve(
jnp.pad(im, pad_width=size // 2, mode='symmetric'),
jnp.ones(size),
mode='valid') / size
im = jax.vmap(conv, (0,))(im)
im = jax.vmap(conv, (1,))(im)
return im.T
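# Illustrative usage sketch (mirrors how the FastMRI eval code calls `ssim`;
# the arrays below are placeholders, not real reconstructions):
#   logits = jnp.zeros((4, 320, 320))
#   targets = jnp.ones((4, 320, 320))
#   vals = ssim(logits, targets, mean=jnp.zeros(4), std=jnp.ones(4),
#               volume_max=jnp.ones(4))
#   # vals has shape (4,): one mean SSIM value per example.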
|
"""FastMRI workload implemented in Jax."""
import functools
import math
from typing import Dict, Optional, Tuple
from flax import jax_utils
import jax
import jax.numpy as jnp
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
import algorithmic_efficiency.random_utils as prng
from algorithmic_efficiency.workloads.fastmri.fastmri_jax import models
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import ssim
from algorithmic_efficiency.workloads.fastmri.workload import \
BaseFastMRIWorkload
class FastMRIWorkload(BaseFastMRIWorkload):
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""aux_dropout_rate is unused."""
del aux_dropout_rate
fake_batch = jnp.zeros((13, 320, 320))
self._model = models.UNet(dropout_rate=dropout_rate)
variables = jax.jit(self._model.init)({'params': rng}, fake_batch)
params = variables['params']
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
params = jax_utils.replicate(params)
return params, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Conv_0'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del update_batch_norm
train = mode == spec.ForwardPassMode.TRAIN
logits = self._model.apply({'params': params},
augmented_and_preprocessed_input_batch['inputs'],
rngs={'dropout': rng},
train=train)
return logits, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
del label_smoothing
per_example_losses = jnp.mean(
jnp.abs(logits_batch - label_batch),
axis=tuple(range(1, logits_batch.ndim)))
# mask_batch is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0),
static_broadcasted_argnums=(0,))
def _eval_model(self,
params: spec.Tensor,
batch: Dict[str, spec.Tensor],
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Return the SSIM and loss as a dict."""
logits, _ = self.model_fn(
params,
batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch.get('weights')
if weights is None:
weights = jnp.ones(len(logits))
ssim_vals = ssim(
logits,
targets,
mean=batch['mean'],
std=batch['std'],
volume_max=batch['volume_max'])
ssim_sum = jnp.sum(ssim_vals * weights)
summed_loss = self.loss_fn(targets, logits, weights)['summed']
metrics = {
'ssim': ssim_sum,
'loss': summed_loss,
}
metrics = jax.lax.psum(metrics, axis_name='batch')
return metrics
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del model_state
del global_step
data_rng, model_rng = prng.split(rng, 2)
num_batches = int(math.ceil(num_examples / global_batch_size))
if split not in self._eval_iters:
# These iterators repeat indefinitely.
self._eval_iters[split] = self._build_input_queue(
data_rng,
split,
data_dir,
global_batch_size=global_batch_size,
repeat_final_dataset=True,
num_batches=num_batches)
total_metrics = {'ssim': 0., 'loss': 0.}
eval_rngs = prng.split(model_rng, jax.local_device_count())
for _ in range(num_batches):
batch = next(self._eval_iters[split])
# We already sum these metrics across devices inside _eval_model.
synced_metrics = self._eval_model(params, batch, eval_rngs)
total_metrics = {
k: v + synced_metrics[k][0] for k, v in total_metrics.items()
}
return {k: float(v.item() / num_examples) for k, v in total_metrics.items()}
|
"""CIFAR workload parent class."""
import abc
import math
from typing import Any, Dict, Iterator, Optional, Tuple
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
import algorithmic_efficiency.random_utils as prng
USE_PYTORCH_DDP, _, _, _ = pytorch_setup()
class BaseCifarWorkload(spec.Workload):
_num_classes: int = 10
@property
def target_metric_name(self) -> str:
"""The name of the target metric (useful for scoring/processing code)."""
return 'accuracy'
def has_reached_validation_target(self, eval_result: Dict[str,
float]) -> bool:
return eval_result['validation/accuracy'] > self.validation_target_value
@property
def validation_target_value(self) -> float:
return 0.85
def has_reached_test_target(self, eval_result: Dict[str, float]) -> bool:
return eval_result['test/accuracy'] > self.test_target_value
@property
def test_target_value(self) -> float:
return 0.85
@property
def loss_type(self) -> spec.LossType:
return spec.LossType.SOFTMAX_CROSS_ENTROPY
@property
def num_train_examples(self) -> int:
return 45000
@property
def num_eval_train_examples(self) -> int:
# Round up from num_validation_examples (which is the default for
# num_eval_train_examples) to the next multiple of eval_batch_size, so that
# we don't have to extract the correctly sized subset of the training data.
rounded_up_multiple = math.ceil(self.num_validation_examples /
self.eval_batch_size)
return rounded_up_multiple * self.eval_batch_size
@property
def num_validation_examples(self) -> int:
return 5000
@property
def num_test_examples(self) -> int:
return 10000
@property
def eval_batch_size(self) -> int:
return 1024
@property
def train_mean(self) -> Tuple[float, float, float]:
return (0.49139968, 0.48215827, 0.44653124)
@property
def train_stddev(self) -> Tuple[float, float, float]:
return (0.24703233, 0.24348505, 0.26158768)
# Data augmentation settings.
@property
def crop_size(self) -> int:
return 32
@property
def padding_size(self) -> int:
return 4
@property
def max_allowed_runtime_sec(self) -> int:
return 3600 # 1 hour.
@property
def eval_period_time_sec(self) -> int:
return 600 # 10 mins.
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None
) -> Iterator[Dict[str, spec.Tensor]]:
raise NotImplementedError
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del num_batches
if split == 'test':
if not cache:
raise ValueError('cache must be True for split=test.')
if not repeat_final_dataset:
raise ValueError('repeat_final_dataset must be True for split=test.')
return self._build_dataset(data_rng,
split,
data_dir,
global_batch_size,
cache,
repeat_final_dataset)
@property
def step_hint(self) -> int:
# Note that the target setting algorithms were not actually run on this
# workload, but for completeness we provide the number of steps for 100
# epochs of the 50k CIFAR-10 training images at batch size 1024
# (100 * 50_000 / 1024 is approximately 4883).
return 4883
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> Dict[spec.Tensor, spec.ModelAuxiliaryState]:
raise NotImplementedError
@abc.abstractmethod
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
def _eval_model_on_split(self,
split: str,
num_examples: int,
global_batch_size: int,
params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState,
data_dir: str,
global_step: int = 0) -> Dict[str, float]:
"""Run a full evaluation of the model."""
del global_step
data_rng, model_rng = prng.split(rng, 2)
if split not in self._eval_iters:
self._eval_iters[split] = self._build_input_queue(
data_rng=data_rng,
split=split,
data_dir=data_dir,
global_batch_size=global_batch_size,
cache=True,
repeat_final_dataset=True)
num_batches = int(math.ceil(num_examples / global_batch_size))
num_devices = max(torch.cuda.device_count(), jax.local_device_count())
eval_metrics = {}
for _ in range(num_batches):
batch = next(self._eval_iters[split])
per_device_model_rngs = prng.split(model_rng, num_devices)
# These metrics are summed (not averaged) over the batch inside _eval_model.
synced_metrics = self._eval_model(params,
batch,
model_state,
per_device_model_rngs)
for metric_name, metric_value in synced_metrics.items():
if metric_name not in eval_metrics:
eval_metrics[metric_name] = 0.0
eval_metrics[metric_name] += metric_value
return self._normalize_eval_metrics(num_examples, eval_metrics)
|
"""CIFAR input pipeline.
Forked from Flax example which can be found here:
https://github.com/google/flax/blob/main/examples/imagenet/input_pipeline.py
and adjusted to work for CIFAR10.
"""
import functools
from typing import Dict, Iterator, Tuple
from flax import jax_utils
import jax
import tensorflow as tf
import tensorflow_datasets as tfds
from algorithmic_efficiency import spec
from algorithmic_efficiency.data_utils import shard_and_maybe_pad_np
def preprocess_for_train(image: spec.Tensor,
rng: spec.RandomState,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
crop_size: int,
padding_size: int,
dtype: tf.DType = tf.float32) -> spec.Tensor:
"""Preprocesses the given image for training.
Args:
image: `Tensor` representing an image.
rng: A per-example, per-step unique RNG seed.
mean_rgb: A tuple representing the mean of the total training images.
stddev_rgb: A tuple representing the standard deviation of the
total training images.
crop_size: Desired output size of the crop.
padding_size: An optional padding on each border of the image.
dtype: data type of the image.
Returns:
A preprocessed image `Tensor`.
"""
rng = tf.random.experimental.stateless_split(rng, 2)
crop_rng = rng[0, :]
flip_rng = rng[1, :]
image_shape = tf.shape(image)
image = tf.image.resize_with_crop_or_pad(image,
image_shape[0] + padding_size,
image_shape[1] + padding_size)
image = tf.image.stateless_random_crop(
image, (crop_size, crop_size, 3), seed=crop_rng)
image = tf.image.stateless_random_flip_left_right(image, seed=flip_rng)
image = normalize_image(image, mean_rgb, stddev_rgb, dtype=dtype)
return image
def preprocess_for_eval(image: spec.Tensor,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
dtype: tf.DType = tf.float32) -> spec.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image.
mean_rgb: A tuple representing the mean of the total training images.
stddev_rgb: A tuple representing the standard deviation
of the total training images.
dtype: data type of the image.
Returns:
A preprocessed image `Tensor`.
"""
image = normalize_image(image, mean_rgb, stddev_rgb, dtype=dtype)
return image
def normalize_image(image: spec.Tensor,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
dtype=tf.float32) -> spec.Tensor:
image = tf.image.convert_image_dtype(image, dtype)
image -= tf.constant(mean_rgb, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(stddev_rgb, shape=[1, 1, 3], dtype=image.dtype)
return image
def create_split(
split: str,
dataset_builder: tfds.core.dataset_builder.DatasetBuilder,
rng: spec.RandomState,
global_batch_size: int,
train: bool,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
cache: bool = False,
repeat_final_dataset: bool = False,
crop_size: int = 32,
padding_size: int = 4,
) -> Iterator[Dict[str, spec.Tensor]]:
"""Creates a split from the CIFAR-10 dataset using TensorFlow Datasets."""
shuffle_rng, preprocess_rng = jax.random.split(rng, 2)
def preprocess_example(example_index, example):
dtype = tf.float32
if train:
per_step_preprocess_rng = tf.random.experimental.stateless_fold_in(
tf.cast(preprocess_rng, tf.int64), example_index)
image = preprocess_for_train(example['image'],
per_step_preprocess_rng,
mean_rgb,
stddev_rgb,
crop_size,
padding_size,
dtype)
else:
image = preprocess_for_eval(example['image'], mean_rgb, stddev_rgb, dtype)
return {'inputs': image, 'targets': example['label']}
ds = dataset_builder.as_dataset(split=split)
options = tf.data.Options()
options.threading.private_threadpool_size = 48
ds = ds.with_options(options)
if cache:
ds = ds.cache()
if train or split == 'eval_train':
ds = ds.repeat()
ds = ds.shuffle(16 * global_batch_size, seed=shuffle_rng[0])
# We call ds.enumerate() to get a globally unique per-example, per-step
# index that we can fold into the RNG seed.
ds = ds.enumerate()
ds = ds.map(
preprocess_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.batch(global_batch_size, drop_remainder=train)
if repeat_final_dataset:
ds = ds.repeat()
ds = ds.prefetch(10)
return ds
def create_input_iter(
split: str,
dataset_builder: tfds.core.dataset_builder.DatasetBuilder,
rng: spec.RandomState,
global_batch_size: int,
mean_rgb: Tuple[float, float, float],
stddev_rgb: Tuple[float, float, float],
crop_size: int,
padding_size: int,
train: bool,
cache: bool,
repeat_final_dataset: bool) -> Iterator[Dict[str, spec.Tensor]]:
ds = create_split(
split,
dataset_builder,
rng,
global_batch_size,
train=train,
mean_rgb=mean_rgb,
stddev_rgb=stddev_rgb,
cache=cache,
repeat_final_dataset=repeat_final_dataset,
crop_size=crop_size,
padding_size=padding_size)
it = map(
functools.partial(
shard_and_maybe_pad_np, global_batch_size=global_batch_size),
ds)
it = jax_utils.prefetch_to_device(it, 2)
return it
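# Illustrative sketch (hypothetical data_dir and batch size) of how the Jax
# CIFAR workload builds a training iterator from this module:
#   ds_builder = tfds.builder('cifar10:3.0.2', data_dir='/tmp/cifar')
#   it = create_input_iter(
#       'train[:45000]', ds_builder, jax.random.PRNGKey(0),
#       global_batch_size=128,
#       mean_rgb=(0.49139968, 0.48215827, 0.44653124),
#       stddev_rgb=(0.24703233, 0.24348505, 0.26158768),
#       crop_size=32, padding_size=4, train=True, cache=False,
#       repeat_final_dataset=False)
#   batch = next(it)  # {'inputs': ..., 'targets': ...}, sharded across devices.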
|
"""Jax implementation of ResNet V1 for CIFAR.
Adapted from Flax example:
https://github.com/google/flax/blob/main/examples/imagenet/models.py.
"""
import functools
from typing import Any, Callable, Tuple
from flax import linen as nn
import jax.numpy as jnp
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \
ResNetBlock
ModuleDef = nn.Module
class ResNet(nn.Module):
stage_sizes: Tuple[int]
block_cls: ModuleDef
num_classes: int = 10
num_filters: int = 64
dtype: Any = jnp.float32
act: Callable = nn.relu
@nn.compact
def __call__(self,
x: spec.Tensor,
update_batch_norm: bool = True) -> spec.Tensor:
conv = functools.partial(nn.Conv, use_bias=False, dtype=self.dtype)
norm = functools.partial(
nn.BatchNorm,
use_running_average=not update_batch_norm,
momentum=0.9,
epsilon=1e-5,
dtype=self.dtype)
x = conv(
self.num_filters, (3, 3), (1, 1),
padding=[(1, 1), (1, 1)],
name='Conv_init')(
x)
x = norm(name='BatchNorm_init')(x)
x = nn.relu(x)
for i, block_size in enumerate(self.stage_sizes):
for j in range(block_size):
strides = (2, 2) if i > 0 and j == 0 else (1, 1)
x = self.block_cls(
self.num_filters * 2**i,
strides=strides,
conv=conv,
norm=norm,
act=self.act)(
x)
x = nn.avg_pool(x, (4, 4), strides=(4, 4))
x = jnp.mean(x, axis=(1, 2))
x = nn.Dense(
self.num_classes,
kernel_init=nn.initializers.normal(),
dtype=self.dtype)(
x)
return x
ResNet18 = functools.partial(
ResNet, stage_sizes=(2, 2, 2, 2), block_cls=ResNetBlock)
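# Illustrative init/apply sketch (mirrors how the Jax CIFAR workload uses this
# model; the input shape is the standard CIFAR-10 image shape):
#   import jax
#   model = ResNet18(num_classes=10)
#   variables = jax.jit(model.init)({'params': jax.random.PRNGKey(0)},
#                                   jnp.ones((1, 32, 32, 3)))
#   logits = model.apply(variables, jnp.ones((1, 32, 32, 3)),
#                        update_batch_norm=False)  # shape (1, 10)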
|
"""CIFAR workload implemented in Jax."""
import functools
from typing import Any, Dict, Iterator, Optional, Tuple
from flax import jax_utils
from flax import linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import optax
import tensorflow_datasets as tfds
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.cifar.cifar_jax import models
from algorithmic_efficiency.workloads.cifar.cifar_jax.input_pipeline import \
create_input_iter
from algorithmic_efficiency.workloads.cifar.workload import BaseCifarWorkload
class CifarWorkload(BaseCifarWorkload):
def _build_cifar_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None
) -> Iterator[Dict[str, spec.Tensor]]:
ds_builder = tfds.builder('cifar10:3.0.2', data_dir=data_dir)
train = split == 'train'
assert self.num_train_examples + self.num_validation_examples == 50000
if split in ['train', 'eval_train']:
split = f'train[:{self.num_train_examples}]'
elif split == 'validation':
split = f'train[{self.num_train_examples}:]'
ds = create_input_iter(
split,
ds_builder,
data_rng,
batch_size,
self.train_mean,
self.train_stddev,
self.crop_size,
self.padding_size,
train=train,
cache=not train if cache is None else cache,
repeat_final_dataset=repeat_final_dataset)
return ds
def _build_input_queue(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None,
num_batches: Optional[int] = None) -> Iterator[Dict[str, spec.Tensor]]:
del num_batches
return self._build_cifar_dataset(data_rng,
split,
data_dir,
global_batch_size,
cache,
repeat_final_dataset)
def sync_batch_stats(
self, model_state: spec.ModelAuxiliaryState) -> spec.ModelAuxiliaryState:
"""Sync the batch statistics across replicas."""
# An axis_name is passed to pmap which can then be used by pmean.
# In this case each device has its own version of the batch statistics
# and we average them.
avg_fn = jax.pmap(lambda x: lax.pmean(x, 'x'), 'x')
new_model_state = model_state.copy(
{'batch_stats': avg_fn(model_state['batch_stats'])})
return new_model_state
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
model_cls = getattr(models, 'ResNet18')
model = model_cls(num_classes=self._num_classes, dtype=jnp.float32)
self._model = model
input_shape = (1, 32, 32, 3)
variables = jax.jit(model.init)({'params': rng},
jnp.ones(input_shape, model.dtype))
model_state, params = variables.pop('params')
self._param_shapes = param_utils.jax_param_shapes(params)
self._param_types = param_utils.jax_param_types(self._param_shapes)
model_state = jax_utils.replicate(model_state)
params = jax_utils.replicate(params)
return params, model_state
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key == 'Dense_0'
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del mode
del rng
variables = {'params': params, **model_state}
if update_batch_norm:
logits, new_model_state = self._model.apply(
variables,
augmented_and_preprocessed_input_batch['inputs'],
update_batch_norm=update_batch_norm,
mutable=['batch_stats'])
return logits, new_model_state
else:
logits = self._model.apply(
variables,
augmented_and_preprocessed_input_batch['inputs'],
update_batch_norm=update_batch_norm,
mutable=False)
return logits, model_state
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
one_hot_targets = jax.nn.one_hot(label_batch, self._num_classes)
smoothed_targets = optax.smooth_labels(one_hot_targets, label_smoothing)
per_example_losses = -jnp.sum(
smoothed_targets * nn.log_softmax(logits_batch), axis=-1)
# `mask_batch` is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': n_valid_examples,
'per_example': per_example_losses,
}
def _compute_metrics(self,
logits: spec.Tensor,
labels: spec.Tensor,
weights: spec.Tensor) -> Dict[str, spec.Tensor]:
summed_loss = self.loss_fn(labels, logits, weights)['summed']
# Number of correct predictions.
accuracy = jnp.sum((jnp.argmax(logits, -1) == labels) * weights)
metrics = {
'loss': summed_loss,
'accuracy': accuracy,
}
metrics = lax.psum(metrics, axis_name='batch')
return metrics
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, 0, 0, 0, None),
static_broadcasted_argnums=(0,))
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> Dict[spec.Tensor, spec.ModelAuxiliaryState]:
"""Return the mean accuracy and loss as a dict."""
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
weights = batch.get('weights')
if weights is None:
weights = jnp.ones(len(logits))
return self._compute_metrics(logits, batch['targets'], weights)
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
"""Normalize eval metrics."""
return jax.tree_map(lambda x: float(x[0] / num_examples), total_metrics)
|
"""PyTorch implementation of ResNet for CIFAR.
Adapted from torchvision:
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py.
"""
import collections
from typing import Any, Callable, List, Optional, Type, Union
import torch
from torch import nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.init_utils import pytorch_default_init
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
BasicBlock
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
Bottleneck
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
conv1x1
class ResNet(nn.Module):
def __init__(self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 10,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# Each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead.
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
'replace_stride_with_dilation should be None '
f'or a 3-element tuple, got {replace_stride_with_dilation}')
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.reset_parameters()
def reset_parameters(self) -> None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
pytorch_default_init(m)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
nn.init.normal_(self.fc.weight, std=1e-2)
nn.init.constant_(self.fc.bias, 0.)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros,
# and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to
# https://arxiv.org/abs/1706.02677.
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self,
block: Type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = torch.nn.Sequential(
collections.OrderedDict([
("conv", conv1x1(self.inplanes, planes * block.expansion,
stride)),
("bn", norm_layer(planes * block.expansion)),
]))
layers = []
layers.append(
block(self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x: spec.Tensor) -> spec.Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = torch.nn.functional.avg_pool2d(x, 4)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def resnet18(**kwargs: Any) -> ResNet:
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
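# Illustrative usage sketch (inputs are NCHW in PyTorch; the tensor below is a
# placeholder, not real CIFAR data):
#   model = resnet18(num_classes=10)
#   images = torch.randn(8, 3, 32, 32)
#   logits = model(images)  # shape (8, 10)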
|
"""CIFAR10 workload implemented in PyTorch."""
import contextlib
import functools
import random
from typing import Any, Dict, Optional, Tuple
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
from torchvision import transforms
from torchvision.datasets import CIFAR10
from algorithmic_efficiency import data_utils
from algorithmic_efficiency import param_utils
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.cifar.cifar_pytorch.models import \
resnet18
from algorithmic_efficiency.workloads.cifar.workload import BaseCifarWorkload
USE_PYTORCH_DDP, RANK, DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
class CifarWorkload(BaseCifarWorkload):
def _build_dataset(
self,
data_rng: spec.RandomState,
split: str,
data_dir: str,
global_batch_size: int,
cache: Optional[bool] = None,
repeat_final_dataset: Optional[bool] = None
) -> torch.utils.data.DataLoader:
del cache
del repeat_final_dataset
is_train = split == 'train'
normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=self.train_mean, std=self.train_stddev),
])
eval_transform_config = normalize
train_transform_config = transforms.Compose([
transforms.RandomCrop(
size=self.crop_size,
padding=self.padding_size,
),
transforms.RandomHorizontalFlip(),
normalize,
])
transform = train_transform_config if is_train else eval_transform_config
dataset = CIFAR10(
root=data_dir,
train=split in ['train', 'eval_train', 'validation'],
download=False,
transform=transform)
assert self.num_train_examples + self.num_validation_examples == 50000
indices = list(range(50000))
indices_split = {
'train': indices[:self.num_train_examples],
'validation': indices[self.num_train_examples:],
}
if split == 'eval_train':
train_indices = indices_split['train']
random.Random(data_rng[0]).shuffle(train_indices)
indices_split['eval_train'] = train_indices[:self.num_eval_train_examples]
if split in indices_split:
dataset = torch.utils.data.Subset(dataset, indices_split[split])
sampler = None
if USE_PYTORCH_DDP:
per_device_batch_size = global_batch_size // N_GPUS
ds_iter_batch_size = per_device_batch_size
if is_train:
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, num_replicas=N_GPUS, rank=RANK, shuffle=True)
else:
sampler = data_utils.DistributedEvalSampler(
dataset, num_replicas=N_GPUS, rank=RANK, shuffle=False)
else:
ds_iter_batch_size = global_batch_size
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=ds_iter_batch_size,
shuffle=not USE_PYTORCH_DDP and is_train,
sampler=sampler,
num_workers=4,
pin_memory=True,
drop_last=is_train)
dataloader = data_utils.PrefetchedWrapper(dataloader, DEVICE)
dataloader = data_utils.cycle(dataloader, custom_sampler=USE_PYTORCH_DDP)
return dataloader
def init_model_fn(
self,
rng: spec.RandomState,
dropout_rate: Optional[float] = None,
aux_dropout_rate: Optional[float] = None) -> spec.ModelInitState:
"""Dropout is unused."""
del dropout_rate
del aux_dropout_rate
if hasattr(self, '_model'):
if isinstance(self._model, (DDP, torch.nn.DataParallel)):
self._model.module.reset_parameters()
else:
self._model.reset_parameters()
return self._model, None
torch.random.manual_seed(rng[0])
self._model = resnet18(num_classes=self._num_classes)
self._param_shapes = param_utils.pytorch_param_shapes(self._model)
self._param_types = param_utils.pytorch_param_types(self._param_shapes)
self._model.to(DEVICE)
if N_GPUS > 1:
if USE_PYTORCH_DDP:
self._model = DDP(self._model, device_ids=[RANK], output_device=RANK)
else:
self._model = torch.nn.DataParallel(self._model)
return self._model, None
def is_output_params(self, param_key: spec.ParameterKey) -> bool:
return param_key in ['fc.weight', 'fc.bias']
def model_fn(
self,
params: spec.ParameterContainer,
augmented_and_preprocessed_input_batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
mode: spec.ForwardPassMode,
rng: spec.RandomState,
update_batch_norm: bool) -> Tuple[spec.Tensor, spec.ModelAuxiliaryState]:
del model_state
del rng
model = params
if mode == spec.ForwardPassMode.EVAL:
if update_batch_norm:
raise ValueError(
'Batch norm statistics cannot be updated during evaluation.')
model.eval()
if mode == spec.ForwardPassMode.TRAIN:
model.train()
model.apply(
functools.partial(
pytorch_utils.update_batch_norm_fn,
update_batch_norm=update_batch_norm))
contexts = {
spec.ForwardPassMode.EVAL: torch.no_grad,
spec.ForwardPassMode.TRAIN: contextlib.nullcontext,
}
with contexts[mode]():
logits_batch = model(augmented_and_preprocessed_input_batch['inputs'])
return logits_batch, None
# Does NOT apply regularization, which is left to the submitter to do in
# `update_params`.
def loss_fn(
self,
label_batch: spec.Tensor, # Dense or one-hot labels.
logits_batch: spec.Tensor,
mask_batch: Optional[spec.Tensor] = None,
label_smoothing: float = 0.0) -> Dict[str, spec.Tensor]: # differentiable
"""Evaluate the (masked) loss function at (label_batch, logits_batch).
Return {'summed': scalar summed loss, 'n_valid_examples': scalar number of
valid examples in batch, 'per_example': 1-d array of per-example losses}
(not synced across devices).
"""
per_example_losses = F.cross_entropy(
logits_batch,
label_batch,
reduction='none',
label_smoothing=label_smoothing)
# `mask_batch` is assumed to be shape [batch].
if mask_batch is not None:
per_example_losses *= mask_batch
n_valid_examples = mask_batch.sum()
else:
n_valid_examples = len(per_example_losses)
summed_loss = per_example_losses.sum()
return {
'summed': summed_loss,
'n_valid_examples': torch.as_tensor(n_valid_examples, device=DEVICE),
'per_example': per_example_losses,
}
def _eval_model(
self,
params: spec.ParameterContainer,
batch: Dict[str, spec.Tensor],
model_state: spec.ModelAuxiliaryState,
rng: spec.RandomState) -> Dict[spec.Tensor, spec.ModelAuxiliaryState]:
"""Return the mean accuracy and loss as a dict."""
logits, _ = self.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.EVAL,
rng,
update_batch_norm=False)
targets = batch['targets']
weights = batch.get('weights')
if weights is None:
weights = torch.ones(len(logits), device=DEVICE)
_, predicted = torch.max(logits.data, 1)
# Number of correct predictions.
accuracy = ((predicted == targets) * weights).sum()
summed_loss = self.loss_fn(targets, logits, weights)['summed']
return {'accuracy': accuracy, 'loss': summed_loss}
def _normalize_eval_metrics(
self, num_examples: int, total_metrics: Dict[str,
Any]) -> Dict[str, float]:
if USE_PYTORCH_DDP:
for metric in total_metrics.values():
dist.all_reduce(metric)
return {k: float(v.item() / num_examples) for k, v in total_metrics.items()}
|
"""Data preprocessing for LibriSpeech.
Modified from https://github.com/lsari/librispeech_100.
"""
import multiprocessing.dummy
import os
from os.path import exists
import sys
import threading
import time
from absl import flags
from absl import logging
import numpy as np
import pandas as pd
from pydub import AudioSegment
import tensorflow as tf
from datasets import librispeech_tokenizer
gfile = tf.io.gfile
copy = tf.io.gfile.copy
exists = tf.io.gfile.exists
rename = tf.io.gfile.rename
flags.DEFINE_string('raw_input_dir',
'',
'Path to the raw training data directory.')
flags.DEFINE_string('output_dir', '', 'Dir to write the processed data to.')
flags.DEFINE_string('tokenizer_vocab_path',
'',
'Path to sentence piece tokenizer vocab file.')
FLAGS = flags.FLAGS
TRANSCRIPTION_MAX_LENGTH = 256
AUDIO_MAX_LENGTH = 320000
# taken from TFDS page for librispeech dataset :
# https://www.tensorflow.org/datasets/catalog/librispeech
librispeech_example_counts = {
'train-clean-100': 28539,
'train-clean-360': 104014,
'train-other-500': 148688,
'test-clean': 2620,
'test-other': 2939,
'dev-clean': 2703,
'dev-other': 2864,
}
class Counter:
"""A threadsafe counter."""
lock = threading.Lock()
value = 0
def inc(self):
with self.lock:
self.value += 1
def val(self):
with self.lock:
return self.value
def report_progress(count, total, start_time):
"""Print a progress bar to stdout."""
now = time.time()
size = 50
filled = int(round(size * count / float(total)))
percent = round(100. * count / float(total), 1)
bar = "-" * filled + "." * (size - filled)
sys.stdout.write("[%s] %d%% (%d of %d) %.2f sample/sec\r" %
(bar, percent, count, total, count / (now - start_time)))
sys.stdout.flush()
def preprocess_data(in_folder, out_folder, tokenizer, split):
finished = Counter()
skipped = Counter()
start_time = time.time()
def process(index):
data_folder, speaker_folder, chapter_folder = index
utterance_ids = []
trans_file = (f'{data_folder}/{speaker_folder}/{chapter_folder}/'
f'{speaker_folder}-{chapter_folder}.trans.txt')
if not exists(trans_file):
skipped.inc()
return utterance_ids
with open(trans_file, 'r', encoding='UTF-8') as f:
for l in f:
utt, trans = l.strip().split(' ', maxsplit=1)
audio_path = (
f'{data_folder}/{speaker_folder}/{chapter_folder}/{utt}.flac')
if not os.path.isfile(audio_path):
skipped.inc()
continue
if len(trans) > TRANSCRIPTION_MAX_LENGTH:
skipped.inc()
continue
sound = load_audio(audio_path)
sound = np.array(sound, dtype=np.int64)
if sound.shape[0] > AUDIO_MAX_LENGTH:
skipped.inc()
continue
targets = tokenizer.tokenize(trans).numpy().astype(np.int32)
np.save('{}/{}/{}_audio.npy'.format(out_folder, split, utt), sound)
np.save('{}/{}/{}_targets.npy'.format(out_folder, split, utt), targets)
finished.inc()
report_progress(finished.val() + skipped.val(),
librispeech_example_counts[split],
start_time)
utterance_ids.append(utt)
return utterance_ids
paths = []
for _, speaker_folder in enumerate(os.listdir(in_folder)):
for chapter_folder in os.listdir(f'{in_folder}/{speaker_folder}'):
paths.append((in_folder, speaker_folder, chapter_folder))
sys.stdout.write('\r')
pool = multiprocessing.dummy.Pool(32)
file_trans = pool.map(process, paths)
file_trans = list(np.concatenate(file_trans).flat)
end_time = time.time()
elapsed_time = end_time - start_time
print(' \n time taken to preprocess split : ',
split,
' = ',
time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
final_count = finished.val() + skipped.val()
return pd.DataFrame(file_trans, columns=['id']), final_count
def load_audio(audio_path):
audio_segment = AudioSegment.from_file(audio_path, 'flac')
audio = np.array(audio_segment.get_array_of_samples(), dtype=np.int64)
return audio
def run(input_dir, output_dir, tokenizer_vocab_path):
tokenizer = librispeech_tokenizer.load_tokenizer(tokenizer_vocab_path)
os.makedirs(output_dir, exist_ok=True)
subset_list = [
'train-clean-100',
'train-clean-360',
'train-other-500',
'dev-clean',
'dev-other',
'test-clean',
'test-other',
]
for subset in subset_list:
logging.info('Processing split = %s...', subset)
in_dir = os.path.join(input_dir, subset)
out_dir = os.path.join(output_dir, subset)
os.makedirs(out_dir, exist_ok=True)
example_ids, num_entries = preprocess_data(
in_dir, output_dir, tokenizer, subset)
if num_entries != librispeech_example_counts[subset]:
raise ValueError('Preprocessed dataframe final count not equal to '
'expected count: {} vs expected {}'.format(
num_entries, librispeech_example_counts[subset]))
example_ids.to_csv(os.path.join(output_dir, f'{subset}.csv'))
def main():
FLAGS(sys.argv)  # Parse absl flags when run as a standalone script.
run(FLAGS.raw_input_dir, FLAGS.output_dir, FLAGS.tokenizer_vocab_path)
if __name__ == '__main__':
main()
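# Illustrative invocation sketch (paths are hypothetical):
#   python3 datasets/librispeech_preprocess.py \
#     --raw_input_dir=/data/LibriSpeech \
#     --output_dir=/data/librispeech_processed \
#     --tokenizer_vocab_path=/data/spm_model.vocab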
|
r"""MLCommons dataset setup script.
If you already have a copy of a dataset, you can skip downloading it and
provide the path when running your algorithm with submission_runner.py via
--data_dir.
Note that in order to avoid potential accidental deletion, this script does NOT
delete any intermediate temporary files (such as zip archives) without a user
confirmation. Deleting temp files is particularly important for Criteo 1TB, as
there can be multiple copies of the dataset on disk during preprocessing if
files are not cleaned up. If you do not want any temp files to be deleted, you
can pass --interactive_deletion=false and then all files will be downloaded to
the provided --temp_dir, and the user can manually delete these after
downloading has finished.
Note that some functions use subprocess.Popen(..., shell=True), which can be
dangerous if the user injects code into the --data_dir or --temp_dir flags. We
do some basic sanitization in main(), but submitters should not let untrusted
users run this script on their systems.
If mounting a GCS bucket with gcsfuse, --temp_dir should NOT be a path to the
GCS bucket, as this can result in *orders of magnitude* slower download speeds
due to write speed issues (--data_dir can include the GCS bucket though).
Note that some of the disk usage numbers below may be underestimates if the temp
and final data dir locations are on the same drive.
Criteo download size: ~350GB
Criteo final disk size: ~1TB
FastMRI download size:
FastMRI final disk size:
LibriSpeech download size:
LibriSpeech final disk size:
OGBG download size:
OGBG final disk size:
WMT download size: (1.58 GiB + ) =
WMT final disk size:
_______________________
Total download size:
Total disk size:
Some datasets require signing a form before downloading:
FastMRI:
Fill out form on https://fastmri.med.nyu.edu/ and run this script with the
links that are emailed to you for "knee_singlecoil_train" and
"knee_singlecoil_val".
ImageNet:
Register on https://image-net.org/ and run this script with the links to the
ILSVRC2012 train and validation images.
Note that for the tfds ImageNet build, you may have to increase the maximum
number of open files allowed using `ulimit -n 8192`.
Example command:
python3 datasets/dataset_setup.py \
--data_dir=~/data \
--temp_dir=/tmp/mlcommons_data \
--imagenet \
--imagenet_train_url=<train_url> \
--imagenet_val_url=<val_url> \
--framework=jax
"""
# pylint: disable=logging-format-interpolation
# pylint: disable=consider-using-with
import functools
import os
import resource
import shutil
import subprocess
import tarfile
from absl import app
from absl import flags
from absl import logging
import requests
import tensorflow as tf
import tensorflow_datasets as tfds
from torchvision.datasets import CIFAR10
import tqdm
from algorithmic_efficiency.workloads.wmt import tokenizer
from algorithmic_efficiency.workloads.wmt.input_pipeline import \
normalize_feature_names
from datasets import librispeech_preprocess
from datasets import librispeech_tokenizer
IMAGENET_TRAIN_TAR_FILENAME = 'ILSVRC2012_img_train.tar'
IMAGENET_VAL_TAR_FILENAME = 'ILSVRC2012_img_val.tar'
FASTMRI_TRAIN_TAR_FILENAME = 'knee_singlecoil_train.tar'
FASTMRI_VAL_TAR_FILENAME = 'knee_singlecoil_val.tar'
FASTMRI_TEST_TAR_FILENAME = 'knee_singlecoil_test.tar'
flags.DEFINE_boolean(
'interactive_deletion',
True,
'If true, user will be prompted before any files are deleted. If false, no '
'files will be deleted.')
flags.DEFINE_boolean(
'all',
False,
'Whether or not to download all datasets. If false, can download some '
'combination of datasets by setting the individual dataset flags below.')
flags.DEFINE_boolean('criteo',
False,
'If --all=false, whether or not to download Criteo.')
flags.DEFINE_boolean('cifar',
False,
'If --all=false, whether or not to download CIFAR-10.')
flags.DEFINE_boolean('fastmri',
False,
'If --all=false, whether or not to download FastMRI.')
flags.DEFINE_boolean('imagenet',
False,
'If --all=false, whether or not to download Imagenet.')
flags.DEFINE_boolean('librispeech',
False,
'If --all=false, whether or not to download LibriSpeech.')
flags.DEFINE_boolean('mnist',
False,
'If --all=false, whether or not to download MNIST.')
flags.DEFINE_boolean('ogbg',
False,
'If --all=false, whether or not to download OGBG.')
flags.DEFINE_boolean('wmt',
False,
'If --all=false, whether or not to download WMT.')
flags.DEFINE_string(
'data_dir',
None,
'The path to the folder where datasets should be downloaded.')
flags.DEFINE_string(
'temp_dir',
'/tmp',
'A local path to a folder where temp files can be downloaded.')
flags.DEFINE_string(
'imagenet_train_url',
None,
'Only necessary if you want this script to `wget` the ImageNet train '
'split. If not, you can supply the path to --data_dir in '
'submission_runner.py.')
flags.DEFINE_string(
'imagenet_val_url',
None,
'Only necessary if you want this script to `wget` the ImageNet validation '
'split. If not, you can supply the path to --data_dir in '
'submission_runner.py.')
flags.DEFINE_string(
'fastmri_knee_singlecoil_train_url',
None,
'Only necessary if you want this script to `wget` the FastMRI train '
'split. If not, you can supply the path to --data_dir in '
'submission_runner.py.')
flags.DEFINE_string(
'fastmri_knee_singlecoil_val_url',
None,
'Only necessary if you want this script to `wget` the FastMRI validation '
'split. If not, you can supply the path to --data_dir in '
'submission_runner.py.')
flags.DEFINE_integer(
'num_decompression_threads',
8,
'The number of threads to use in parallel when decompressing.')
flags.DEFINE_string('framework', None, 'Can be either jax or pytorch.')
flags.DEFINE_boolean('train_tokenizer', True, 'Train Librispeech tokenizer.')
FLAGS = flags.FLAGS
def _maybe_mkdir(d):
if not os.path.exists(d):
os.makedirs(d)
def _maybe_prompt_for_deletion(paths, interactive_deletion):
if not interactive_deletion:
return
files_for_deletion = '\n'.join(paths)
logging.info('\n\n\nWARNING: the following temp files will be DELETED:'
f'\n{files_for_deletion}')
delete_str = input('Confirm deletion? [y/N]: ')
if delete_str.lower() == 'y':
del_cmd = 'rm ' + ' '.join(f'"{s}"' for s in paths)
logging.info(f'Running deletion command:\n{del_cmd}')
subprocess.Popen(del_cmd, shell=True).communicate()
else:
logging.info('Skipping deletion.')
def _download_url(url, data_dir):
data_dir = os.path.expanduser(data_dir)
file_path = os.path.join(data_dir, url.split('/')[-1])
response = requests.get(url, stream=True, timeout=600)
total_size_in_bytes = int(response.headers.get('Content-length', 0))
total_size_in_mib = total_size_in_bytes / (2**20)
progress_bar = tqdm.tqdm(total=total_size_in_mib, unit='MiB', unit_scale=True)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if os.path.exists(file_path):
while True:
overwrite = input('File already exists {}.\n Overwrite? (Y/n)'.format(
file_path)).lower()
if overwrite in ['y', 'n']:
break
logging.info('Invalid response. Try again.')
if overwrite == 'n':
logging.info('Skipping download to {}'.format(file_path))
return
with open(file_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=2**10):
chunk_size_in_mib = len(chunk) / (2**20)
progress_bar.update(chunk_size_in_mib)
f.write(chunk)
progress_bar.close()
if (progress_bar.total != 0 and progress_bar.n != progress_bar.total):
raise RuntimeError(
('Download corrupted, size {n} MiB from {url} does not match '
'expected size {size} MiB').format(
url=url, n=progress_bar.n, size=progress_bar.total))
def download_criteo(data_dir,
tmp_dir,
num_decompression_threads,
interactive_deletion):
criteo_dir = os.path.join(data_dir, 'criteo')
tmp_criteo_dir = os.path.join(tmp_dir, 'criteo')
_maybe_mkdir(criteo_dir)
_maybe_mkdir(tmp_criteo_dir)
processes = []
gz_paths = []
# Download and unzip.
for day in range(24):
logging.info(f'Downloading Criteo day {day}...')
wget_cmd = (
f'wget --no-clobber --directory-prefix="{tmp_criteo_dir}" '
f'https://sacriteopcail01.z16.web.core.windows.net/day_{day}.gz')
input_path = os.path.join(tmp_criteo_dir, f'day_{day}.gz')
gz_paths.append(input_path)
unzipped_path = os.path.join(criteo_dir, f'day_{day}.csv')
unzip_cmd = (f'pigz -d -c -p{num_decompression_threads} "{input_path}" > '
f'"{unzipped_path}"')
command_str = f'{wget_cmd} && {unzip_cmd}'
logging.info(f'Running Criteo download command:\n{command_str}')
processes.append(subprocess.Popen(command_str, shell=True))
for p in processes:
p.communicate()
_maybe_prompt_for_deletion(gz_paths, interactive_deletion)
# Split into files with 1M lines each: day_1.csv -> day_1_[0-40].csv.
for batch in range(6):
batch_processes = []
unzipped_paths = []
for day_offset in range(4):
day = batch * 4 + day_offset
unzipped_path = os.path.join(criteo_dir, f'day_{day}.csv')
unzipped_paths.append(unzipped_path)
split_path = os.path.join(criteo_dir, f'day_{day}_')
split_cmd = ('split -a 3 -d -l 1000000 --additional-suffix=.csv '
f'"{unzipped_path}" "{split_path}"')
logging.info(f'Running Criteo split command:\n{split_cmd}')
batch_processes.append(subprocess.Popen(split_cmd, shell=True))
for p in batch_processes:
p.communicate()
_maybe_prompt_for_deletion(unzipped_paths, interactive_deletion)
def download_cifar(data_dir, framework):
if framework == 'jax':
tfds.builder('cifar10:3.0.2', data_dir=data_dir).download_and_prepare()
elif framework == 'pytorch':
CIFAR10(root=data_dir, train=True, download=True)
CIFAR10(root=data_dir, train=False, download=True)
else:
raise ValueError('Invalid value for framework: {}'.format(framework))
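# Example invocation sketch for downloading only CIFAR-10 (paths are
# hypothetical; see the flag definitions above):
#   python3 datasets/dataset_setup.py --data_dir=~/data --cifar --framework=jax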
def download_fastmri(data_dir,
fastmri_train_url,
fastmri_val_url,
fastmri_test_url):
data_dir = os.path.join(data_dir, 'fastmri')
# Download fastmri train dataset
logging.info(
'Downloading fastmri train dataset from {}'.format(fastmri_train_url))
_download_url(url=fastmri_train_url, data_dir=data_dir)
# Download fastmri val dataset
logging.info(
'Downloading fastmri val dataset from {}'.format(fastmri_val_url))
_download_url(url=fastmri_val_url, data_dir=data_dir)
# Download fastmri test dataset
logging.info(
'Downloading fastmri test dataset from {}'.format(fastmri_test_url))
_download_url(url=fastmri_test_url, data_dir=data_dir)
def extract(source, dest):
if not os.path.exists(dest):
os.makedirs(dest)
tar = tarfile.open(source)
tar.extractall(dest)
tar.close()
def setup_fastmri(data_dir):
train_tar_file_path = os.path.join(data_dir, FASTMRI_TRAIN_TAR_FILENAME)
val_tar_file_path = os.path.join(data_dir, FASTMRI_VAL_TAR_FILENAME)
test_tar_file_path = os.path.join(data_dir, FASTMRI_TEST_TAR_FILENAME)
# Make train, val and test subdirectories
fastmri_data_dir = os.path.join(data_dir, 'fastmri')
train_data_dir = os.path.join(fastmri_data_dir, 'train')
os.makedirs(train_data_dir)
val_data_dir = os.path.join(fastmri_data_dir, 'val')
os.makedirs(val_data_dir)
test_data_dir = os.path.join(fastmri_data_dir, 'test')
os.makedirs(test_data_dir)
# Unzip tar file into subdirectories
logging.info('Unzipping {} to {}'.format(train_tar_file_path,
fastmri_data_dir))
extract(train_tar_file_path, train_data_dir)
logging.info('Unzipping {} to {}'.format(val_tar_file_path, fastmri_data_dir))
extract(val_tar_file_path, val_data_dir)
logging.info('Unzipping {} to {}'.format(test_tar_file_path, fastmri_data_dir))
extract(test_tar_file_path, test_data_dir)
logging.info('Set up fastmri dataset complete')
def download_imagenet(data_dir, imagenet_train_url, imagenet_val_url):
imagenet_train_filepath = os.path.join(data_dir, IMAGENET_TRAIN_TAR_FILENAME)
imagenet_val_filepath = os.path.join(data_dir, IMAGENET_VAL_TAR_FILENAME)
# Download imagenet train dataset
if not os.path.exists(imagenet_train_filepath):
logging.info(
'Downloading imagenet train dataset from {}'.format(imagenet_train_url))
_download_url(url=imagenet_train_url, data_dir=data_dir)
# Download imagenet val dataset
if not os.path.exists(imagenet_val_filepath):
logging.info('Downloading imagenet validation dataset from {}'.format(
imagenet_val_url))
_download_url(url=imagenet_val_url, data_dir=data_dir)
# Download imagenet test set
download_imagenet_v2(data_dir)
def setup_imagenet(data_dir, framework=None):
if framework == 'jax':
setup_imagenet_jax(data_dir)
elif framework == 'pytorch':
setup_imagenet_pytorch(data_dir)
else:
raise ValueError('Invalid value for framework: {}'.format(framework))
def setup_imagenet_jax(data_dir):
train_tar_file_path = os.path.join(data_dir, IMAGENET_TRAIN_TAR_FILENAME)
val_tar_file_path = os.path.join(data_dir, IMAGENET_VAL_TAR_FILENAME)
# Setup jax dataset dir
imagenet_jax_data_dir = os.path.join(data_dir, 'jax')
manual_download_dir = os.path.join(imagenet_jax_data_dir,
'downloads',
'manual')
os.makedirs(manual_download_dir, exist_ok=True)
  # Move tar files into jax/downloads/manual.
  logging.info('Checking if tar files already exist in jax/downloads/manual.')
if not os.path.exists(
os.path.join(manual_download_dir, IMAGENET_TRAIN_TAR_FILENAME)):
    logging.info('Moving {} to {}'.format(train_tar_file_path,
                                          manual_download_dir))
shutil.move(train_tar_file_path, manual_download_dir)
if not os.path.exists(
os.path.join(manual_download_dir, IMAGENET_VAL_TAR_FILENAME)):
    logging.info('Moving {} to {}'.format(val_tar_file_path,
                                          manual_download_dir))
shutil.move(val_tar_file_path, manual_download_dir)
logging.info('Preparing imagenet data.')
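  # TFDS opens a very large number of shard files while preparing ImageNet, so
  # lift the process's open-file-descriptor limit before calling it.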
resource.setrlimit(resource.RLIMIT_NOFILE,
(resource.RLIM_INFINITY, resource.RLIM_INFINITY))
ds_builder = tfds.builder(
'imagenet2012:5.1.0', data_dir=os.path.join(imagenet_jax_data_dir))
ds_builder.download_and_prepare()
logging.info('Set up imagenet dataset for jax framework complete')
def setup_imagenet_pytorch(data_dir):
train_tar_file_path = os.path.join(data_dir, IMAGENET_TRAIN_TAR_FILENAME)
val_tar_file_path = os.path.join(data_dir, IMAGENET_VAL_TAR_FILENAME)
  # Set up the pytorch dataset dir.
imagenet_pytorch_data_dir = os.path.join(data_dir, 'pytorch')
os.makedirs(imagenet_pytorch_data_dir)
os.makedirs(os.path.join(imagenet_pytorch_data_dir, 'train'))
os.makedirs(os.path.join(imagenet_pytorch_data_dir, 'val'))
  # Move tar files into the pytorch directory.
  logging.info('Moving {} to {}'.format(train_tar_file_path,
                                        imagenet_pytorch_data_dir))
  shutil.move(train_tar_file_path, imagenet_pytorch_data_dir)
  logging.info('Moving {} to {}'.format(val_tar_file_path,
                                        imagenet_pytorch_data_dir))
shutil.move(val_tar_file_path, imagenet_pytorch_data_dir)
  # Extract train data.
logging.info('Extracting imagenet train data')
extract(
os.path.join(imagenet_pytorch_data_dir, IMAGENET_TRAIN_TAR_FILENAME),
os.path.join(imagenet_pytorch_data_dir, 'train'))
train_tar_filenames = os.listdir(
os.path.join(imagenet_pytorch_data_dir, 'train'))
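  # The ImageNet train archive contains one tar file per class; extract each
  # class tar into its own subdirectory of train/.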
for tar_filename in train_tar_filenames:
if tar_filename.endswith('.tar'):
dir_name = tar_filename[:-4]
      extract(
          os.path.join(imagenet_pytorch_data_dir, 'train', tar_filename),
          os.path.join(imagenet_pytorch_data_dir, 'train', dir_name))
# Extract val data
logging.info('Extracting imagenet val data')
extract(
os.path.join(imagenet_pytorch_data_dir, IMAGENET_VAL_TAR_FILENAME),
os.path.join(imagenet_pytorch_data_dir, 'val'))
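  # valprep.sh reorganizes the extracted validation JPEGs into per-class
  # subdirectories, which torchvision-style ImageFolder loaders expect.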
valprep_command = [
'wget',
'-qO-',
('https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/'
'valprep.sh'),
]
  # With shell=True the command must be passed as a single string, otherwise
  # only the first list element ('wget') would be executed.
  valprep_process = subprocess.Popen(' '.join(valprep_command), shell=True)
valprep_process.communicate()
logging.info('Set up imagenet dataset for pytorch framework complete')
def download_imagenet_v2(data_dir):
tfds.builder(
'imagenet_v2/matched-frequency:3.0.0',
data_dir=data_dir).download_and_prepare()
def download_librispeech(dataset_dir, tmp_dir, train_tokenizer):
  # After extraction the result is a folder named LibriSpeech containing audio
  # files in .flac format along with transcript files that map each audio file
  # name to its transcription.
tmp_librispeech_dir = os.path.join(tmp_dir, 'LibriSpeech')
_maybe_mkdir(tmp_librispeech_dir)
for split in ['dev', 'test']:
for version in ['clean', 'other']:
wget_cmd = f'wget http://www.openslr.org/resources/12/{split}-{version}.tar.gz -O - | tar xz' # pylint: disable=line-too-long
subprocess.Popen(wget_cmd, shell=True, cwd=tmp_dir).communicate()
tars = [
'raw-metadata.tar.gz',
'train-clean-100.tar.gz',
'train-clean-360.tar.gz',
'train-other-500.tar.gz',
]
for tar_filename in tars:
wget_cmd = f'wget http://www.openslr.org/resources/12/{tar_filename} -O - | tar xz ' # pylint: disable=line-too-long
subprocess.Popen(wget_cmd, shell=True, cwd=tmp_dir).communicate()
if train_tokenizer:
tokenizer_vocab_path = librispeech_tokenizer.run(
train=True, data_dir=tmp_librispeech_dir)
# Preprocess data.
librispeech_dir = os.path.join(dataset_dir, 'librispeech')
librispeech_preprocess.run(
input_dir=tmp_librispeech_dir,
output_dir=librispeech_dir,
tokenizer_vocab_path=tokenizer_vocab_path)
def download_mnist(data_dir):
tfds.builder('mnist', data_dir=data_dir).download_and_prepare()
def download_ogbg(data_dir):
tfds.builder('ogbg_molpcba:0.1.3', data_dir=data_dir).download_and_prepare()
def download_wmt(data_dir):
"""WMT14 and WMT17 de-en."""
for ds_name in ['wmt14_translate/de-en:1.0.0', 'wmt17_translate/de-en:1.0.0']:
dataset_builder = tfds.builder(ds_name, data_dir=data_dir)
dataset_builder.download_and_prepare()
if ds_name == 'wmt17_translate/de-en:1.0.0':
ds = dataset_builder.as_dataset(split='train', shuffle_files=False)
ds = ds.map(
functools.partial(normalize_feature_names, dataset_builder.info),
num_parallel_calls=tf.data.AUTOTUNE)
# Tokenize data.
vocab_path = os.path.join(data_dir, 'wmt_sentencepiece_model')
tokenizer.train_tokenizer(
ds, vocab_path=vocab_path, vocab_size=32000, max_corpus_chars=10**7)
def main(_):
data_dir = FLAGS.data_dir
tmp_dir = FLAGS.temp_dir
num_decompression_threads = FLAGS.num_decompression_threads
bad_chars = [';', ' ', '&', '"']
if any(s in data_dir for s in bad_chars):
raise ValueError(f'Invalid data_dir: {data_dir}.')
if any(s in tmp_dir for s in bad_chars):
raise ValueError(f'Invalid temp_dir: {tmp_dir}.')
data_dir = os.path.abspath(os.path.expanduser(data_dir))
logging.info('Downloading data to %s...', data_dir)
if FLAGS.all or FLAGS.criteo:
logging.info('Downloading criteo...')
download_criteo(data_dir,
tmp_dir,
num_decompression_threads,
FLAGS.interactive_deletion)
if FLAGS.all or FLAGS.mnist:
logging.info('Downloading MNIST...')
download_mnist(data_dir)
if FLAGS.all or FLAGS.fastmri:
logging.info('Downloading FastMRI...')
knee_singlecoil_train_url = FLAGS.fastmri_knee_singlecoil_train_url
knee_singlecoil_val_url = FLAGS.fastmri_knee_singlecoil_val_url
knee_singlecoil_test_url = FLAGS.fastmri_knee_singlecoil_test_url
    if (knee_singlecoil_train_url is None or knee_singlecoil_val_url is None or
        knee_singlecoil_test_url is None):
      raise ValueError(
          'Must provide all of --fastmri_knee_singlecoil_{train,val,test}_url '
          'to download the FastMRI dataset. Sign up for the URLs at '
          'https://fastmri.med.nyu.edu/.')
    download_fastmri(data_dir,
                     knee_singlecoil_train_url,
                     knee_singlecoil_val_url,
                     knee_singlecoil_test_url)
if FLAGS.all or FLAGS.imagenet:
flags.mark_flag_as_required('imagenet_train_url')
flags.mark_flag_as_required('imagenet_val_url')
logging.info('Downloading ImageNet...')
imagenet_train_url = FLAGS.imagenet_train_url
imagenet_val_url = FLAGS.imagenet_val_url
if imagenet_train_url is None or imagenet_val_url is None:
raise ValueError(
'Must provide both --imagenet_{train,val}_url to download the '
'ImageNet dataset. Sign up for the URLs at https://image-net.org/.')
if FLAGS.framework is None:
raise ValueError(
'Please specify either jax or pytorch framework through framework '
'flag.')
imagenet_data_dir = os.path.join(data_dir, 'imagenet')
download_imagenet(imagenet_data_dir, imagenet_train_url, imagenet_val_url)
setup_imagenet(imagenet_data_dir, framework=FLAGS.framework)
if FLAGS.all or FLAGS.librispeech:
logging.info('Downloading Librispeech...')
download_librispeech(data_dir, tmp_dir, train_tokenizer=True)
if FLAGS.all or FLAGS.cifar:
logging.info('Downloading CIFAR...')
download_cifar(data_dir, FLAGS.framework)
if FLAGS.all or FLAGS.ogbg:
logging.info('Downloading OGBG...')
download_ogbg(data_dir)
if FLAGS.all or FLAGS.wmt:
logging.info('Downloading WMT...')
download_wmt(data_dir)
# pylint: enable=logging-format-interpolation
# pylint: enable=consider-using-with
if __name__ == '__main__':
app.run(main)
|
"""Sentence Piece Tokenizer and ops for tokenizing / de-tokenizing a dataset.
Forked from:
https://github.com/google/flax/blob/b60f7f45b90f8fc42a88b1639c9cc88a40b298d3/examples/lm1b/tokenizer.py
"""
import os
import tempfile
from typing import Dict
from absl import flags
from absl import logging
import sentencepiece as spm
import tensorflow as tf
import tensorflow_text as tftxt
gfile = tf.io.gfile
copy = tf.io.gfile.copy
exists = tf.io.gfile.exists
rename = tf.io.gfile.rename
Features = Dict[str, tf.Tensor]
flags.DEFINE_string('input_dir', '', 'Path to training data directory.')
flags.DEFINE_boolean(
'train',
False,
'Whether to train a new tokenizer or load existing one to test.')
FLAGS = flags.FLAGS
def dump_chars_for_training(data_folder, splits, maxchars: int = int(1e7)):
char_count = 0
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/ds_chars') as outfp:
for split in splits:
      # Keep `data_folder` unchanged across iterations; build the per-split
      # path in its own variable instead of reassigning it.
      split_folder = data_folder + '/' + split
      for speaker_folder in os.listdir(split_folder):
        if char_count > maxchars:
          break
        for chapter_folder in os.listdir(f'{split_folder}/{speaker_folder}'):
          trans_file = (f'{split_folder}/{speaker_folder}/{chapter_folder}/'
                        f'{speaker_folder}-{chapter_folder}.trans.txt')
if not exists(trans_file):
logging.info('path does not exist -> %s', trans_file)
continue
with open(trans_file, 'r', encoding='UTF-8') as f:
for l in f:
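              # Each .trans.txt line is '<utterance-id> <transcription>'; strip
              # the id and keep only the transcription text.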
_, line = l.strip().split(' ', maxsplit=1)
line = line + '\n'
char_count += len(line)
if char_count > maxchars:
break
logging.info(line)
outfp.write(str.encode(line))
return outfp
def train_tokenizer(data_dir: str,
splits,
vocab_size: int = 1024,
model_path: str = 'spm_model.vocab',
maxchars: int = int(1e7),
model_type: str = 'unigram',
character_coverage: float = 1.0):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
data_dir: string path to data
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
model_path: str: path of model file to save vocab model to.
model_type: str: type of sentencepiece vocab to train.
character_coverage: amount of characters covered by the model, good defaults
are 0.9995 for languages with rich character set like Japanese or Chinese
and 1.0 for other languages with small character set.
data_keys: Tuple[str]: keys of dataset to use for training.
Returns:
path to the trained sentencepiece vocabulary model.
"""
abs_model_path = os.path.abspath(os.path.expanduser(model_path))
charfile = dump_chars_for_training(data_dir, splits, maxchars=maxchars)
with tempfile.NamedTemporaryFile(
delete=False, prefix='/tmp/sp_tmp') as model_fp:
pass # we just want a prefix'd tmp-filename
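  # SentencePieceTrainer writes <model_prefix>.model (and .vocab) to disk, so
  # the trained model ends up at model_fp.name + '.model'.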
argstr = ' '.join([
f'--input={charfile.name}',
f'--vocab_size={vocab_size}',
f'--character_coverage={character_coverage}',
f'--model_prefix={model_fp.name}',
f'--model_type={model_type}',
])
spm.SentencePieceTrainer.Train(argstr)
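  # Copy next to the destination first and then rename, so readers never see a
  # partially written model file.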
copy_rename_path = abs_model_path + '.rntmp'
copy(model_fp.name + '.model', copy_rename_path, overwrite=True)
rename(copy_rename_path, abs_model_path, overwrite=True)
logging.info('Copied %s to %s', model_fp.name + '.model', abs_model_path)
return abs_model_path
def load_tokenizer(model_filepath):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
if not exists(model_filepath):
logging.info('Tokenizer not found.')
with gfile.GFile(model_filepath, 'rb') as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=False, add_eos=True, reverse=False)
return sp_tokenizer
def run(train, data_dir):
logging.info('Data dir: %s', data_dir)
if train:
logging.info('Training...')
splits = ['train-clean-100']
return train_tokenizer(data_dir, splits)
else:
tokenizer = load_tokenizer(os.path.join(data_dir, 'spm_model.vocab'))
test_input = 'OPEN SOURCE ROCKS'
tokens = tokenizer.tokenize(test_input)
detokenized = tokenizer.detokenize(tokens).numpy().decode('utf-8')
logging.info('Original input = %s', test_input)
    logging.info('Output after tokenizing and detokenizing = %s',
detokenized)
if detokenized == test_input:
logging.info('Tokenizer working correctly!')
def main():
  run(FLAGS.train, FLAGS.input_dir)
if __name__ == '__main__':
main()
|
"""Test for the equality of the SSIM calculation in Jax and PyTorch."""
import os
from typing import Tuple
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as jnp
import numpy as np
import torch
from algorithmic_efficiency.pytorch_utils import pytorch_setup
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import \
_uniform_filter as _jax_uniform_filter
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.ssim import \
ssim as jax_ssim
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import \
_uniform_filter as _pytorch_uniform_filter
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.ssim import \
ssim as pytorch_ssim
# Make sure no GPU memory is preallocated to Jax.
os.environ['XLA_PYTHON_CLIENT_PREALLOCATE'] = 'false'
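# pytorch_setup() returns (use_ddp, rank, device, n_gpus); only the device is
# needed here.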
DEVICE = pytorch_setup()[2]
def _create_fake_im(height: int, width: int) -> Tuple[jnp.array, torch.Tensor]:
fake_im = np.random.randn(height, width)
jax_fake_im = jnp.asarray(fake_im)
pytorch_fake_im = torch.as_tensor(fake_im, device=DEVICE)
return jax_fake_im, pytorch_fake_im
def _create_fake_batch(
batch_size: int, height: int, width: int
) -> Tuple[Tuple[jnp.array, jnp.array], Tuple[torch.Tensor, torch.Tensor]]:
logits = np.random.randn(batch_size, height, width)
targets = np.random.randn(batch_size, height, width)
jax_logits = jnp.asarray(logits)
jax_targets = jnp.asarray(targets)
pytorch_logits = torch.as_tensor(logits, device=DEVICE)
pytorch_targets = torch.as_tensor(targets, device=DEVICE)
return (jax_logits, jax_targets), (pytorch_logits, pytorch_targets)
class SSIMTest(parameterized.TestCase):
"""Test for equivalence of SSIM and _uniform_filter implementations in Jax
and PyTorch."""
@parameterized.named_parameters(
dict(testcase_name='fastmri_im', height=320, width=320),
dict(testcase_name='uneven_even_im', height=31, width=16),
dict(testcase_name='even_uneven_im', height=42, width=53),
)
def test_uniform_filter(self, height: int, width: int) -> None:
jax_im, pytorch_im = _create_fake_im(height, width)
jax_result = np.asarray(_jax_uniform_filter(jax_im))
torch_result = _pytorch_uniform_filter(pytorch_im).cpu().numpy()
assert np.allclose(jax_result, torch_result, atol=1e-6)
@parameterized.named_parameters(
dict(
testcase_name='fastmri_batch', batch_size=256, height=320, width=320),
dict(
testcase_name='uneven_even_batch', batch_size=8, height=31, width=16),
dict(
testcase_name='even_uneven_batch', batch_size=8, height=42, width=53),
)
def test_ssim(self, batch_size: int, height: int, width: int) -> None:
jax_inputs, pytorch_inputs = _create_fake_batch(batch_size, height, width)
jax_ssim_result = jax_ssim(*jax_inputs)
pytorch_ssim_result = pytorch_ssim(*pytorch_inputs)
self.assertEqual(jax_ssim_result.shape, pytorch_ssim_result.shape)
assert np.allclose(
jax_ssim_result.sum().item(),
pytorch_ssim_result.sum().item(),
atol=1e-6)
if __name__ == '__main__':
absltest.main()
|
import jax
import numpy as np
import pytest
# isort: skip_file
# pylint:disable=line-too-long
from algorithmic_efficiency.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
from algorithmic_efficiency.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
# pylint:enable=line-too-long
WORKLOADS = [
'cifar',
'criteo1tb',
'fastmri',
'imagenet_resnet',
'imagenet_vit',
# TODO: make tests work for these.
# 'librispeech_conformer',
# 'librispeech_deepspeech',
'mnist',
'ogbg',
'wmt',
]
# Ideally we would match the shapes layer-wise, but for that we
# have to ensure the exact same order of the shapes and that the
# shapes of the weights of the same layer type actually match between
# Jax and PyTorch, which is not always the case.
@pytest.mark.parametrize('workload', WORKLOADS)
def test_param_shapes(workload):
jax_workload, pytorch_workload = get_workload(workload)
# Compare number of parameter tensors of both models.
jax_param_shapes = jax.tree_util.tree_leaves(
jax_workload.param_shapes.unfreeze())
pytorch_param_shapes = jax.tree_util.tree_leaves(
pytorch_workload.param_shapes)
assert len(jax_param_shapes) == len(pytorch_param_shapes)
# Check if total number of params deduced from shapes match.
num_jax_params = 0
num_pytorch_params = 0
for jax_shape, pytorch_shape in zip(jax_param_shapes, pytorch_param_shapes):
num_jax_params += np.prod(jax_shape.shape_tuple)
num_pytorch_params += np.prod(pytorch_shape.shape_tuple)
assert num_jax_params == num_pytorch_params
def get_workload(workload):
if workload == 'cifar':
jax_workload = JaxCifarWorkload()
pytorch_workload = PyTorchCifarWorkload()
elif workload == 'criteo1tb':
jax_workload = JaxCriteoWorkload()
pytorch_workload = PyTorchCriteoWorkload()
elif workload == 'fastmri':
jax_workload = JaxFastMRIWorkload()
pytorch_workload = PyTorchFastMRIWorkload()
elif workload == 'imagenet_resnet':
jax_workload = JaxImagenetResNetWorkload()
pytorch_workload = PyTorchImagenetResNetWorkload()
elif workload == 'imagenet_vit':
jax_workload = JaxImagenetViTWorkload()
pytorch_workload = PyTorchImagenetViTWorkload()
elif workload == 'librispeech_conformer':
jax_workload = JaxLibriSpeechConformerWorkload()
pytorch_workload = PytorchLibriSpeechConformerWorkload()
elif workload == 'librispeech_deepspeech':
jax_workload = JaxLibriSpeechDeepSpeechWorkload()
pytorch_workload = PytorchLibriSpeechDeepSpeechWorkload()
elif workload == 'mnist':
jax_workload = JaxMnistWorkload()
pytorch_workload = PyTorchMnistWorkload()
elif workload == 'ogbg':
jax_workload = JaxOgbgWorkload()
pytorch_workload = PyTorchOgbgWorkload()
elif workload == 'wmt':
jax_workload = JaxWmtWorkload()
pytorch_workload = PyTorchWmtWorkload()
else:
raise ValueError(f'Workload {workload} is not available.')
_ = jax_workload.init_model_fn(jax.random.PRNGKey(0))
_ = pytorch_workload.init_model_fn([0])
return jax_workload, pytorch_workload
if __name__ == '__main__':
for w in WORKLOADS:
test_param_shapes(w)
|
import jax
import pytest
from absl import logging
from algorithmic_efficiency import spec
# isort: skip_file
# pylint:disable=line-too-long
from algorithmic_efficiency.workloads.cifar.cifar_jax.workload import CifarWorkload as JaxCifarWorkload
from algorithmic_efficiency.workloads.cifar.cifar_pytorch.workload import CifarWorkload as PyTorchCifarWorkload
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import Criteo1TbDlrmSmallWorkload as JaxCriteoWorkload
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import Criteo1TbDlrmSmallWorkload as PyTorchCriteoWorkload
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import FastMRIWorkload as JaxFastMRIWorkload
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import FastMRIWorkload as PyTorchFastMRIWorkload
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import ImagenetResNetWorkload as JaxImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import ImagenetResNetWorkload as PyTorchImagenetResNetWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import ImagenetVitWorkload as JaxImagenetViTWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import ImagenetVitWorkload as PyTorchImagenetViTWorkload
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import LibriSpeechConformerWorkload as JaxLibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import LibriSpeechConformerWorkload as PytorchLibriSpeechConformerWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import LibriSpeechDeepSpeechWorkload as JaxLibriSpeechDeepSpeechWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import LibriSpeechDeepSpeechWorkload as PytorchLibriSpeechDeepSpeechWorkload
from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import MnistWorkload as JaxMnistWorkload
from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import MnistWorkload as PyTorchMnistWorkload
from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import OgbgWorkload as JaxOgbgWorkload
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import OgbgWorkload as PyTorchOgbgWorkload
from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import WmtWorkload as JaxWmtWorkload
from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import WmtWorkload as PyTorchWmtWorkload
# pylint:enable=line-too-long
WORKLOADS = [
'cifar',
'criteo1tb',
'fastmri',
'imagenet_resnet',
'imagenet_vit',
'librispeech_conformer',
'librispeech_deepspeech',
'mnist',
'ogbg',
'wmt',
]
def count_param_types(param_types):
types_dict = {}
for t in param_types:
if t not in types_dict:
types_dict[t] = 1
else:
types_dict[t] += 1
return types_dict
def _count_mismatches(jax_param_types_dict, pytorch_param_types_dict, keys):
mismatches = ''
for key in keys:
jax_count = jax_param_types_dict.get(key, 0)
pytorch_count = pytorch_param_types_dict.get(key, 0)
if jax_count != pytorch_count:
mismatches += f'\nKey: {key}, Jax {jax_count} != Pytorch {pytorch_count}.'
return mismatches
def _check_attention_qkv_match(jax_param_types_dict, pytorch_param_types_dict):
# Sometimes one framework will implement QKV as a single parameter, so we need
# to make sure there are the same number of QKV params as Q, K, V.
num_qkv = {
'jax':
jax_param_types_dict.get(spec.ParameterType.ATTENTION_QKV, 0),
'pytorch':
pytorch_param_types_dict.get(spec.ParameterType.ATTENTION_QKV, 0),
}
num_q = {
'jax':
jax_param_types_dict.get(spec.ParameterType.ATTENTION_Q, 0),
'pytorch':
pytorch_param_types_dict.get(spec.ParameterType.ATTENTION_Q, 0),
}
num_k = {
'jax':
jax_param_types_dict.get(spec.ParameterType.ATTENTION_K, 0),
'pytorch':
pytorch_param_types_dict.get(spec.ParameterType.ATTENTION_K, 0),
}
num_v = {
'jax':
jax_param_types_dict.get(spec.ParameterType.ATTENTION_V, 0),
'pytorch':
pytorch_param_types_dict.get(spec.ParameterType.ATTENTION_V, 0),
}
num_bias = {
'jax':
jax_param_types_dict.get(spec.ParameterType.ATTENTION_BIAS, 0),
'pytorch':
pytorch_param_types_dict.get(spec.ParameterType.ATTENTION_BIAS, 0),
}
qkv_match = num_qkv['jax'] == num_qkv['pytorch']
q_match = num_q['jax'] == num_q['pytorch']
k_match = num_k['jax'] == num_k['pytorch']
v_match = num_v['jax'] == num_v['pytorch']
bias_match = num_bias['jax'] == num_bias['pytorch']
qkv_match = qkv_match and q_match and k_match and v_match and bias_match
  # When one framework fuses QKV into a single parameter it has one bias per
  # attention block while the other has three (Q, K and V), so we subtract
  # 2 * num_qkv biases from the unfused side before comparing.
jax_qkv_match = (
num_q['pytorch'] == num_k['pytorch'] == num_v['pytorch'] == num_qkv['jax']
and (num_qkv['jax'] != 0 and
(num_bias['pytorch'] - 2 * num_qkv['jax']) == num_bias['jax']))
pytorch_qkv_match = (
num_q['jax'] == num_k['jax'] == num_v['jax'] == num_qkv['pytorch'] and
(num_qkv['pytorch'] != 0 and
(num_bias['jax'] - 2 * num_qkv['pytorch']) == num_bias['pytorch']))
qkv_match = qkv_match or jax_qkv_match or pytorch_qkv_match
return qkv_match
@pytest.mark.parametrize('workload_name', WORKLOADS)
def test_param_types(workload_name):
logging.info(f'Testing workload {workload_name}...')
jax_workload, pytorch_workload = get_workload(workload_name)
  # Compare the counts of each parameter type in both models.
jax_param_types = jax.tree_util.tree_leaves(jax_workload.model_params_types)
pytorch_param_types = jax.tree_util.tree_leaves(
pytorch_workload.model_params_types)
jax_param_types_dict = count_param_types(jax_param_types)
pytorch_param_types_dict = count_param_types(pytorch_param_types)
  # Jax fuses each bidirectional LSTM layer into a single weight parameter with
  # the biases concatenated into it, so the 6 LSTM layers in
  # librispeech_deepspeech contribute 6 weights and no biases on the Jax side.
  # PyTorch exposes separate input-hidden and hidden-hidden weights and biases
  # for the forward and backward directions (4 weights and 4 biases per layer),
  # so we subtract the 3 * 6 extra weights and the 3 * 6 + 6 extra biases from
  # the PyTorch counts before comparing.
if workload_name == 'librispeech_deepspeech':
pytorch_param_types_dict[spec.ParameterType.WEIGHT] -= 3 * 6
pytorch_param_types_dict[spec.ParameterType.BIAS] -= 3 * 6
pytorch_param_types_dict[spec.ParameterType.BIAS] -= 6
  # Check that the total number of parameters of each type matches.
attention_keys = {
spec.ParameterType.ATTENTION_QKV,
spec.ParameterType.ATTENTION_Q,
spec.ParameterType.ATTENTION_K,
spec.ParameterType.ATTENTION_V,
spec.ParameterType.ATTENTION_BIAS,
}
non_attention_keys = set(jax_param_types_dict.keys()).union(
set(pytorch_param_types_dict.keys()))
non_attention_keys -= attention_keys
mismatches = ''
mismatches += _count_mismatches(jax_param_types_dict,
pytorch_param_types_dict,
non_attention_keys)
qkv_match = _check_attention_qkv_match(jax_param_types_dict,
pytorch_param_types_dict)
if not qkv_match:
mismatches += _count_mismatches(jax_param_types_dict,
pytorch_param_types_dict,
attention_keys)
if mismatches:
raise ValueError(
f'On workload {workload_name}, count mismatch: {mismatches}')
def get_workload(workload_name):
if workload_name == 'cifar':
jax_workload = JaxCifarWorkload()
pytorch_workload = PyTorchCifarWorkload()
elif workload_name == 'criteo1tb':
jax_workload = JaxCriteoWorkload()
pytorch_workload = PyTorchCriteoWorkload()
elif workload_name == 'fastmri':
jax_workload = JaxFastMRIWorkload()
pytorch_workload = PyTorchFastMRIWorkload()
elif workload_name == 'imagenet_resnet':
jax_workload = JaxImagenetResNetWorkload()
pytorch_workload = PyTorchImagenetResNetWorkload()
elif workload_name == 'imagenet_vit':
jax_workload = JaxImagenetViTWorkload()
pytorch_workload = PyTorchImagenetViTWorkload()
elif workload_name == 'librispeech_conformer':
jax_workload = JaxLibriSpeechConformerWorkload()
pytorch_workload = PytorchLibriSpeechConformerWorkload()
elif workload_name == 'librispeech_deepspeech':
jax_workload = JaxLibriSpeechDeepSpeechWorkload()
pytorch_workload = PytorchLibriSpeechDeepSpeechWorkload()
elif workload_name == 'mnist':
jax_workload = JaxMnistWorkload()
pytorch_workload = PyTorchMnistWorkload()
elif workload_name == 'ogbg':
jax_workload = JaxOgbgWorkload()
pytorch_workload = PyTorchOgbgWorkload()
elif workload_name == 'wmt':
jax_workload = JaxWmtWorkload()
pytorch_workload = PyTorchWmtWorkload()
else:
raise ValueError(f'Workload {workload_name} is not available.')
_ = jax_workload.init_model_fn(jax.random.PRNGKey(0))
_ = pytorch_workload.init_model_fn([0])
return jax_workload, pytorch_workload
if __name__ == '__main__':
for w in WORKLOADS:
test_param_types(w)
|
"""
Runs 10 steps of SGD for each workload and compares results.
Run it as:
python3 test_traindiffs.py
"""
import pickle
from subprocess import DEVNULL
from subprocess import run
from subprocess import STDOUT
from absl import flags
from absl.testing import absltest
FLAGS = flags.FLAGS
WORKLOADS = [
'imagenet_resnet',
'imagenet_vit',
'wmt',
'librispeech_conformer',
'librispeech_deepspeech',
'fastmri',
'ogbg',
'criteo1tb'
]
GLOBAL_BATCH_SIZE = 16
NUM_TRAIN_STEPS = 10
class ModelDiffTest(absltest.TestCase):
def test_workload(self):
# pylint: disable=line-too-long, unnecessary-lambda-assignment
"""
    Runs the multi-gpu jax and ddp-pytorch models for each workload and compares the train and eval metrics collected
    in the corresponding log files. The jax run and the corresponding ddp-pytorch run are launched as separate
    subprocesses because ddp-pytorch models are run using torchrun; keeping them separate also helps avoid CUDA OOM
    errors resulting from the two frameworks competing with each other for GPU memory.
"""
for workload in WORKLOADS:
name = f'Testing {workload}'
jax_logs = '/tmp/jax_log.pkl'
pyt_logs = '/tmp/pyt_log.pkl'
run(
f'python3 tests/reference_algorithm_tests.py --workload={workload} --framework=jax --global_batch_size={GLOBAL_BATCH_SIZE} --log_file={jax_logs}'
f' --submission_path=tests/modeldiffs/vanilla_sgd_jax.py --identical=True --tuning_search_space=None --num_train_steps={NUM_TRAIN_STEPS}',
shell=True,
stdout=DEVNULL,
stderr=STDOUT,
check=True)
run(
f'torchrun --standalone --nnodes 1 --nproc_per_node 8 tests/reference_algorithm_tests.py --workload={workload} --framework=pytorch --global_batch_size={GLOBAL_BATCH_SIZE} --log_file={pyt_logs}'
f' --submission_path=tests/modeldiffs/vanilla_sgd_pytorch.py --identical=True --tuning_search_space=None --num_train_steps={NUM_TRAIN_STEPS}',
shell=True,
stdout=DEVNULL,
stderr=STDOUT,
check=True)
with open(jax_logs, 'rb') as f:
jax_results = pickle.load(f)
with open(pyt_logs, 'rb') as f:
pyt_results = pickle.load(f)
# PRINT RESULTS
k = next(
iter(
filter(lambda k: 'train' in k and 'loss' in k,
jax_results['eval_results'][0])))
header = [
'Iter',
'Eval (jax)',
'Eval (torch)',
'Grad Norm (jax)',
'Grad Norm (torch)',
'Train Loss (jax)',
'Train Loss (torch)',
]
fmt = lambda l: '|' + '|'.join(map(lambda x: f'{x:^20s}', l)) + '|'
header = fmt(header)
pad = (len(header) - len((name))) // 2
print('=' * pad, name, '=' * (len(header) - len(name) - pad), sep='')
print(header)
print('=' * len(header))
for i in range(NUM_TRAIN_STEPS):
row = map(lambda x: str(round(x, 5)),
[
jax_results['eval_results'][i][k],
pyt_results['eval_results'][i][k],
jax_results['scalars'][i]['grad_norm'],
pyt_results['scalars'][i]['grad_norm'],
jax_results['scalars'][i]['loss'],
pyt_results['scalars'][i]['loss'],
])
print(fmt([f'{i}', *row]))
print('=' * len(header))
if __name__ == '__main__':
absltest.main()
|
import jax
import jax.numpy as jnp
import jax.random as jax_rng
import jraph
import pytest
import torch
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.models import \
DlrmSmall as JaxDlrmSmall
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.models import \
DlrmSmall as PyTorchDlrmSmall
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \
ResNet18 as JaxResNet_c10
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.models import \
ResNet50 as JaxResNet
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
resnet18 as PyTorchResNet_c10
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.models import \
resnet50 as PyTorchResNet
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.models import \
ViT as JaxViT
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.models import \
ViT as PyTorchViT
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.models import \
Conformer as JaxConformer
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.models import \
ConformerConfig as JaxConformerConfig
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
ConformerConfig as PytorchConformerConfig
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.models import \
ConformerEncoderDecoder as PytorchConformer
from algorithmic_efficiency.workloads.mnist.mnist_jax.workload import \
_Model as JaxMLP
from algorithmic_efficiency.workloads.mnist.mnist_pytorch.workload import \
_Model as PyTorchMLP
from algorithmic_efficiency.workloads.ogbg.ogbg_jax.models import GNN as JaxGNN
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.models import \
GNN as PyTorchGNN
from algorithmic_efficiency.workloads.wmt.wmt_jax.models import \
Transformer as JaxTransformer
from algorithmic_efficiency.workloads.wmt.wmt_jax.models import \
TransformerConfig
from algorithmic_efficiency.workloads.wmt.wmt_pytorch.models import \
Transformer as PyTorchTransformer
WORKLOADS = [
'mnist',
'cifar',
'criteo1tb',
'imagenet_resnet',
'imagenet_vit',
'wmt',
'ogbg',
'librispeech_conformer',
]
@pytest.mark.parametrize('workload', WORKLOADS)
def test_matching_num_params(workload):
jax_model, pytorch_model = get_models(workload)
# Count parameters of both models.
num_jax_params = sum(x.size for x in jax.tree_util.tree_leaves(jax_model))
num_pytorch_params = sum(
p.numel() for p in pytorch_model.parameters() if p.requires_grad)
assert num_jax_params == num_pytorch_params
def get_models(workload):
init_rngs = {'params': jax_rng.PRNGKey(0), 'dropout': jax_rng.PRNGKey(1)}
if workload == 'mnist':
# Init Jax model.
init_val = jnp.ones((1, 28, 28, 1), jnp.float32)
jax_model = JaxMLP().init(init_rngs, init_val, train=True)['params']
# Init PyTorch model.
pytorch_model = PyTorchMLP()
elif workload == 'cifar':
# Init Jax model.
input_shape = (1, 32, 32, 3)
model_init = jax.jit(JaxResNet_c10(num_classes=10, dtype=jnp.float32).init)
jax_model = model_init(init_rngs, jnp.ones(input_shape,
jnp.float32))["params"]
# Init PyTorch model.
pytorch_model = PyTorchResNet_c10(num_classes=10)
elif workload == 'criteo1tb':
# Init Jax model.
mlp_bottom_dims = (512, 256, 128)
mlp_top_dims = (1024, 1024, 512, 256, 1)
embed_dim = 128
vocab_size = 32 * 128 * 1024
input_shape = (1, 39)
model_init = JaxDlrmSmall(
vocab_size=vocab_size,
num_dense_features=13,
mlp_bottom_dims=mlp_bottom_dims,
mlp_top_dims=mlp_top_dims,
embed_dim=embed_dim).init
jax_model = model_init(init_rngs, jnp.ones(input_shape, jnp.float32),
False)['params']
# Init PyTorch model.
pytorch_model = PyTorchDlrmSmall(
vocab_size=vocab_size,
num_dense_features=13,
mlp_bottom_dims=mlp_bottom_dims,
mlp_top_dims=mlp_top_dims,
embed_dim=embed_dim)
elif workload == 'imagenet_resnet':
# Init Jax model.
input_shape = (1, 224, 224, 3)
jax_model = JaxResNet(
num_classes=1000,
dtype=jnp.float32).init(init_rngs, jnp.ones(input_shape,
jnp.float32))['params']
# Init PyTorch model.
pytorch_model = PyTorchResNet()
elif workload == 'imagenet_vit':
# Init Jax model.
input_shape = (1, 224, 224, 3)
jax_model = JaxViT(num_classes=1000).init(
init_rngs, jnp.ones(input_shape, jnp.float32))['params']
# Init PyTorch model.
pytorch_model = PyTorchViT()
elif workload == 'librispeech_conformer':
jax_model = JaxConformer(JaxConformerConfig())
pytorch_model = PytorchConformer(PytorchConformerConfig())
# Init Jax model
input_shape = [(320000,), (320000,)]
fake_input_batch = [jnp.zeros((2, *x), jnp.float32) for x in input_shape]
jax_model = jax_model.init(
init_rngs, train=False, *fake_input_batch)["params"]
# Run model once to initialize lazy layers
wave = torch.randn(2, 320000)
pad = torch.zeros_like(wave)
pytorch_model(wave, pad)
elif workload == 'wmt':
# Init Jax model.
input_shape = (16, 256)
target_shape = (16, 256)
jax_model = JaxTransformer(TransformerConfig).init(
init_rngs,
jnp.ones(input_shape, jnp.float32),
jnp.ones(target_shape, jnp.float32))['params']
# Init PyTorch model.
pytorch_model = PyTorchTransformer()
elif workload == 'ogbg':
# Init Jax model.
fake_batch = jraph.GraphsTuple(
n_node=jnp.asarray([1]),
n_edge=jnp.asarray([1]),
nodes=jnp.ones((1, 9)),
edges=jnp.ones((1, 3)),
globals=jnp.zeros((1, 128)),
senders=jnp.asarray([0]),
receivers=jnp.asarray([0]))
jax_model = JaxGNN(num_outputs=128).init(
init_rngs, fake_batch, train=False)['params']
# Init PyTorch model.
pytorch_model = PyTorchGNN(num_outputs=128)
else:
raise ValueError(f'Models for workload {workload} are not available.')
return jax_model, pytorch_model
|
"""Check whether the __version__ attribute is set correctly."""
import algorithmic_efficiency
def test_version_attribute():
"""Check whether __version__ exists and is a valid string."""
assert hasattr(algorithmic_efficiency, "__version__")
version = algorithmic_efficiency.__version__
assert isinstance(version, str)
version_elements = version.split(".")
assert all(el.isnumeric() for el in version_elements)
|
"""Tests for submission.py for baselines.
This is an end-to-end test for all baselines on MNIST in PyTorch and Jax that
requires the dataset to be available.
"""
import copy
import os
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from algorithmic_efficiency.profiler import PassThroughProfiler
from algorithmic_efficiency.workloads import workloads
import submission_runner
FLAGS = flags.FLAGS
# Needed to avoid UnparsedFlagAccessError
# (see https://github.com/google/model_search/pull/8).
FLAGS(sys.argv)
MAX_GLOBAL_STEPS = 5
baselines = {
'jax': [
'adafactor',
'adamw',
'lamb',
'momentum',
'nadamw',
'nesterov',
'sam',
'shampoo',
],
'pytorch': [
'adamw',
'momentum',
'nadamw',
'nesterov',
],
}
frameworks = [
'pytorch',
'jax',
]
named_parameters = []
for f in frameworks:
for b in baselines[f]:
named_parameters.append(
dict(
testcase_name=f'{b}_{f}',
workload='mnist',
framework=f'{f}',
submission_path=f'baselines/{b}/{f}/submission.py',
tuning_search_space=f'baselines/{b}/tuning_search_space.json'))
class BaselineTest(parameterized.TestCase):
"""Tests for reference submissions."""
@parameterized.named_parameters(*named_parameters)
def test_baseline_submission(self,
workload,
framework,
submission_path,
tuning_search_space):
FLAGS.framework = framework
workload_metadata = copy.deepcopy(workloads.WORKLOADS[workload])
workload_metadata['workload_path'] = os.path.join(
workloads.BASE_WORKLOADS_DIR,
workload_metadata['workload_path'] + '_' + framework,
'workload.py')
workload_obj = workloads.import_workload(
workload_path=workload_metadata['workload_path'],
workload_class_name=workload_metadata['workload_class_name'],
workload_init_kwargs={})
score = submission_runner.score_submission_on_workload(
workload_obj,
workload,
submission_path,
data_dir='~/tensorflow_datasets', # The default in TFDS.
tuning_ruleset='external',
tuning_search_space=tuning_search_space,
num_tuning_trials=1,
profiler=PassThroughProfiler(),
max_global_steps=MAX_GLOBAL_STEPS,
)
logging.info(score)
if __name__ == '__main__':
absltest.main()
|
"""Test that each reference submission can run a train and eval step.
This is a brief test that runs the workload and reference submission
code for one train and one eval step for all workloads, without the real data
iterator because it is not realistic to have all datasets available at testing
time. For end-to-end tests of submission_runner.py see
submission_runner_test.py.
Assumes that each reference submission is using the external tuning ruleset and
that it is defined in:
# pylint: disable=line-too-long
"reference_algorithms/development_algorithms/{workload}/{workload}_{framework}/submission.py"
"reference_algorithms/development_algorithms/{workload}/tuning_search_space.json".
python3 tests/reference_algorithm_tests.py \
--workload=criteo1tb \
--framework=jax \
--global_batch_size=16 \
--submission_path=reference_algorithms/target_setting_algorithms/jax_adamw.py \
--tuning_search_space=reference_algorithms/target_setting_algorithms/criteo1tb/tuning_search_space.json
"""
import copy
import functools
import importlib
import json
import os
import pickle
from absl import flags
from absl import logging
from absl.testing import absltest
import flax
from flax import jax_utils
from flax.core.frozen_dict import FrozenDict
import jax
from jraph import GraphsTuple
import numpy as np
import tensorflow as tf
import torch
import torch.distributed as dist
from algorithmic_efficiency import halton
from algorithmic_efficiency import pytorch_utils
from algorithmic_efficiency import random_utils as prng
from algorithmic_efficiency.profiler import PassThroughProfiler
from algorithmic_efficiency.workloads import workloads
from algorithmic_efficiency.workloads.ogbg import \
input_pipeline as ogbg_input_pipeline
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
_graph_map
import submission_runner
from tests.modeldiffs import diff as diff_utils
flags.DEFINE_integer(
'global_batch_size',
-1,
('Global Batch size to use when running an individual workload. Otherwise '
'a per-device batch size of 2 is used.'))
flags.DEFINE_integer('num_train_steps', 1, 'Number of steps to train.')
flags.DEFINE_boolean('use_fake_input_queue', True, 'Use fake data examples.')
flags.DEFINE_string('log_file', '/tmp/log.pkl', 'The log file')
flags.DEFINE_boolean(
'all',
False,
'Run all workloads instead of using --workload and --framework.')
flags.DEFINE_boolean('identical',
False,
'Run jax and pytorch with identical weights.')
FLAGS = flags.FLAGS
USE_PYTORCH_DDP, RANK, PYTORCH_DEVICE, N_GPUS = pytorch_utils.pytorch_setup()
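# Hide GPUs from TensorFlow so it does not reserve memory needed by the
# Jax/PyTorch models under test.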
tf.config.set_visible_devices([], 'GPU')
_EXPECTED_METRIC_NAMES = {
'cifar': ['train/loss', 'validation/loss', 'test/accuracy'],
'criteo1tb': ['train/loss', 'validation/loss'],
'criteo1tb_test': ['train/loss', 'validation/loss'],
'fastmri': ['train/ssim', 'validation/ssim'],
'imagenet_resnet': ['train/accuracy', 'validation/accuracy'],
'imagenet_vit': ['train/accuracy', 'validation/accuracy'],
'librispeech_conformer': ['train/wer', 'validation/wer', 'train/ctc_loss'],
'librispeech_deepspeech': ['train/wer', 'validation/wer', 'train/ctc_loss'],
'mnist': ['train/loss', 'validation/accuracy', 'test/accuracy'],
'ogbg': [
'train/accuracy', 'validation/loss', 'test/mean_average_precision'
],
'wmt': ['train/bleu', 'validation/loss', 'validation/accuracy'],
}
def _make_fake_image_batch(batch_shape, data_shape, num_classes):
examples = np.random.normal(size=(*batch_shape,
*data_shape)).astype(np.float32)
labels = np.random.randint(0, num_classes, size=batch_shape)
masks = np.ones(batch_shape, dtype=np.float32)
return {'inputs': examples, 'targets': labels, 'weights': masks}
def _pytorch_map(inputs):
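  # Under DDP each process takes its own shard along the leading (device) axis;
  # otherwise the per-device leading axes are flattened into a single batch.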
if USE_PYTORCH_DDP:
return jax.tree_map(
lambda a: torch.as_tensor(a[RANK], device=PYTORCH_DEVICE), inputs)
return jax.tree_map(
lambda a: torch.as_tensor(a, device=PYTORCH_DEVICE).view(-1, a.shape[-1])
if len(a.shape) == 3 else torch.as_tensor(a, device=PYTORCH_DEVICE).view(
-1),
inputs)
class _FakeTokenizer:
def detokenize(self, *args):
del args
return tf.constant('this is a fake sequence?')
@flax.struct.dataclass
class _FakeMetricsCollection:
def merge(self, *args):
del args
return self
def compute(self):
return {
'wer': 0.0,
'ctc_loss': 0.0,
}
def unreplicate(self):
return self
class _FakeMetricsLogger:
def __init__(self):
self.filename = FLAGS.log_file
self.scalars = []
self.eval_results = []
def append_scalar_metrics(self, scalars, step):
if USE_PYTORCH_DDP:
for k in sorted(scalars):
scalars[k] = torch.as_tensor([scalars[k]], device=PYTORCH_DEVICE)
dist.all_reduce(scalars[k], op=dist.ReduceOp.AVG)
scalars[k] = scalars[k].item()
if RANK == 0:
self.scalars.append(scalars)
self.save()
def append_eval_metrics(self, result):
if RANK == 0:
self.eval_results.append(result)
self.save()
def save(self):
with open(self.filename, 'wb') as f:
pickle.dump({'scalars': self.scalars, 'eval_results': self.eval_results},
f)
class _FakeMetricsBundle:
def gather_from_model_output(self, *args, **kwargs):
del args
del kwargs
return _FakeMetricsCollection()
def _make_one_batch_workload(workload_class,
workload_name,
framework,
global_batch_size,
use_fake_input_queue,
n_gpus):
class _OneEvalBatchWorkload(workload_class):
def __init__(self):
kwargs = {}
if 'librispeech' in workload_name:
kwargs['use_specaug'] = False
self.init_kwargs = kwargs
super().__init__(**kwargs)
self.summary_writer = None
self.metrics_logger = _FakeMetricsLogger()
if 'librispeech' in workload_name:
self.tokenizer = _FakeTokenizer()
def init_model_fn(self, rng, dropout_rate=None, aux_dropout_rate=None):
# pylint: disable=line-too-long
if not (FLAGS.identical and
os.path.exists(f'tests/modeldiffs/{workload_name}/compare.py')):
return super().init_model_fn(
rng, dropout_rate=dropout_rate, aux_dropout_rate=aux_dropout_rate)
if framework == 'jax':
compare_module = importlib.import_module(
f'tests.modeldiffs.{workload_name}.compare')
jax_params, model_state, _ = diff_utils.torch2jax(
jax_workload=super(),
pytorch_workload=compare_module.PytWorkload(**self.init_kwargs),
key_transform=compare_module.key_transform,
sd_transform=compare_module.sd_transform)
return (FrozenDict(**jax_utils.replicate(jax_params)),
FrozenDict(**jax_utils.replicate(model_state))
if model_state is not None else model_state)
return super().init_model_fn([0], dropout_rate=0.0, aux_dropout_rate=0.0)
@property
def num_eval_train_examples(self):
return global_batch_size
@property
def num_validation_examples(self):
return global_batch_size
@property
def num_test_examples(self):
super_num_test = super().num_test_examples
if super_num_test is not None:
return global_batch_size
return None
def _build_input_queue(self, *args, **kwargs):
if not use_fake_input_queue:
return super()._build_input_queue(*args, **kwargs)
del args
del kwargs
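      # Fix the seed so every run (and both frameworks) builds identical fake
      # batches.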
np.random.seed(42)
if framework == 'jax' or USE_PYTORCH_DDP:
batch_shape = (n_gpus, global_batch_size // n_gpus)
else:
batch_shape = (global_batch_size,)
if workload_name == 'cifar':
if framework == 'jax':
data_shape = (32, 32, 3)
else:
data_shape = (3, 32, 32)
fake_batch = _make_fake_image_batch(
batch_shape, data_shape=data_shape, num_classes=10)
elif workload_name == 'criteo1tb' or workload_name == 'criteo1tb_test':
targets = np.ones(batch_shape)
targets[0] = 0
fake_batch = {
'inputs': np.ones((*batch_shape, 13 + 26)),
'targets': targets,
'weights': np.ones(batch_shape),
}
elif workload_name in ['imagenet_resnet', 'imagenet_vit']:
data_shape = (224, 224, 3)
fake_batch = _make_fake_image_batch(
batch_shape, data_shape=data_shape, num_classes=1000)
if framework == 'pytorch':
num_dims = len(fake_batch['inputs'].shape)
fake_batch['inputs'] = fake_batch['inputs'].transpose(
(*range(num_dims - 3), num_dims - 1, num_dims - 3, num_dims - 2))
elif 'librispeech' in workload_name:
rate = 16000
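        # Build fake audio by concatenating 0.5 s sine waves at random
        # frequencies until each example reaches 320000 samples (20 s at
        # 16 kHz).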
l = None
while l is None or l.shape[-1] < 320000:
duration = 0.5
freq = 2**(np.random.rand(*batch_shape, 1) * 13)
wav = np.sin(2 * np.pi * freq * np.arange(rate * duration) / rate)
if l is None:
l = wav
else:
l = np.concatenate([l, wav], axis=-1)
inputs = l
targets = np.random.randint(low=1, high=1024, size=(*batch_shape, 256))
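        # Build a padding mask that is 1 for target positions beyond each
        # example's randomly drawn target length.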
tgt_pad = np.arange(0, 256)[tuple([None] * len(batch_shape))]
tgt_lengths = np.random.randint(
low=100, high=256, size=(*batch_shape, 1))
tgt_pad = 1 * (tgt_pad > tgt_lengths)
fake_batch = {
'inputs': (inputs, np.zeros_like(inputs)),
'targets': (targets, tgt_pad),
}
elif workload_name == 'mnist':
fake_batch = _make_fake_image_batch(
batch_shape, data_shape=(28, 28, 1), num_classes=10)
elif workload_name == 'ogbg':
tf.random.set_seed(5)
def _fake_iter():
while True:
fake_batch = {
'num_nodes':
tf.ones((1,), dtype=tf.int64),
'edge_index':
tf.ones((1, 2), dtype=tf.int64),
'node_feat':
tf.random.normal((1, 9)),
'edge_feat':
tf.random.normal((1, 3)),
'labels':
tf.cast(
tf.random.uniform((self._num_outputs,),
minval=0,
maxval=2,
dtype=tf.int32),
tf.float32),
}
yield fake_batch
fake_batch_iter = ogbg_input_pipeline._get_batch_iterator(
_fake_iter(), global_batch_size)
fake_batch = next(fake_batch_iter) # pylint: disable=stop-iteration-return
if framework == 'pytorch':
fake_batch['inputs'] = _graph_map(_pytorch_map, fake_batch['inputs'])
fake_batch['targets'] = _pytorch_map(fake_batch['targets'])
fake_batch['weights'] = _pytorch_map(fake_batch['weights'])
elif workload_name == 'wmt':
max_len = 256
fake_batch = {
'inputs':
np.random.randint(
low=0, high=32000, size=(*batch_shape, max_len)),
'targets':
np.random.randint(
low=0, high=32000, size=(*batch_shape, max_len)),
'weights':
np.random.randint(low=0, high=2, size=(*batch_shape, max_len)),
}
self._tokenizer = _FakeTokenizer()
elif workload_name == 'fastmri':
data_shape = (320, 320)
fake_batch = {
'inputs':
_make_fake_image_batch(
batch_shape, data_shape=data_shape, num_classes=1000)
['inputs'],
'targets':
_make_fake_image_batch(
batch_shape, data_shape=data_shape, num_classes=1000)
['inputs'],
'mean':
np.zeros(batch_shape),
'std':
np.ones(batch_shape),
'volume_max':
np.zeros(batch_shape),
'weights':
np.ones(batch_shape),
}
else:
raise ValueError(
'Workload {} does not have a fake batch defined, you '
'can add it or use --use_fake_input_queue=false.'.format(
workload_name))
if framework == 'pytorch':
def to_device(k, v):
dtype = (
torch.long if (k == 'targets' and workload_name != 'fastmri') else
torch.bool if k == 'weights' else torch.float)
if USE_PYTORCH_DDP:
v = v[RANK]
return torch.as_tensor(v, device=PYTORCH_DEVICE, dtype=dtype)
new_fake_batch = {}
for k, v in fake_batch.items():
if isinstance(v, np.ndarray):
new_fake_batch[k] = to_device(k, v)
elif isinstance(v, tuple) and not isinstance(v, GraphsTuple):
new_fake_batch[k] = tuple(map(functools.partial(to_device, k), v))
else:
new_fake_batch[k] = v
fake_batch = new_fake_batch
# We set the number of examples to the batch size for all splits, so only
# yield two batches, one for each call to eval_model().
num_batches = 2
    # For WMT we also iterate through the eval iterators a second time to
    # compute the BLEU score.
if workload_name == 'wmt':
num_batches *= 2
def _data_gen():
for _ in range(num_batches * FLAGS.num_train_steps):
yield fake_batch
return _data_gen()
def eval_model(self, *args, **kwargs):
eval_result = super().eval_model(*args, **kwargs)
self.metrics_logger.append_eval_metrics(eval_result)
return eval_result
return _OneEvalBatchWorkload()
def _test_submission(workload_name,
framework,
submission_path,
search_space_path,
data_dir,
use_fake_input_queue,
n_gpus):
logging.info(f'========= Testing {workload_name} in {framework}.')
FLAGS.framework = framework
workload_metadata = copy.deepcopy(submission_runner.WORKLOADS[workload_name])
workload_metadata['workload_path'] = os.path.join(
submission_runner.BASE_WORKLOADS_DIR,
workload_metadata['workload_path'] + '_' + framework,
'workload.py')
workload_class = workloads.import_workload(
workload_path=workload_metadata['workload_path'],
workload_class_name=workload_metadata['workload_class_name'],
return_class=True)
submission_module_path = workloads.convert_filepath_to_module(submission_path)
submission_module = importlib.import_module(submission_module_path)
init_optimizer_state = submission_module.init_optimizer_state
update_params = submission_module.update_params
data_selection = submission_module.data_selection
if FLAGS.all:
if FLAGS.global_batch_size > 0:
raise ValueError('Cannot set --global_batch_size and --all.')
global_batch_size = 2 * n_gpus
else:
global_batch_size = FLAGS.global_batch_size
if FLAGS.global_batch_size < 0:
raise ValueError('Must set --global_batch_size.')
workload = _make_one_batch_workload(workload_class,
workload_name,
framework,
global_batch_size,
use_fake_input_queue,
n_gpus)
# Get a sample hyperparameter setting.
hyperparameters = {}
if search_space_path != 'None':
with open(search_space_path, 'r', encoding='UTF-8') as search_space_file:
hyperparameters = halton.generate_search(
json.load(search_space_file), num_trials=1)[0]
rng = prng.PRNGKey(0)
data_rng, opt_init_rng, model_init_rng, rng = prng.split(rng, 4)
input_queue = workload._build_input_queue(
data_rng, 'train', data_dir=data_dir, global_batch_size=global_batch_size)
model_params, model_state = workload.init_model_fn(model_init_rng)
optimizer_state = init_optimizer_state(workload,
model_params,
model_state,
hyperparameters,
opt_init_rng)
if USE_PYTORCH_DDP:
torch.cuda.empty_cache()
dist.barrier()
for global_step in range(FLAGS.num_train_steps):
step_rng = prng.fold_in(rng, global_step)
data_select_rng, update_rng, eval_rng = prng.split(step_rng, 3)
batch = data_selection(workload,
input_queue,
optimizer_state,
model_params,
model_state,
hyperparameters,
global_step,
data_select_rng)
optimizer_state, model_params, model_state = update_params(
workload=workload,
current_param_container=model_params,
current_params_types=workload.model_params_types,
model_state=model_state,
hyperparameters=hyperparameters,
batch=batch,
loss_type=workload.loss_type,
optimizer_state=optimizer_state,
eval_results=[],
global_step=global_step,
rng=update_rng)
eval_result = workload.eval_model(
global_batch_size,
model_params,
model_state,
eval_rng,
data_dir,
imagenet_v2_data_dir=None,
global_step=global_step)
_ = workload.eval_model(
global_batch_size,
model_params,
model_state,
eval_rng,
data_dir,
imagenet_v2_data_dir=None,
global_step=global_step)
return eval_result
def _make_paths(repo_location, framework, workload_name):
if '_' in workload_name:
dataset_name = workload_name.split('_')[0]
else:
dataset_name = workload_name
workload_dir = (
f'{repo_location}/reference_algorithms/development_algorithms/'
f'{workload_name}')
search_space_path = f'{workload_dir}/tuning_search_space.json'
submission_path = (f'reference_algorithms/development_algorithms/'
f'{workload_name}/{dataset_name}_{framework}/'
'submission.py')
full_submission_path = f'{repo_location}/{submission_path}'
if not os.path.exists(full_submission_path):
return None, None
return search_space_path, submission_path
class ReferenceSubmissionTest(absltest.TestCase):
"""Tests for reference submissions."""
def _assert_eval_result(self, workload_name, eval_result):
expected_names = _EXPECTED_METRIC_NAMES[workload_name]
actual_names = list(eval_result.keys())
for expected_name in expected_names:
self.assertIn(expected_name, actual_names)
def test_submission(self):
profiler = PassThroughProfiler()
# Example: /home/znado/algorithmic-efficiency/tests
self_location = os.path.dirname(os.path.realpath(__file__))
# Example: /home/znado/algorithmic-efficiency
repo_location = '/'.join(self_location.split('/')[:-1])
if FLAGS.tuning_ruleset != 'external':
raise ValueError('--tuning_ruleset must be set to "external".')
if FLAGS.all:
if FLAGS.submission_path:
raise ValueError('Cannot set --submission_path and --all.')
if FLAGS.tuning_search_space:
raise ValueError('Cannot set --tuning_search_space and --all.')
references_dir = (
f'{repo_location}/reference_algorithms/development_algorithms')
for workload_name in os.listdir(references_dir):
for framework in ['jax', 'pytorch']:
if framework == 'pytorch':
pytorch_utils.pytorch_init(USE_PYTORCH_DDP, RANK, profiler)
# First jax operation has to be called after pytorch_init.
n_gpus = max(N_GPUS, jax.local_device_count())
search_space_path, submission_path = _make_paths(
repo_location, framework, workload_name)
if search_space_path is None:
continue
eval_result = _test_submission(
workload_name,
framework,
submission_path,
search_space_path,
data_dir=FLAGS.data_dir,
use_fake_input_queue=FLAGS.use_fake_input_queue,
n_gpus=n_gpus)
self._assert_eval_result(workload_name, eval_result)
else:
framework = FLAGS.framework
if framework == 'pytorch':
pytorch_utils.pytorch_init(USE_PYTORCH_DDP, RANK, profiler)
# First jax operation has to be called after pytorch_init.
n_gpus = max(N_GPUS, jax.local_device_count())
workload_name = FLAGS.workload
if FLAGS.submission_path and FLAGS.tuning_search_space:
search_space_path = FLAGS.tuning_search_space
submission_path = FLAGS.submission_path
else:
search_space_path, submission_path = _make_paths(
repo_location, framework, workload_name)
eval_result = _test_submission(
workload_name,
framework,
submission_path,
search_space_path,
data_dir=FLAGS.data_dir,
use_fake_input_queue=FLAGS.use_fake_input_queue,
n_gpus=n_gpus)
self._assert_eval_result(workload_name, eval_result)
if USE_PYTORCH_DDP:
# cleanup
dist.destroy_process_group()
if __name__ == '__main__':
absltest.main()
|
"""Tests for submission_runner.py.
This is an end-to-end test for MNIST in PyTorch and Jax that requires the
dataset to be available. For testing the workload and reference submission code
for all workloads, see reference_algorithm_tests.py.
"""
import copy
import os
import sys
from absl import flags
from absl import logging
from absl.testing import absltest
from absl.testing import parameterized
from algorithmic_efficiency.profiler import PassThroughProfiler
import submission_runner
FLAGS = flags.FLAGS
# Needed to avoid UnparsedFlagAccessError
# (see https://github.com/google/model_search/pull/8).
FLAGS(sys.argv)
_MNIST_DEV_ALGO_DIR = 'reference_algorithms/development_algorithms/mnist'
class SubmissionRunnerTest(parameterized.TestCase):
"""Tests for reference submissions."""
@parameterized.named_parameters(
dict(
testcase_name='mnist_jax',
workload='mnist',
framework='jax',
submission_path=(f'{_MNIST_DEV_ALGO_DIR}/mnist_jax/submission.py'),
tuning_search_space=(
f'{_MNIST_DEV_ALGO_DIR}/tuning_search_space.json')),
dict(
testcase_name='mnist_pytorch',
workload='mnist',
framework='pytorch',
submission_path=(
f'{_MNIST_DEV_ALGO_DIR}/mnist_pytorch/submission.py'),
tuning_search_space=(
f'{_MNIST_DEV_ALGO_DIR}/tuning_search_space.json')),
)
def test_submission(self,
workload,
framework,
submission_path,
tuning_search_space):
FLAGS.framework = framework
workload_metadata = copy.deepcopy(submission_runner.WORKLOADS[workload])
workload_metadata['workload_path'] = os.path.join(
submission_runner.BASE_WORKLOADS_DIR,
workload_metadata['workload_path'] + '_' + framework,
'workload.py')
workload_obj = submission_runner.import_workload(
workload_path=workload_metadata['workload_path'],
workload_class_name=workload_metadata['workload_class_name'],
workload_init_kwargs={})
score = submission_runner.score_submission_on_workload(
workload_obj,
workload,
submission_path,
data_dir='~/tensorflow_datasets', # The default in TFDS.
tuning_ruleset='external',
tuning_search_space=tuning_search_space,
num_tuning_trials=1,
profiler=PassThroughProfiler(),
max_global_steps=500,
)
logging.info(score)
def test_convert_filepath_to_module(self):
"""Sample test for the `convert_filepath_to_module` function."""
test_path = os.path.abspath(__file__)
module_path = submission_runner.convert_filepath_to_module(test_path)
self.assertNotIn('.py', module_path)
self.assertNotIn('/', module_path)
self.assertIsInstance(module_path, str)
if __name__ == '__main__':
absltest.main()
|
from collections import Counter
import pprint
def jax_like_pytorch_statedict(model, state_dict, keys=None):
if keys is None:
keys = []
c = Counter()
children = list(model.children())
for k, v in model.named_parameters():
if '.' not in k:
state_dict[(*keys, k)] = v
for i in children:
num_params = sum(p.numel() for p in i.parameters() if p.requires_grad)
if num_params != 0:
name = i.__class__.__name__
k = f'{name}_{c[name]}'
c[name] += 1
jax_like_pytorch_statedict(i, state_dict, keys + [k])
def flatten(jm, ret, keys=None):
if keys is None:
keys = []
for k in jm:
if isinstance(jm[k], dict):
flatten(jm[k], ret, keys + [k])
else:
ret[tuple(keys + [k])] = jm[k]
def value_transform(k, value, jax_value):
k_str = ''.join(k).lower()
if ('conv' in k_str and 'kernel' in k_str) or \
('embedding' in k_str and 'kernel' in k_str):
if 'transpose' in k_str:
# Assumes 2D ConvTranspose with stride equal to kernel_size.
return value.reshape(value.shape[0], value.shape[1],
-1).flip(-1).permute(2, 0,
1).reshape(*jax_value.shape)
else:
rank = len(value.shape)
if rank == 3:
value = value.permute(2, 1, 0)
elif rank == 4:
value = value.permute(2, 3, 1, 0)
elif rank == 2:
value = value.t()
elif 'attention' in k_str and 'kernel' in k_str:
value = value.t().reshape(*list(jax_value.shape))
elif 'attention' in k_str and 'bias' in k_str:
value = value.reshape(*list(jax_value.shape))
elif ('dense' in k_str and 'kernel' in k_str) or \
('lstm' in k_str and 'kernel' in k_str) or \
('head' in k_str and 'kernel' in k_str) or \
('pre_logits' in k_str and 'kernel' in k_str):
value = value.t()
return value
class Torch2Jax:
def __init__(self, torch_model, jax_model):
self.torch_model = torch_model
self.jax_model = jax_model
self.pytorch_sd = {}
jax_like_pytorch_statedict(torch_model, self.pytorch_sd)
self.flattened_jax_model = {}
flatten(jax_model, self.flattened_jax_model)
def key_transform(self, k_transform_fn):
self.pytorch_sd = {
k_transform_fn(k): self.pytorch_sd[k] for k in self.pytorch_sd
}
def value_transform(self, v_transform_fn):
self.pytorch_sd = {
k: v_transform_fn(k, self.pytorch_sd[k], self.flattened_jax_model[k])
for k in self.pytorch_sd
}
def sd_transform(self, sd_transform_fn):
self.pytorch_sd = sd_transform_fn(self.pytorch_sd)
def diff(self):
j_p = set(self.flattened_jax_model.keys()) - set(self.pytorch_sd.keys())
p_j = set(self.pytorch_sd.keys()) - set(self.flattened_jax_model.keys())
pj = set(self.pytorch_sd.keys()) & set(self.flattened_jax_model.keys())
print(f'Keys in jax but not in pytorch: {len(j_p)}')
pprint.pprint(sorted(list(j_p)))
print(f'Keys in pytorch but not in jax: {len(p_j)}')
pprint.pprint(sorted(list(p_j)))
print(f'Common keys: {len(pj)}')
if len(pj) == len(self.pytorch_sd):
count = 0
for k in self.pytorch_sd:
s_p = list(self.pytorch_sd[k].shape)
s_j = list(self.flattened_jax_model[k].shape)
if s_p == s_j:
count += 1
else:
print(k, s_p, s_j)
print(f'Number of values with identical shapes: {count}')
def update_jax_model(self):
for k in self.flattened_jax_model:
d = self.jax_model
for i in k[:-1]:
d = d[i]
d[k[-1]] = self.pytorch_sd[k].detach().cpu().numpy()
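# Example (a sketch, not part of the utilities above): for a PyTorch model whose
# first child is a Conv2d, `jax_like_pytorch_statedict` produces nested-tuple
# keys such as ('Conv2d_0', 'weight'); after a workload-specific key transform
# and `value_transform`, these line up with the flattened Flax keys (e.g.
# ('Conv_0', 'kernel')) so that `update_jax_model` can copy the weights across.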
|
import torch
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Vanilla SGD Optimizer."""
del model_state
del rng
optimizer_state = {
'optimizer':
torch.optim.SGD(model_params.parameters(), lr=0.001, weight_decay=0),
}
return optimizer_state
|
from flax import jax_utils
import jax
import numpy as np
import torch
from tests.modeldiffs.torch2jax_utils import Torch2Jax
from tests.modeldiffs.torch2jax_utils import value_transform
#pylint: disable=dangerous-default-value
def torch2jax(jax_workload,
pytorch_workload,
key_transform=None,
sd_transform=None,
init_kwargs=dict(dropout_rate=0.0, aux_dropout_rate=0.0)):
jax_params, model_state = jax_workload.init_model_fn(jax.random.PRNGKey(0),
**init_kwargs)
pytorch_model, _ = pytorch_workload.init_model_fn([0], **init_kwargs)
jax_params = jax_utils.unreplicate(jax_params).unfreeze()
if model_state is not None:
model_state = jax_utils.unreplicate(model_state)
if isinstance(
pytorch_model,
(torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
pytorch_model = pytorch_model.module
# Map and copy params of pytorch_model to jax_model.
t2j = Torch2Jax(torch_model=pytorch_model, jax_model=jax_params)
if key_transform is not None:
t2j.key_transform(key_transform)
if sd_transform is not None:
t2j.sd_transform(sd_transform)
t2j.value_transform(value_transform)
t2j.diff()
t2j.update_jax_model()
return jax_params, model_state, pytorch_model
def out_diff(jax_workload,
pytorch_workload,
jax_model_kwargs,
pytorch_model_kwargs,
key_transform=None,
sd_transform=None,
out_transform=None):
jax_params, model_state, pytorch_model = torch2jax(jax_workload,
pytorch_workload,
key_transform,
sd_transform)
out_p, _ = pytorch_workload.model_fn(params=pytorch_model,
**pytorch_model_kwargs)
out_j, _ = jax_workload.model_fn(params=jax_params,
model_state=model_state,
**jax_model_kwargs)
if out_transform is not None:
out_p = out_transform(out_p)
out_j = out_transform(out_j)
print(np.abs(out_p.detach().numpy() - np.array(out_j)).max())
print(np.abs(out_p.detach().numpy() - np.array(out_j)).min())
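# The two prints above report the max/min absolute elementwise difference
# between the PyTorch and Jax outputs; for a correct parameter mapping both
# values should be close to zero (up to floating-point noise).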
|
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Vanilla SGD Optimizer."""
del model_params
del model_state
del rng
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optax.sgd(learning_rate=0.001)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_jax.workload import \
LibriSpeechDeepSpeechWorkload as JaxWorkload
from algorithmic_efficiency.workloads.librispeech_deepspeech.librispeech_pytorch.workload import \
LibriSpeechDeepSpeechWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
bn = False
for i in k:
bn = bn or 'BatchNorm' in i
if 'ModuleList' in i:
continue
if 'CustomBatchNorm' in i:
continue
if 'Linear' in i:
if 'NonDynamicallyQuantizableLinear' in i:
i = 'out'
else:
i = i.replace('Linear', 'Dense')
elif 'Conv1d' in i:
i = i.replace('Conv1d', 'Conv')
elif 'MHSAwithQS' in i:
i = i.replace('MHSAwithQS', 'SelfAttention')
elif 'LSTM' in i:
i = i.replace('LSTM', 'CudnnLSTM')
elif 'weight' in i:
if bn:
i = i.replace('weight', 'scale')
else:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
def sd_transform(sd):
# pylint: disable=locally-disabled, modified-iterating-dict, consider-using-dict-items
out = {}
for k in sd:
if 'Attention' in ''.join(k):
if 'in_proj' in k[-1]:
new_key = k[:-1]
chunks = sd[k].chunk(3)
for t, c in zip(['query', 'key', 'value'], chunks):
out[new_key + (t, k[-1].split('_')[-1])] = c
else:
out[k] = sd[k]
elif 'LSTM' in ''.join(k):
l = out.get(k[:-1], dict())
l[k[-1]] = sd[k]
out[k[:-1]] = l
else:
out[k] = sd[k]
keys_to_del = []
updates = dict()
for k in out:
if isinstance(out[k], dict):
kernels = ['kernel_ih_l0', 'kernel_hh_l0']
biases = ['bias_ih_l0', 'bias_hh_l0']
weights = torch.cat([out[k][i].view(-1) for i in kernels] +
[out[k][i + '_reverse'].view(-1) for i in kernels] +
[out[k][i].view(-1) for i in biases] +
[out[k][i + '_reverse'].view(-1) for i in biases])
updates[k + ('weights',)] = weights
keys_to_del.append(k)
out.update(updates)
for k in keys_to_del:
del out[k]
return out
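# Notes on the transform above (sketch): the attention branch splits a fused
# 'in_proj' tensor into three equal chunks emitted as (..., 'query', ...),
# (..., 'key', ...), and (..., 'value', ...); the LSTM branch concatenates the
# forward and reverse input-hidden/hidden-hidden kernels and biases into a
# single flat (..., 'weights') vector to match the Jax CudnnLSTM parameter.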
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
wave = torch.randn(2, 320000)
pad = torch.zeros_like(wave)
pad[0, 200000:] = 1
jax_batch = {'inputs': (wave.detach().numpy(), pad.detach().numpy())}
pyt_batch = {'inputs': (wave, pad)}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
out_transform=lambda out_outpad: out_outpad[0] *
(1 - out_outpad[1][:, :, None]))
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.wmt.wmt_jax.workload import \
WmtWorkload as JaxWorkload
from algorithmic_efficiency.workloads.wmt.wmt_pytorch.workload import \
WmtWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
for i in k:
if 'ModuleList' in i or\
'TransformerDecoder_' in i or\
'TransformerEncoder_' in i:
continue
if 'Linear' in i:
if 'NonDynamicallyQuantizableLinear' in i:
i = 'out'
else:
i = i.replace('Linear', 'Dense')
elif i == 'Decoder_0':
i = 'decoder'
elif i == 'Encoder_0':
i = 'encoder'
elif 'TransformerEncoderLayer' in i:
i = i.replace('TransformerEncoderLayer', 'encoderblock')
elif 'TransformerDecoderLayer' in i:
i = i.replace('TransformerDecoderLayer', 'encoderdecoderblock')
elif 'MultiheadAttention' in i:
i = i.replace('MultiheadAttention', 'SelfAttention')
elif 'weight' in i:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
def sd_transform(sd):
out = {}
for k in sd:
k_str = ''.join(k)
if 'Dense' in k_str:
new_key = (*k[:2], 'MlpBlock_0', *k[2:])
out[new_key] = sd[k]
elif 'SelfAttention' in k_str:
new_key = list(k)
if '_' in new_key[-1]:
qkv = {'q': 'query', 'k': 'key', 'v': 'value'}[new_key[-1][0]]
new_key[-1] = qkv
new_key.append('kernel')
new_key = [
i if i != 'SelfAttention_1' else 'MultiHeadDotProductAttention_0'
for i in new_key
]
new_key = tuple(new_key)
out[new_key] = sd[k]
elif 'LayerNorm' in k_str:
new_key = list(k)
if len(k) == 3:
if k[0] == 'encoder':
new_key[1] = 'encoder_layernorm'
else:
new_key[1] = 'encoderdecoder_layernorm'
if k[-1] == 'kernel':
new_key[-1] = 'scale'
new_key = tuple(new_key)
out[new_key] = sd[k]
elif 'Embedding' in k_str:
new_key = ('shared_embedding', 'embedding')
out[new_key] = sd[k]
else:
out[k] = sd[k]
return out
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
inp_tokens = torch.randint(low=0, high=32000, size=(2, 256))
tgt_tokens = torch.randint(low=0, high=32000, size=(2, 256))
jax_batch = {
'inputs': inp_tokens.detach().numpy(),
'targets': tgt_tokens.detach().numpy(),
}
pyt_batch = {'inputs': inp_tokens, 'targets': tgt_tokens}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
out_transform=None)
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_jax.workload import \
ImagenetVitWorkload as JaxWorkload
from algorithmic_efficiency.workloads.imagenet_vit.imagenet_pytorch.workload import \
ImagenetVitWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
if 'Conv' in k[0]:
k = ('embedding', *k[1:])
elif k[0] == 'Linear_0':
k = ('pre_logits', *k[1:])
elif k[0] == 'Linear_1':
k = ('head', *k[1:])
new_key = []
bn = False
attention = False
ln = False
enc_block = False
for idx, i in enumerate(k):
bn = bn or 'BatchNorm' in i
ln = ln or 'LayerNorm' in i
attention = attention or 'SelfAttention' in i
if 'ModuleList' in i or 'Sequential' in i:
continue
if 'CustomBatchNorm' in i:
continue
if 'Linear' in i:
if attention:
i = {
'Linear_0': 'query',
'Linear_1': 'key',
'Linear_2': 'value',
'Linear_3': 'out',
}[i]
else:
i = i.replace('Linear', 'Dense')
elif 'Conv2d' in i:
i = i.replace('Conv2d', 'Conv')
elif 'Encoder1DBlock' in i:
i = i.replace('Encoder1DBlock', 'encoderblock')
enc_block = True
elif 'Encoder' in i:
i = 'Transformer'
elif enc_block and 'SelfAttention' in i:
i = 'MultiHeadDotProductAttention_1'
elif enc_block and i == 'LayerNorm_1':
i = 'LayerNorm_2'
elif enc_block and 'MlpBlock' in i:
i = 'MlpBlock_3'
elif idx == 1 and i == 'LayerNorm_0':
i = 'encoder_layernorm'
elif 'weight' in i:
if bn or ln:
i = i.replace('weight', 'scale')
else:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
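# For illustration (sketch of the transform above): the patch-embedding key
# ('Conv2d_0', 'weight') maps to ('embedding', 'kernel') and the classification
# head ('Linear_1', 'weight') maps to ('head', 'kernel').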
sd_transform = None
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
image = torch.randn(2, 3, 224, 224)
jax_batch = {'inputs': image.permute(0, 2, 3, 1).detach().numpy()}
pyt_batch = {'inputs': image}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=None,
)
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_jax.workload import \
LibriSpeechConformerWorkload as JaxWorkload
from algorithmic_efficiency.workloads.librispeech_conformer.librispeech_pytorch.workload import \
LibriSpeechConformerWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
for i in k:
if 'ModuleList' in i:
continue
if 'Linear' in i:
if 'NonDynamicallyQuantizableLinear' in i:
i = 'out'
else:
i = i.replace('Linear', 'Dense')
elif 'Conv1d' in i:
i = i.replace('Conv1d', 'Conv')
elif 'MHSAwithQS' in i:
i = i.replace('MHSAwithQS', 'SelfAttention')
elif 'weight' in i:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
def sd_transform(sd):
out = {}
for k in sd:
if 'Attention' in ''.join(k):
if 'in_proj' in k[-1]:
new_key = k[:-1]
chunks = sd[k].chunk(3)
for t, c in zip(['query', 'key', 'value'], chunks):
out[new_key + (t, k[-1].split('_')[-1])] = c
else:
out[k] = sd[k]
else:
out[k] = sd[k]
return out
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
wave = torch.randn(2, 320000)
pad = torch.zeros_like(wave)
pad[0, 200000:] = 1
jax_batch = {'inputs': (wave.detach().numpy(), pad.detach().numpy())}
pyt_batch = {'inputs': (wave, pad)}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
out_transform=lambda out_outpad: out_outpad[0] *
(1 - out_outpad[1][:, :, None]))
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import jraph
import numpy as np
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.ogbg.ogbg_jax.workload import \
OgbgWorkload as JaxWorkload
from algorithmic_efficiency.workloads.ogbg.ogbg_pytorch.workload import \
OgbgWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
bn = False
ln = False
for i in k:
bn = bn or 'BatchNorm' in i
ln = ln or 'LayerNorm' in i
if 'ModuleList' in i:
continue
if 'CustomBatchNorm' in i:
continue
if 'Linear' in i:
if 'NonDynamicallyQuantizableLinear' in i:
i = 'out'
else:
i = i.replace('Linear', 'Dense')
elif 'Conv1d' in i:
i = i.replace('Conv1d', 'Conv')
elif 'MHSAwithQS' in i:
i = i.replace('MHSAwithQS', 'SelfAttention')
elif 'weight' in i:
if bn or ln:
i = i.replace('weight', 'scale')
else:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
def sd_transform(sd):
# pylint: disable=locally-disabled, modified-iterating-dict, consider-using-dict-items
keys = list(sd.keys())
out = {}
for k in keys:
new_key = k
if len(k) == 5:
_, gn_id, seq_id = k[:3]
gn_id = int(gn_id.split('_')[1])
seq_id = int(seq_id.split('_')[1])
if 'LayerNorm' in k[3]:
new_key = (k[3].replace('0', f'{gn_id*3+seq_id}'), k[4])
else:
new_key = (k[3].replace('0', f'{gn_id*3+seq_id+2}'), k[4])
elif len(k) == 2 and k[0] == 'Dense_2':
new_key = ('Dense_17', k[1])
out[new_key] = sd[k]
return out
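# Note on the index arithmetic above (sketch): the two nested PyTorch indices
# (`gn_id`, `seq_id`) are collapsed into a single running Flax layer index,
# `gn_id * 3 + seq_id` for LayerNorms and `gn_id * 3 + seq_id + 2` for Dense
# layers, with the trailing ('Dense_2', ...) head remapped to ('Dense_17', ...).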
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
pyt_batch = dict(
n_node=torch.LongTensor([5]),
n_edge=torch.LongTensor([5]),
nodes=torch.randn(5, 9),
edges=torch.randn(5, 3),
globals=torch.randn(1, 128),
senders=torch.LongTensor(list(range(5))),
receivers=torch.LongTensor([(i + 1) % 5 for i in range(5)]))
jax_batch = {k: np.array(v) for k, v in pyt_batch.items()}
# Test outputs for identical weights and inputs.
graph_j = jraph.GraphsTuple(**jax_batch)
graph_p = jraph.GraphsTuple(**pyt_batch)
jax_batch = {'inputs': graph_j}
pyt_batch = {'inputs': graph_p}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
out_transform=None)
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
ImagenetResNetWorkload as JaxWorkload
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_pytorch.workload import \
ImagenetResNetWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
bn = False
for i in k:
bn = bn or 'BatchNorm' in i
if 'ModuleList' in i:
continue
if 'Linear' in i:
if 'NonDynamicallyQuantizableLinear' in i:
i = 'out'
else:
i = i.replace('Linear', 'Dense')
elif 'Conv2d' in i:
i = i.replace('Conv2d', 'Conv')
elif 'BatchNorm2d' in i:
i = i.replace('BatchNorm2d', 'BatchNorm')
elif 'MHSAwithQS' in i:
i = i.replace('MHSAwithQS', 'SelfAttention')
elif 'weight' in i:
if bn:
i = i.replace('weight', 'scale')
else:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
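# For illustration (sketch of the transform above): ('Conv2d_0', 'weight') maps
# to ('Conv_0', 'kernel') and ('BatchNorm2d_0', 'weight') maps to
# ('BatchNorm_0', 'scale'); `sd_transform` below then regroups these into the
# Flax block names (e.g. 'Conv_init', 'BottleneckResNetBlock_<i>').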
def sd_transform(sd):
# pylint: disable=locally-disabled, consider-using-generator
keys = sorted(sd.keys())
c = -1
prev = None
for k in keys:
if 'Bottleneck' in ''.join(k):
if prev is None or prev != k[:2]:
prev = k[:2]
c += 1
new_key = (f'BottleneckResNetBlock_{c}',) + k[2:]
if 'Sequential' in ''.join(new_key):
new_key = tuple([
(i.replace('_0', '_proj') if 'BatchNorm' in i or 'Conv' in i else i)
for i in new_key
if 'Sequential' not in i
])
sd[new_key] = sd[k]
del sd[k]
elif 'BatchNorm' in k[0] or 'Conv' in k[0]:
new_key = (k[0].replace('_0', '_init'), *k[1:])
sd[new_key] = sd[k]
del sd[k]
return sd
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
image = torch.randn(2, 3, 224, 224)
jax_batch = {'inputs': image.permute(0, 2, 3, 1).detach().numpy()}
pyt_batch = {'inputs': image}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
)
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import numpy as np
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_jax.workload import \
Criteo1TbDlrmSmallWorkload as JaxWorkload
from algorithmic_efficiency.workloads.criteo1tb.criteo1tb_pytorch.workload import \
Criteo1TbDlrmSmallWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def key_transform(k):
new_key = []
s_count = None
for i in k:
if 'Sequential' in i:
s_count = int(i.split('_')[1])
continue
if 'Embedding' in i:
return ('embedding_table',)
if 'Linear' in i:
i = i.replace('Linear', 'Dense')
name, count = i.split('_')
i = name + '_' + str(s_count * 3 + int(count))
elif 'weight' in i:
i = i.replace('weight', 'kernel')
new_key.append(i)
return tuple(new_key)
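# For illustration (sketch of the transform above): the embedding table maps to
# ('embedding_table',); a key like ('Sequential_0', 'Linear_1', 'weight') maps
# to ('Dense_1', 'kernel') and ('Sequential_1', 'Linear_0', 'weight') to
# ('Dense_3', 'kernel'), i.e. the per-Sequential layer index is flattened via
# `s_count * 3 + count`.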
sd_transform = None
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
pyt_batch = {
'inputs': torch.ones((2, 13 + 26)),
'targets': torch.randint(low=0, high=1, size=(2,)),
'weights': torch.ones(2),
}
jax_batch = {k: np.array(v) for k, v in pyt_batch.items()}
# Test outputs for identical weights and inputs.
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=key_transform,
sd_transform=sd_transform,
out_transform=None)
|
import os
# Disable GPU access for both jax and pytorch.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import jax
import torch
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.fastmri.fastmri_jax.workload import \
FastMRIWorkload as JaxWorkload
from algorithmic_efficiency.workloads.fastmri.fastmri_pytorch.workload import \
FastMRIWorkload as PytWorkload
from tests.modeldiffs.diff import out_diff
def sd_transform(sd):
def sort_key(k):
if k[0] == 'ModuleList_0':
return (0, *k)
if k[0] == 'ConvBlock_0':
return (1, *k)
if k[0] == 'ModuleList_1':
return (2, *k)
if k[0] == 'ModuleList_2':
return (3, *k)
keys = sorted(sd.keys(), key=sort_key)
c = 0
for idx, k in enumerate(keys):
new_key = []
for idx2, i in enumerate(k):
if 'ModuleList' in i or 'Sequential' in i:
continue
if i.startswith('ConvBlock'):
if idx != 0 and keys[idx - 1][:idx2 + 1] != k[:idx2 + 1]:
c += 1
i = f'ConvBlock_{c}'
if 'Conv2d' in i:
i = i.replace('Conv2d', 'Conv')
if 'ConvTranspose2d' in i:
i = i.replace('ConvTranspose2d', 'ConvTranspose')
if 'weight' in i:
i = i.replace('weight', 'kernel')
new_key.append(i)
new_key = tuple(new_key)
sd[new_key] = sd[k]
del sd[k]
return sd
key_transform = None
if __name__ == '__main__':
# pylint: disable=locally-disabled, not-callable
jax_workload = JaxWorkload()
pytorch_workload = PytWorkload()
# Test outputs for identical weights and inputs.
image = torch.randn(2, 320, 320)
jax_batch = {'inputs': image.detach().numpy()}
pyt_batch = {'inputs': image}
pytorch_model_kwargs = dict(
augmented_and_preprocessed_input_batch=pyt_batch,
model_state=None,
mode=spec.ForwardPassMode.EVAL,
rng=None,
update_batch_norm=False)
jax_model_kwargs = dict(
augmented_and_preprocessed_input_batch=jax_batch,
mode=spec.ForwardPassMode.EVAL,
rng=jax.random.PRNGKey(0),
update_batch_norm=False)
out_diff(
jax_workload=jax_workload,
pytorch_workload=pytorch_workload,
jax_model_kwargs=jax_model_kwargs,
pytorch_model_kwargs=pytorch_model_kwargs,
key_transform=None,
sd_transform=sd_transform,
)
|
"""Tests for imagenet_resnet/imagenet_jax/workload.py."""
from absl.testing import absltest
import jax
import jax.numpy as jnp
from algorithmic_efficiency import spec
from algorithmic_efficiency.workloads.imagenet_resnet.imagenet_jax.workload import \
ImagenetResNetWorkload
def _pytree_total_diff(pytree_a, pytree_b):
pytree_diff = jax.tree_map(lambda a, b: jnp.sum(a - b), pytree_a, pytree_b)
pytree_diff = jax.tree_util.tree_leaves(pytree_diff)
return jnp.sum(jnp.array(pytree_diff))
class ModelsTest(absltest.TestCase):
"""Tests for imagenet_resnet/imagenet_jax/workload.py."""
def test_forward_pass(self):
batch_size = 11
rng = jax.random.PRNGKey(0)
rng, model_init_rng, *data_rngs = jax.random.split(rng, 4)
workload = ImagenetResNetWorkload()
model_params, batch_stats = workload.init_model_fn(model_init_rng)
input_shape = (jax.local_device_count(), batch_size, 224, 224, 3)
first_input_batch = jax.random.normal(data_rngs[0], shape=input_shape)
expected_logits_shape = (jax.local_device_count(), batch_size, 1000)
# static_broadcasted_argnums=(3, 5) will recompile each time we call it in
# this function because we call it with a different combination of those two
# args each time. Can't call with kwargs.
pmapped_model_fn = jax.pmap(
workload.model_fn,
axis_name='batch',
in_axes=(0, 0, 0, None, None, None),
static_broadcasted_argnums=(3, 5))
logits, updated_batch_stats = pmapped_model_fn(
model_params,
{'inputs': first_input_batch},
batch_stats,
spec.ForwardPassMode.TRAIN,
rng,
True)
self.assertEqual(logits.shape, expected_logits_shape)
# Test that batch stats are updated.
self.assertNotEqual(
_pytree_total_diff(batch_stats, updated_batch_stats), 0.0)
second_input_batch = jax.random.normal(data_rngs[1], shape=input_shape)
# Test that batch stats are not updated when we say so.
_, same_batch_stats = pmapped_model_fn(
model_params,
{'inputs': second_input_batch},
updated_batch_stats,
spec.ForwardPassMode.TRAIN,
rng,
False)
self.assertEqual(
_pytree_total_diff(same_batch_stats, updated_batch_stats), 0.0)
# Test eval model.
logits, _ = pmapped_model_fn(
model_params,
{'inputs': second_input_batch},
batch_stats,
spec.ForwardPassMode.EVAL,
rng,
False)
self.assertEqual(logits.shape, expected_logits_shape)
if __name__ == '__main__':
absltest.main()
|
"""Update submission function in Jax."""
import functools
from typing import Dict, List, Tuple
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, 0, 0, None, None),
static_broadcasted_argnums=(0, 1),
donate_argnums=(2, 3, 4))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
batch,
rng,
grad_clip,
label_smoothing):
def _loss_fn(params):
"""Loss function used for training."""
logits, new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(
sum(jnp.sum(g**2) for g in jax.tree_util.tree_leaves(grad)))
if grad_clip is not None:
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state, loss, grad_norm
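# Note: the `grad_clip` branch above clips by global norm, rescaling gradients
# by min(1, grad_clip / (grad_norm + _GRAD_CLIP_EPS)) so small-norm gradients
# are left untouched.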
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
if hasattr(hyperparameters, 'label_smoothing'):
label_smoothing = hyperparameters.label_smoothing
else:
label_smoothing = 0.0
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
new_optimizer_state, new_params, new_model_state, loss, grad_norm = pmapped_train_step( # pylint: disable=line-too-long
workload, opt_update_fn, model_state, optimizer_state,
current_param_container, batch, per_device_rngs, grad_clip,
label_smoothing)
# Log loss, grad_norm.
if ((global_step <= 100 or global_step % 500 == 0) and
workload.metrics_logger is not None):
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss[0],
'grad_norm': grad_norm[0],
}, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
|
"""Submission file for an AdamW optimizer with warmup+cosine LR in PyTorch."""
import torch
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms import cosine_warmup
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an AdamW optimizer and a learning rate schedule."""
del model_state
del rng
epsilon = (
hyperparameters.epsilon if hasattr(hyperparameters, 'epsilon') else 1e-8)
optimizer_state = {
'optimizer':
torch.optim.AdamW(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=epsilon,
weight_decay=hyperparameters.weight_decay),
}
target_setting_step_hint = int(0.75 * workload.step_hint)
optimizer_state['scheduler'] = cosine_warmup.pytorch_cosine_warmup(
target_setting_step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
|
"""Submission file for a NAdamW optimizer in PyTorch."""
import math
from typing import List
import torch
from torch import Tensor
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms import cosine_warmup
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
update_params # pylint: disable=unused-import
# Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py
class NAdamW(torch.optim.Optimizer):
r"""Implements NAdamW algorithm.
See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of
the NAdam algorithm (there is also a comment in the code which highlights
  the only difference between NAdamW and AdamW).
For further details regarding the algorithm we refer to
`Decoupled Weight Decay Regularization`_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay coefficient (default: 1e-2)
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=1e-2):
if not 0.0 <= lr:
raise ValueError(f'Invalid learning rate: {lr}')
if not 0.0 <= eps:
raise ValueError(f'Invalid epsilon value: {eps}')
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
if not 0.0 <= weight_decay:
raise ValueError(f'Invalid weight_decay value: {weight_decay}')
defaults = {
'lr': lr, 'betas': betas, 'eps': eps, 'weight_decay': weight_decay
}
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
state_values = list(self.state.values())
step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
state_values[0]['step'])
if not step_is_tensor:
for s in state_values:
s['step'] = torch.tensor(float(s['step']))
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
state_steps = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('NAdamW does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = torch.tensor(0.)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(
p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
exp_avg_sqs.append(state['exp_avg_sq'])
state_steps.append(state['step'])
nadamw(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
eps=group['eps'])
return loss
def nadamw(params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
eps: float):
r"""Functional API that performs NAdamW algorithm computation.
See NAdamW class for details.
"""
if not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
'API has changed, `state_steps` argument must contain a list of' +
' singleton tensors')
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# update step
step_t += 1
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Only difference between NAdamW and AdamW in this implementation.
# The official PyTorch implementation of NAdam uses a different algorithm.
# We undo these ops later on, which could cause numerical issues but saves
# us from having to make an extra copy of the gradients.
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
step = step_t.item()
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
step_size = lr / bias_correction1
bias_correction2_sqrt = math.sqrt(bias_correction2)
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
param.addcdiv_(exp_avg, denom, value=-step_size)
exp_avg.sub_(grad, alpha=1 - beta1).div_(beta1)
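# Note on the in-place trick above: after the second
# `exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)`, `exp_avg` temporarily holds
# the Nesterov-style estimate beta1 * m_t + (1 - beta1) * g_t used for the
# parameter update; the final `exp_avg.sub_(grad, alpha=1 - beta1).div_(beta1)`
# restores m_t, so no extra copy of the moment buffer or gradients is needed.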
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a NAdamW optimizer and a learning rate schedule."""
del model_state
del rng
epsilon = (
hyperparameters.epsilon if hasattr(hyperparameters, 'epsilon') else 1e-8)
optimizer_state = {
'optimizer':
NAdamW(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(hyperparameters.beta1, hyperparameters.beta2),
eps=epsilon,
weight_decay=hyperparameters.weight_decay),
}
target_setting_step_hint = int(0.75 * workload.step_hint)
optimizer_state['scheduler'] = cosine_warmup.pytorch_cosine_warmup(
target_setting_step_hint, hyperparameters, optimizer_state['optimizer'])
return optimizer_state
|
"""Submission file for a SGD with Nesterov optimizer in Jax."""
from typing import Callable
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
# Create learning rate schedule.
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = create_lr_schedule_fn(target_setting_step_hint,
hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = sgd(
learning_rate=lr_schedule_fn,
weight_decay=hyperparameters.weight_decay,
momentum=hyperparameters.beta1,
nesterov=True)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=hyperparameters.warmup_steps)
decay_steps = step_hint - hyperparameters.warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn],
boundaries=[hyperparameters.warmup_steps])
return lr_schedule_fn
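# The resulting schedule (a sketch of the code above): linear warmup from 0 to
# `learning_rate` over `warmup_steps`, then a linear (power=1) decay from
# `learning_rate` down to `learning_rate * end_factor` over
# `decay_steps_factor * (step_hint - warmup_steps)` steps, constant afterwards.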
# Forked from github.com/google/init2winit/blob/master/init2winit/
# optimizer_lib/optimizers.py.
def sgd(learning_rate, weight_decay, momentum=None, nesterov=False):
r"""A customizable gradient descent optimizer.
NOTE: We apply weight decay **before** computing the momentum update.
This is equivalent to applying WD after for heavy-ball momentum,
but slightly different when using Nesterov acceleration. This is the same as
how the Flax optimizers handle weight decay
https://flax.readthedocs.io/en/latest/_modules/flax/optim/momentum.html.
Args:
learning_rate: The learning rate. Expected as the positive learning rate,
      for example `\alpha` in `w -= \alpha * u` (as opposed to `-\alpha`).
weight_decay: The weight decay hyperparameter.
momentum: The momentum hyperparameter.
nesterov: Whether or not to use Nesterov momentum.
Returns:
    An optax gradient transformation that applies weight decay and then one of
    an SGD, Momentum, or Nesterov update.
"""
return optax.chain(
optax.add_decayed_weights(weight_decay),
optax.sgd(
learning_rate=learning_rate, momentum=momentum, nesterov=nesterov))
|
"""Submission file for a SGD with HeavyBall momentum optimizer in PyTorch."""
import torch
from torch.optim.lr_scheduler import LambdaLR
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_momentum import \
create_lr_schedule_fn
from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_state
del rng
# Create optimizer.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=hyperparameters.learning_rate,
momentum=hyperparameters.beta1,
weight_decay=hyperparameters.weight_decay,
nesterov=False),
}
# Create learning rate schedule.
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = create_lr_schedule_fn(target_setting_step_hint,
hyperparameters)
# PyTorch's LambdaLR expects the lr_lambda fn to return a factor which will
# be multiplied with the base lr, so we have to divide by it here.
def _lr_lambda(step: int) -> float:
return lr_schedule_fn(step).item() / hyperparameters.learning_rate
optimizer_state['scheduler'] = LambdaLR(
optimizer_state['optimizer'], lr_lambda=_lr_lambda)
return optimizer_state
|
"""Collection of the target-setting runs for all workloads."""
|
"""Submission file for an AdamW optimizer with warmup+cosine LR in Jax."""
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms import cosine_warmup
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates an AdamW optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = cosine_warmup.jax_cosine_warmup(target_setting_step_hint,
hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
epsilon = (
hyperparameters.epsilon if hasattr(hyperparameters, 'epsilon') else 1e-8)
opt_init_fn, opt_update_fn = optax.adamw(
learning_rate=lr_schedule_fn,
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=epsilon,
weight_decay=hyperparameters.weight_decay)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
|
"""Submission file for a NAdamW optimizer with warmup+cosine LR in Jax."""
from typing import Any, Callable, NamedTuple, Optional, Union
import chex
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms import cosine_warmup
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_submission_base import \
update_params # pylint: disable=unused-import
# Forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/alias.py
def nadamw(
learning_rate: Union[float, optax.Schedule],
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
debias: bool = True,
weight_decay: float = 0.0,
weight_decay_mask: Optional[Union[Any, Callable[[optax.Params],
Any]]] = None,
) -> optax.GradientTransformation:
"""Rescale updates according to the NAdam algorithm.
References:
There seem to be multiple versions of NAdam. The original version is here
https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ (the official PyTorch
implementation also follows this).
Current code implements a simpler version with no momentum decay and slightly
different bias correction terms. The exact description can be found here
https://arxiv.org/pdf/1910.05446.pdf (Table 1).
Args:
learning_rate: this is a fixed global scaling factor.
b1: decay rate for the exponentially weighted average of grads.
b2: decay rate for the exponentially weighted average of squared grads.
eps: term added to the denominator to improve numerical stability.
eps_root: term added to the denominator inside the square-root to improve
numerical stability when backpropagating gradients through the rescaling.
debias: whether to use bias correction.
weight_decay: strength of the weight decay regularization. Note that this
weight decay is multiplied with the learning rate. This is consistent with
other frameworks such as PyTorch, but different from (Loshchilov et al,
2019) where the weight decay is only multiplied with the "schedule
multiplier", but not the base learning rate.
weight_decay_mask: a tree with same structure as (or a prefix of) the params
PyTree, or a Callable that returns such a pytree given the params/updates.
The leaves should be booleans, `True` for leaves/subtrees you want to
apply the weight decay to, and `False` for those you want to skip. Note
that the Nadam gradient transformations are applied to all parameters.
Returns:
An (init_fn, update_fn) tuple.
"""
return optax.chain(
scale_by_nadam(b1, b2, eps, eps_root, debias),
optax.add_decayed_weights(weight_decay, weight_decay_mask),
scale_by_learning_rate(learning_rate))
# All functions below are forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/transform.py
def scale_by_nadam(b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
debias: bool = True,
power: float = 0.5) -> optax.GradientTransformation:
"""Rescale updates according to the NAdam algorithm.
References:
There seem to be multiple versions of NAdam. The original version is here
  https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ (the PyTorch
  implementation also follows this).
Current code implements a simpler version with no momentum decay and slightly
different (standard Adam) bias correction terms. The exact description can be
found here https://arxiv.org/pdf/1910.05446.pdf (Table 1)
Args:
b1: decay rate for the exponentially weighted average of grads.
b2: decay rate for the exponentially weighted average of squared grads.
eps: term added to the denominator to improve numerical stability.
eps_root: term added to the denominator inside the square-root to improve
numerical stability when backpropagating gradients through the rescaling.
debias: whether to use bias correction.
power: the power to use in the preconditioner (0.5 in default adam).
Returns:
An (init_fn, update_fn) tuple.
"""
raise_power = jnp.sqrt if power == 0.5 else lambda x: jnp.power(x, power)
def init_fn(params):
mu = jax.tree_map(jnp.zeros_like, params) # First moment
nu = jax.tree_map(jnp.zeros_like, params) # Second moment
return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
def update_fn(updates, state, params=None):
del params
mu = _update_moment(updates, state.mu, b1, 1)
nu = _update_moment(updates, state.nu, b2, 2)
count = state.count + jnp.array(1, dtype=jnp.int32)
mu_hat = _update_moment(updates, mu, b1, 1)
mu_hat = mu_hat if not debias else _bias_correction(mu_hat, b1, count)
nu_hat = nu if not debias else _bias_correction(nu, b2, count)
updates = jax.tree_map(
lambda m, v: m / (raise_power(v + eps_root) + eps), mu_hat, nu_hat)
return updates, ScaleByAdamState(count=count, mu=mu, nu=nu)
return optax.GradientTransformation(init_fn, update_fn)
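# In effect, `scale_by_nadam` with debias=True and the default power=0.5
# computes per step t (a sketch of the update_fn above, cf. Table 1 of
# arxiv.org/abs/1910.05446):
#   m_t = b1 * m_{t-1} + (1 - b1) * g_t
#   v_t = b2 * v_{t-1} + (1 - b2) * g_t ** 2
#   m_hat = (b1 * m_t + (1 - b1) * g_t) / (1 - b1 ** t)  # Nesterov-style lookahead
#   v_hat = v_t / (1 - b2 ** t)
#   update_t = m_hat / (sqrt(v_hat + eps_root) + eps)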
class ScaleByAdamState(NamedTuple):
"""State for the NAdam algorithm."""
count: chex.Array # shape=(), dtype=jnp.int32.
mu: optax.Updates
nu: optax.Updates
def _update_moment(updates, moments, decay, order):
"""Compute the exponential moving average of the `order-th` moment."""
return jax.tree_map(
lambda g, t: (1 - decay) * (g**order) + decay * t, updates, moments)
def _bias_correction(moment, decay, count):
"""Perform bias correction. This becomes a no-op as count goes to infinity."""
beta = 1 - decay**count
return jax.tree_map(lambda t: t / beta.astype(t.dtype), moment)
def scale_by_learning_rate(learning_rate, flip_sign=True):
m = -1 if flip_sign else 1
if callable(learning_rate):
return optax.scale_by_schedule(lambda count: m * learning_rate(count))
return optax.scale(m * learning_rate)
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a NAdamW optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = cosine_warmup.jax_cosine_warmup(target_setting_step_hint,
hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
epsilon = (
hyperparameters.epsilon if hasattr(hyperparameters, 'epsilon') else 1e-8)
opt_init_fn, opt_update_fn = nadamw(
learning_rate=lr_schedule_fn,
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=epsilon,
weight_decay=hyperparameters.weight_decay)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
|
"""Implementions of a linear warmup then cosine decay LR schedule."""
import optax
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.optim.lr_scheduler import LinearLR
from torch.optim.lr_scheduler import SequentialLR
def jax_cosine_warmup(step_hint: int, hyperparameters):
# Create learning rate schedule.
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=hyperparameters.warmup_steps)
cosine_steps = max(step_hint - hyperparameters.warmup_steps, 1)
cosine_fn = optax.cosine_decay_schedule(
init_value=hyperparameters.learning_rate, decay_steps=cosine_steps)
schedule_fn = optax.join_schedules(
schedules=[warmup_fn, cosine_fn],
boundaries=[hyperparameters.warmup_steps])
return schedule_fn
def pytorch_cosine_warmup(step_hint: int, hyperparameters, optimizer):
warmup = LinearLR(
optimizer,
start_factor=1e-10,
end_factor=1.,
total_iters=hyperparameters.warmup_steps)
cosine_steps = max(step_hint - hyperparameters.warmup_steps, 1)
cosine_decay = CosineAnnealingLR(optimizer, T_max=cosine_steps)
return SequentialLR(
optimizer,
schedulers=[warmup, cosine_decay],
milestones=[hyperparameters.warmup_steps])
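# Minimal usage sketch of the Jax schedule above (hypothetical hyperparameters,
# not part of the library):
#   class _H:
#     learning_rate = 1e-3
#     warmup_steps = 100
#   schedule = jax_cosine_warmup(step_hint=1000, hyperparameters=_H)
#   schedule(0)     # ~0.0, start of the linear warmup
#   schedule(100)   # 1e-3, peak learning rate at the end of warmup
#   schedule(1000)  # ~0.0, end of the cosine decay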
|
from typing import Dict, Iterator, Tuple
from algorithmic_efficiency import spec
def data_selection(
workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
    rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
batch = next(input_queue)
return batch
|
"""Batch size selection submission function."""
def get_batch_size(workload_name):
# Return the global batch size.
if workload_name == 'criteo1tb':
return 262_144
elif workload_name == 'fastmri':
return 32
elif workload_name == 'imagenet_resnet':
return 1024
elif workload_name == 'imagenet_vit':
return 1024
elif workload_name == 'librispeech_conformer':
return 256
elif workload_name == 'librispeech_deepspeech':
return 256
elif workload_name == 'ogbg':
return 512
elif workload_name == 'wmt':
return 128
else:
raise ValueError(f'Unsupported workload name: {workload_name}.')
|
"""Batch size and update submission functions in PyTorch."""
from typing import Dict, List, Tuple
from absl import logging
import torch
import torch.distributed.nn as dist_nn
from algorithmic_efficiency import spec
from algorithmic_efficiency.pytorch_utils import pytorch_setup
USE_PYTORCH_DDP = pytorch_setup()[0]
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
current_model = current_param_container
current_model.train()
optimizer_state['optimizer'].zero_grad()
logits_batch, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
label_smoothing = (
hyperparameters.label_smoothing if hasattr(hyperparameters,
'label_smoothing') else 0.0)
if hasattr(hyperparameters, 'grad_clip'):
grad_clip = hyperparameters.grad_clip
else:
grad_clip = None
loss_dict = workload.loss_fn(
label_batch=batch['targets'],
logits_batch=logits_batch,
mask_batch=batch.get('weights'),
label_smoothing=label_smoothing)
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
if USE_PYTORCH_DDP:
# Use dist_nn.all_reduce to ensure correct loss and gradient scaling.
summed_loss = dist_nn.all_reduce(summed_loss)
n_valid_examples = dist_nn.all_reduce(n_valid_examples)
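    # Note: dist_nn.all_reduce is autograd-aware (unlike
    # torch.distributed.all_reduce), so gradients of the globally summed loss
    # flow back to each replica and the division below scales by the global
    # n_valid_examples rather than the local count.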
loss = summed_loss / n_valid_examples
loss.backward()
if grad_clip is not None:
torch.nn.utils.clip_grad_norm_(
current_model.parameters(), max_norm=grad_clip)
optimizer_state['optimizer'].step()
if 'scheduler' in optimizer_state:
optimizer_state['scheduler'].step()
# Log training metrics - loss, grad_norm, batch_size.
if global_step <= 100 or global_step % 500 == 0:
with torch.no_grad():
parameters = [p for p in current_model.parameters() if p.grad is not None]
grad_norm = torch.norm(
torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
if workload.metrics_logger is not None:
workload.metrics_logger.append_scalar_metrics(
{
'loss': loss.item(),
'grad_norm': grad_norm.item(),
}, global_step)
logging.info('%d) loss = %0.3f, grad_norm = %0.3f',
global_step,
loss.item(),
grad_norm.item())
return (optimizer_state, current_param_container, new_model_state)
|
"""Submission file for a SGD with Nesterov optimizer in PyTorch."""
import torch
from torch.optim.lr_scheduler import LambdaLR
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_nesterov import \
create_lr_schedule_fn
from reference_algorithms.target_setting_algorithms.pytorch_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_state
del rng
# Create optimizer.
optimizer_state = {
'optimizer':
torch.optim.SGD(
model_params.parameters(),
lr=hyperparameters.learning_rate,
momentum=hyperparameters.beta1,
weight_decay=hyperparameters.weight_decay,
nesterov=True),
}
# Create learning rate schedule.
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = create_lr_schedule_fn(target_setting_step_hint,
hyperparameters)
# PyTorch's LambdaLR expects the lr_lambda fn to return a factor which will
# be multiplied with the base lr, so we have to divide by it here.
def _lr_lambda(step: int) -> float:
return lr_schedule_fn(step).item() / hyperparameters.learning_rate
optimizer_state['scheduler'] = LambdaLR(
optimizer_state['optimizer'], lr_lambda=_lr_lambda)
return optimizer_state
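# Sanity check (sketch, not part of the original submission): because LambdaLR
# multiplies the returned factor by the base lr, the effective lr at `step` is
#   hyperparameters.learning_rate * (lr_schedule_fn(step) / learning_rate)
#   == lr_schedule_fn(step),
# so the PyTorch submission follows the same schedule as the Jax one.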
|
"""Submission file for a SGD with HeavyBall momentum optimizer in Jax."""
from typing import Callable
from flax import jax_utils
import jax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
from reference_algorithms.target_setting_algorithms.data_selection import \
data_selection # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.get_batch_size import \
get_batch_size # pylint: disable=unused-import
from reference_algorithms.target_setting_algorithms.jax_submission_base import \
update_params # pylint: disable=unused-import
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
"""Creates a Nesterov optimizer and a learning rate schedule."""
del model_params
del model_state
del rng
# Create learning rate schedule.
target_setting_step_hint = int(0.75 * workload.step_hint)
lr_schedule_fn = create_lr_schedule_fn(target_setting_step_hint,
hyperparameters)
# Create optimizer.
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = sgd(
learning_rate=lr_schedule_fn,
weight_decay=hyperparameters.weight_decay,
momentum=hyperparameters.beta1,
nesterov=False)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def create_lr_schedule_fn(
step_hint: int,
hyperparameters: spec.Hyperparameters) -> Callable[[int], float]:
warmup_fn = optax.linear_schedule(
init_value=0.,
end_value=hyperparameters.learning_rate,
transition_steps=hyperparameters.warmup_steps)
decay_steps = step_hint - hyperparameters.warmup_steps
polynomial_schedule_fn = optax.polynomial_schedule(
init_value=hyperparameters.learning_rate,
end_value=hyperparameters.learning_rate * hyperparameters.end_factor,
power=1,
transition_steps=int(decay_steps * hyperparameters.decay_steps_factor))
lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, polynomial_schedule_fn],
boundaries=[hyperparameters.warmup_steps])
return lr_schedule_fn
# Forked from
# github.com/google/init2winit/blob/master/init2winit/optimizer_lib/optimizers.py
def sgd(learning_rate, weight_decay, momentum=None, nesterov=False):
r"""A customizable gradient descent optimizer.
NOTE: We apply weight decay **before** computing the momentum update.
This is equivalent to applying WD after for heavy-ball momentum,
but slightly different when using Nesterov acceleration. This is the same as
how the Flax optimizers handle weight decay
https://flax.readthedocs.io/en/latest/_modules/flax/optim/momentum.html.
Args:
    learning_rate: The learning rate. Expected as the positive learning rate,
      for example `\alpha` in `w -= \alpha * u` (as opposed to `-\alpha`).
weight_decay: The weight decay hyperparameter.
momentum: The momentum hyperparameter.
nesterov: Whether or not to use Nesterov momentum.
Returns:
    An optax gradient transformation that applies weight decay followed by an
    SGD, Momentum, or Nesterov update.
"""
return optax.chain(
optax.add_decayed_weights(weight_decay),
optax.sgd(
learning_rate=learning_rate, momentum=momentum, nesterov=nesterov))
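# Example sketch (not from the original file; the hyperparameter values are
# hypothetical): the heavy-ball submission above calls this with
# nesterov=False, while a Nesterov variant would pass nesterov=True.
#   opt_init_fn, opt_update_fn = sgd(
#       learning_rate=lr_schedule_fn,
#       weight_decay=1e-4,
#       momentum=0.9,
#       nesterov=True)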
|
"""Training algorithm track submission functions for MNIST."""
import functools
from typing import Dict, Iterator, List, Tuple
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import optax
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'mnist': 1024}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_params
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optax.chain(
optax.scale_by_adam(
b1=1.0 - hyperparameters.one_minus_beta_1,
b2=0.999,
eps=hyperparameters.epsilon),
optax.scale(-hyperparameters.learning_rate))
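  # Note: this chain is equivalent to optax.adam with a constant learning
  # rate, since optax.adam composes scale_by_adam with scale(-learning_rate).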
return jax_utils.replicate(opt_init_fn(params_zeros_like)), opt_update_fn
# We need to jax.pmap here instead of inside update_params because the latter
# would recompile the function every step.
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, None, 0, 0, 0),
static_broadcasted_argnums=(0, 1))
def pmapped_update_params(workload: spec.Workload,
opt_update_fn,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
optimizer_state: spec.OptimizerState,
rng: spec.RandomState) -> spec.UpdateReturn:
del hyperparameters
def loss_fn(params):
logits_batch, new_model_state = workload.model_fn(
params=params,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], logits_batch)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
return loss, new_model_state
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, new_model_state), grad = grad_fn(current_param_container)
grad = lax.pmean(grad, axis_name='batch')
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_optimizer_state, updated_params, new_model_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params, updated_model_state)."""
del current_params_types
del loss_type
del eval_results
del global_step
per_device_rngs = jax.random.split(rng, jax.local_device_count())
optimizer_state, opt_update_fn = optimizer_state
new_optimizer_state, updated_params, new_model_state = pmapped_update_params(
workload,
opt_update_fn,
current_param_container,
model_state,
hyperparameters,
batch,
optimizer_state,
per_device_rngs)
return (new_optimizer_state, opt_update_fn), updated_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del workload
del optimizer_state
del current_param_container
del model_state
del hyperparameters
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for MNIST."""
from typing import Dict, Iterator, List, Tuple
import torch
from algorithmic_efficiency import spec
def get_batch_size(workload_name):
# Return the global batch size.
batch_sizes = {'mnist': 1024}
return batch_sizes[workload_name]
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del workload
del rng
optimizer_state = {
'optimizer':
torch.optim.Adam(
model_params.parameters(),
lr=hyperparameters.learning_rate,
betas=(1.0 - hyperparameters.one_minus_beta_1, 0.999),
eps=hyperparameters.epsilon),
}
return optimizer_state
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del hyperparameters
del loss_type
del current_params_types
del eval_results
del global_step
current_model = current_param_container
current_model.train()
for param in current_model.parameters():
param.grad = None
output, new_model_state = workload.model_fn(
params=current_model,
augmented_and_preprocessed_input_batch=batch,
model_state=model_state,
mode=spec.ForwardPassMode.TRAIN,
rng=rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(
label_batch=batch['targets'], logits_batch=output)
loss = loss_dict['summed'] / loss_dict['n_valid_examples']
loss.backward()
optimizer_state['optimizer'].step()
return (optimizer_state, current_param_container, new_model_state)
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del optimizer_state
del current_param_container
del global_step
del rng
return next(input_queue)
|
"""Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
def get_learning_rate(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
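# Example values (sketch; assumes hypothetical hyperparameters `hparams` with
# base_lr=1.0, warmup_steps=100 and training_steps=1000):
#   get_learning_rate(0, hparams)     # 0.0, start of the linear warmup
#   get_learning_rate(50, hparams)    # 0.5, halfway through the warmup
#   get_learning_rate(500, hparams)   # 1.0 * 0.5 * (1 + cos(pi / 2)) = 0.5
#   get_learning_rate(1000, hparams)  # 0.0, end of the cosine decay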
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay,
learning_rate=0.0)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
"""Computes the squared l2 norm of the given parameters.
This function will only filter for parameters with
rank >= l2_decay_rank_threshold. So if this threshold is set to 2, then all
1d (and lower) parameter arrays, including all bias and batch norm params,
will be ignored in this computation.
Args:
params: Pytree containing parameters.
l2_decay_rank_threshold: The calculation will only include parameters with
param.ndim >= l2_decay_rank_threshold. Set to 2 to ignore all bias and
batch_norm params in the model.
Returns:
weight_l2: the squared l2 norm of all params matching the threshold.
"""
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(
jnp.sum(x**2)
for x in weight_penalty_params
if x.ndim >= l2_decay_rank_threshold)
return weight_l2
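# Usage sketch: in this submission the helper doubles as a global gradient
# norm, e.g. `grad_norm = jnp.sqrt(l2_regularization(grad, 0))` below; with
# l2_decay_rank_threshold=2 it would instead skip all bias and batch norm
# parameters when computing a weight penalty.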
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng,
lr):
optimizer_state.hyperparams['learning_rate'] = lr
def _loss_fn(params):
"""loss function used for training."""
(logits, logit_paddings), new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(l2_regularization(grad, 0))
grad_clip = hyperparameters.grad_clip
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
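# A scalar sketch of the gradient clipping above (hypothetical numbers): with
# grad_clip = 1.0 and grad_norm = 10.0 the scaling factor is
# min(1.0, 1.0 / (10.0 + 1e-6)) ~= 0.1, so the rescaled gradient has global
# L2 norm ~= grad_clip; gradients whose norm is already below grad_clip are
# left unchanged because the factor is clamped to at most 1.0.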
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del loss_type
lr = get_learning_rate(global_step, hyperparameters)
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
per_device_rngs,
lr)
new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs
if global_step <= 1000 or global_step % 100 == 0:
logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
global_step,
loss.mean(),
grad_norm.mean(),
lr)
if workload.summary_writer is not None:
workload.summary_writer.scalar('train_step_ctc_loss',
loss.mean(),
global_step)
workload.summary_writer.scalar('grad_norm', grad_norm.mean(), global_step)
workload.summary_writer.scalar('learning_rate', lr, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del optimizer_state
del current_param_container
del global_step
del rng
del hyperparameters
del workload
return next(input_queue)
|